Generate the Verilog code corresponding to the following Chisel files.
File MSHR.scala:
/*
* Copyright 2019 SiFive, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You should have received a copy of LICENSE.Apache2 along with
* this software. If not, you may obtain a copy at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package sifive.blocks.inclusivecache
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import freechips.rocketchip.tilelink._
import TLPermissions._
import TLMessages._
import MetaData._
import chisel3.PrintableHelper
import chisel3.experimental.dataview._
class ScheduleRequest(params: InclusiveCacheParameters) extends InclusiveCacheBundle(params)
{
val a = Valid(new SourceARequest(params))
val b = Valid(new SourceBRequest(params))
val c = Valid(new SourceCRequest(params))
val d = Valid(new SourceDRequest(params))
val e = Valid(new SourceERequest(params))
val x = Valid(new SourceXRequest(params))
val dir = Valid(new DirectoryWrite(params))
val reload = Bool() // get next request via allocate (if any)
}
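// Note (added for clarity, not in the original source): each Valid field above feeds one
// downstream unit of the scheduler (a -> SourceA, b -> SourceB, c -> SourceC, d -> SourceD,
// e -> SourceE, x -> SourceX) plus a directory write; `reload` asks the scheduler to hand
// this MSHR its next request via `allocate`.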
class MSHRStatus(params: InclusiveCacheParameters) extends InclusiveCacheBundle(params)
{
val set = UInt(params.setBits.W)
val tag = UInt(params.tagBits.W)
val way = UInt(params.wayBits.W)
val blockB = Bool()
val nestB = Bool()
val blockC = Bool()
val nestC = Bool()
}
class NestedWriteback(params: InclusiveCacheParameters) extends InclusiveCacheBundle(params)
{
val set = UInt(params.setBits.W)
val tag = UInt(params.tagBits.W)
val b_toN = Bool() // nested Probes may unhit us
val b_toB = Bool() // nested Probes may demote us
val b_clr_dirty = Bool() // nested Probes clear dirty
val c_set_dirty = Bool() // nested Releases MAY set dirty
}
sealed trait CacheState
{
val code = CacheState.index.U
CacheState.index = CacheState.index + 1
}
object CacheState
{
var index = 0
}
case object S_INVALID extends CacheState
case object S_BRANCH extends CacheState
case object S_BRANCH_C extends CacheState
case object S_TIP extends CacheState
case object S_TIP_C extends CacheState
case object S_TIP_CD extends CacheState
case object S_TIP_D extends CacheState
case object S_TRUNK_C extends CacheState
case object S_TRUNK_CD extends CacheState
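// Note (added for clarity): `code` is drawn from the mutable CacheState.index counter when
// each case object is first initialized, so every state above receives a distinct small
// UInt literal; only distinctness matters, since the codes are used purely by the
// coverage/assertion helpers (cacheState/eviction/transition/bypass) later in this file.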
class MSHR(params: InclusiveCacheParameters) extends Module
{
val io = IO(new Bundle {
val allocate = Flipped(Valid(new AllocateRequest(params))) // refills MSHR for next cycle
val directory = Flipped(Valid(new DirectoryResult(params))) // triggers schedule setup
val status = Valid(new MSHRStatus(params))
val schedule = Decoupled(new ScheduleRequest(params))
val sinkc = Flipped(Valid(new SinkCResponse(params)))
val sinkd = Flipped(Valid(new SinkDResponse(params)))
val sinke = Flipped(Valid(new SinkEResponse(params)))
val nestedwb = Flipped(new NestedWriteback(params))
})
val request_valid = RegInit(false.B)
val request = Reg(new FullRequest(params))
val meta_valid = RegInit(false.B)
val meta = Reg(new DirectoryResult(params))
// Define which states are valid
when (meta_valid) {
when (meta.state === INVALID) {
assert (!meta.clients.orR)
assert (!meta.dirty)
}
when (meta.state === BRANCH) {
assert (!meta.dirty)
}
when (meta.state === TRUNK) {
assert (meta.clients.orR)
assert ((meta.clients & (meta.clients - 1.U)) === 0.U) // at most one
}
when (meta.state === TIP) {
// noop
}
}
// Completed transitions (s_ = scheduled), (w_ = waiting)
val s_rprobe = RegInit(true.B) // B
val w_rprobeackfirst = RegInit(true.B)
val w_rprobeacklast = RegInit(true.B)
val s_release = RegInit(true.B) // CW w_rprobeackfirst
val w_releaseack = RegInit(true.B)
val s_pprobe = RegInit(true.B) // B
val s_acquire = RegInit(true.B) // A s_release, s_pprobe [1]
val s_flush = RegInit(true.B) // X w_releaseack
val w_grantfirst = RegInit(true.B)
val w_grantlast = RegInit(true.B)
val w_grant = RegInit(true.B) // first | last depending on wormhole
val w_pprobeackfirst = RegInit(true.B)
val w_pprobeacklast = RegInit(true.B)
val w_pprobeack = RegInit(true.B) // first | last depending on wormhole
val s_probeack = RegInit(true.B) // C w_pprobeackfirst (mutually exclusive with next two s_*)
val s_grantack = RegInit(true.B) // E w_grantfirst ... CAN require both outE&inD to service outD
val s_execute = RegInit(true.B) // D w_pprobeack, w_grant
val w_grantack = RegInit(true.B)
val s_writeback = RegInit(true.B) // W w_*
// [1]: We cannot issue outer Acquire while holding blockB (=> outA can stall)
// However, inB and outC are higher priority than outB, so s_release and s_pprobe
// may be safely issued while blockB. Thus we must NOT try to schedule the
// potentially stuck s_acquire with either of them (scheduler is all or none).
// Meta-data that we discover underway
val sink = Reg(UInt(params.outer.bundle.sinkBits.W))
val gotT = Reg(Bool())
val bad_grant = Reg(Bool())
val probes_done = Reg(UInt(params.clientBits.W))
val probes_toN = Reg(UInt(params.clientBits.W))
val probes_noT = Reg(Bool())
// When a nested transaction completes, update our meta data
when (meta_valid && meta.state =/= INVALID &&
io.nestedwb.set === request.set && io.nestedwb.tag === meta.tag) {
when (io.nestedwb.b_clr_dirty) { meta.dirty := false.B }
when (io.nestedwb.c_set_dirty) { meta.dirty := true.B }
when (io.nestedwb.b_toB) { meta.state := BRANCH }
when (io.nestedwb.b_toN) { meta.hit := false.B }
}
// Scheduler status
io.status.valid := request_valid
io.status.bits.set := request.set
io.status.bits.tag := request.tag
io.status.bits.way := meta.way
io.status.bits.blockB := !meta_valid || ((!w_releaseack || !w_rprobeacklast || !w_pprobeacklast) && !w_grantfirst)
io.status.bits.nestB := meta_valid && w_releaseack && w_rprobeacklast && w_pprobeacklast && !w_grantfirst
// The above rules ensure we will block and not nest an outer probe while still doing our
// own inner probes. Thus every probe wakes exactly one MSHR.
io.status.bits.blockC := !meta_valid
io.status.bits.nestC := meta_valid && (!w_rprobeackfirst || !w_pprobeackfirst || !w_grantfirst)
// The w_grantfirst in nestC is necessary to deal with:
// acquire waiting for grant, inner release gets queued, outer probe -> inner probe -> deadlock
// ... this is possible because the release+probe can be for same set, but different tag
// We can only demand: block, nest, or queue
assert (!io.status.bits.nestB || !io.status.bits.blockB)
assert (!io.status.bits.nestC || !io.status.bits.blockC)
// Scheduler requests
val no_wait = w_rprobeacklast && w_releaseack && w_grantlast && w_pprobeacklast && w_grantack
io.schedule.bits.a.valid := !s_acquire && s_release && s_pprobe
io.schedule.bits.b.valid := !s_rprobe || !s_pprobe
io.schedule.bits.c.valid := (!s_release && w_rprobeackfirst) || (!s_probeack && w_pprobeackfirst)
io.schedule.bits.d.valid := !s_execute && w_pprobeack && w_grant
io.schedule.bits.e.valid := !s_grantack && w_grantfirst
io.schedule.bits.x.valid := !s_flush && w_releaseack
io.schedule.bits.dir.valid := (!s_release && w_rprobeackfirst) || (!s_writeback && no_wait)
io.schedule.bits.reload := no_wait
io.schedule.valid := io.schedule.bits.a.valid || io.schedule.bits.b.valid || io.schedule.bits.c.valid ||
io.schedule.bits.d.valid || io.schedule.bits.e.valid || io.schedule.bits.x.valid ||
io.schedule.bits.dir.valid
// Schedule completions
when (io.schedule.ready) {
s_rprobe := true.B
when (w_rprobeackfirst) { s_release := true.B }
s_pprobe := true.B
when (s_release && s_pprobe) { s_acquire := true.B }
when (w_releaseack) { s_flush := true.B }
when (w_pprobeackfirst) { s_probeack := true.B }
when (w_grantfirst) { s_grantack := true.B }
when (w_pprobeack && w_grant) { s_execute := true.B }
when (no_wait) { s_writeback := true.B }
// Await the next operation
when (no_wait) {
request_valid := false.B
meta_valid := false.B
}
}
// Resulting meta-data
val final_meta_writeback = WireInit(meta)
val req_clientBit = params.clientBit(request.source)
val req_needT = needT(request.opcode, request.param)
val req_acquire = request.opcode === AcquireBlock || request.opcode === AcquirePerm
val meta_no_clients = !meta.clients.orR
val req_promoteT = req_acquire && Mux(meta.hit, meta_no_clients && meta.state === TIP, gotT)
when (request.prio(2) && (!params.firstLevel).B) { // always a hit
final_meta_writeback.dirty := meta.dirty || request.opcode(0)
final_meta_writeback.state := Mux(request.param =/= TtoT && meta.state === TRUNK, TIP, meta.state)
final_meta_writeback.clients := meta.clients & ~Mux(isToN(request.param), req_clientBit, 0.U)
final_meta_writeback.hit := true.B // chained requests are hits
} .elsewhen (request.control && params.control.B) { // request.prio(0)
when (meta.hit) {
final_meta_writeback.dirty := false.B
final_meta_writeback.state := INVALID
final_meta_writeback.clients := meta.clients & ~probes_toN
}
final_meta_writeback.hit := false.B
} .otherwise {
final_meta_writeback.dirty := (meta.hit && meta.dirty) || !request.opcode(2)
final_meta_writeback.state := Mux(req_needT,
Mux(req_acquire, TRUNK, TIP),
Mux(!meta.hit, Mux(gotT, Mux(req_acquire, TRUNK, TIP), BRANCH),
MuxLookup(meta.state, 0.U(2.W))(Seq(
INVALID -> BRANCH,
BRANCH -> BRANCH,
TRUNK -> TIP,
TIP -> Mux(meta_no_clients && req_acquire, TRUNK, TIP)))))
final_meta_writeback.clients := Mux(meta.hit, meta.clients & ~probes_toN, 0.U) |
Mux(req_acquire, req_clientBit, 0.U)
final_meta_writeback.tag := request.tag
final_meta_writeback.hit := true.B
}
when (bad_grant) {
when (meta.hit) {
// upgrade failed (B -> T)
assert (!meta_valid || meta.state === BRANCH)
final_meta_writeback.hit := true.B
final_meta_writeback.dirty := false.B
final_meta_writeback.state := BRANCH
final_meta_writeback.clients := meta.clients & ~probes_toN
} .otherwise {
// failed N -> (T or B)
final_meta_writeback.hit := false.B
final_meta_writeback.dirty := false.B
final_meta_writeback.state := INVALID
final_meta_writeback.clients := 0.U
}
}
val invalid = Wire(new DirectoryEntry(params))
invalid.dirty := false.B
invalid.state := INVALID
invalid.clients := 0.U
invalid.tag := 0.U
// Just because a client says BtoT, by the time we process the request he may be N.
// Therefore, we must consult our own meta-data state to confirm he owns the line still.
val honour_BtoT = meta.hit && (meta.clients & req_clientBit).orR
// The client asking us to act is proof they don't have permissions.
val excluded_client = Mux(meta.hit && request.prio(0) && skipProbeN(request.opcode, params.cache.hintsSkipProbe), req_clientBit, 0.U)
io.schedule.bits.a.bits.tag := request.tag
io.schedule.bits.a.bits.set := request.set
io.schedule.bits.a.bits.param := Mux(req_needT, Mux(meta.hit, BtoT, NtoT), NtoB)
io.schedule.bits.a.bits.block := request.size =/= log2Ceil(params.cache.blockBytes).U ||
!(request.opcode === PutFullData || request.opcode === AcquirePerm)
io.schedule.bits.a.bits.source := 0.U
io.schedule.bits.b.bits.param := Mux(!s_rprobe, toN, Mux(request.prio(1), request.param, Mux(req_needT, toN, toB)))
io.schedule.bits.b.bits.tag := Mux(!s_rprobe, meta.tag, request.tag)
io.schedule.bits.b.bits.set := request.set
io.schedule.bits.b.bits.clients := meta.clients & ~excluded_client
io.schedule.bits.c.bits.opcode := Mux(meta.dirty, ReleaseData, Release)
io.schedule.bits.c.bits.param := Mux(meta.state === BRANCH, BtoN, TtoN)
io.schedule.bits.c.bits.source := 0.U
io.schedule.bits.c.bits.tag := meta.tag
io.schedule.bits.c.bits.set := request.set
io.schedule.bits.c.bits.way := meta.way
io.schedule.bits.c.bits.dirty := meta.dirty
io.schedule.bits.d.bits.viewAsSupertype(chiselTypeOf(request)) := request
io.schedule.bits.d.bits.param := Mux(!req_acquire, request.param,
MuxLookup(request.param, request.param)(Seq(
NtoB -> Mux(req_promoteT, NtoT, NtoB),
BtoT -> Mux(honour_BtoT, BtoT, NtoT),
NtoT -> NtoT)))
io.schedule.bits.d.bits.sink := 0.U
io.schedule.bits.d.bits.way := meta.way
io.schedule.bits.d.bits.bad := bad_grant
io.schedule.bits.e.bits.sink := sink
io.schedule.bits.x.bits.fail := false.B
io.schedule.bits.dir.bits.set := request.set
io.schedule.bits.dir.bits.way := meta.way
io.schedule.bits.dir.bits.data := Mux(!s_release, invalid, WireInit(new DirectoryEntry(params), init = final_meta_writeback))
// Coverage of state transitions
def cacheState(entry: DirectoryEntry, hit: Bool) = {
val out = WireDefault(0.U)
val c = entry.clients.orR
val d = entry.dirty
switch (entry.state) {
is (BRANCH) { out := Mux(c, S_BRANCH_C.code, S_BRANCH.code) }
is (TRUNK) { out := Mux(d, S_TRUNK_CD.code, S_TRUNK_C.code) }
is (TIP) { out := Mux(c, Mux(d, S_TIP_CD.code, S_TIP_C.code), Mux(d, S_TIP_D.code, S_TIP.code)) }
is (INVALID) { out := S_INVALID.code }
}
when (!hit) { out := S_INVALID.code }
out
}
val p = !params.lastLevel // can be probed
val c = !params.firstLevel // can be acquired
val m = params.inner.client.clients.exists(!_.supports.probe) // can be written (or read)
val r = params.outer.manager.managers.exists(!_.alwaysGrantsT) // read-only devices exist
val f = params.control // flush control register exists
val cfg = (p, c, m, r, f)
val b = r || p // can reach branch state (via probe downgrade or read-only device)
// The cache must be used for something or we would not be here
require(c || m)
val evict = cacheState(meta, !meta.hit)
val before = cacheState(meta, meta.hit)
val after = cacheState(final_meta_writeback, true.B)
def eviction(from: CacheState, cover: Boolean)(implicit sourceInfo: SourceInfo): Unit = {
if (cover) {
params.ccover(evict === from.code, s"MSHR_${from}_EVICT", s"State transition from ${from} to evicted ${cfg}")
} else {
assert(!(evict === from.code), cf"State transition from ${from} to evicted should be impossible ${cfg}")
}
if (cover && f) {
params.ccover(before === from.code, s"MSHR_${from}_FLUSH", s"State transition from ${from} to flushed ${cfg}")
} else {
assert(!(before === from.code), cf"State transition from ${from} to flushed should be impossible ${cfg}")
}
}
def transition(from: CacheState, to: CacheState, cover: Boolean)(implicit sourceInfo: SourceInfo): Unit = {
if (cover) {
params.ccover(before === from.code && after === to.code, s"MSHR_${from}_${to}", s"State transition from ${from} to ${to} ${cfg}")
} else {
assert(!(before === from.code && after === to.code), cf"State transition from ${from} to ${to} should be impossible ${cfg}")
}
}
when ((!s_release && w_rprobeackfirst) && io.schedule.ready) {
eviction(S_BRANCH, b) // MMIO read to read-only device
eviction(S_BRANCH_C, b && c) // you need children to become C
eviction(S_TIP, true) // MMIO read || clean release can lead to this state
eviction(S_TIP_C, c) // needs two clients || client + mmio || downgrading client
eviction(S_TIP_CD, c) // needs two clients || client + mmio || downgrading client
eviction(S_TIP_D, true) // MMIO write || dirty release lead here
eviction(S_TRUNK_C, c) // acquire for write
eviction(S_TRUNK_CD, c) // dirty release then reacquire
}
when ((!s_writeback && no_wait) && io.schedule.ready) {
transition(S_INVALID, S_BRANCH, b && m) // only MMIO can bring us to BRANCH state
transition(S_INVALID, S_BRANCH_C, b && c) // C state is only possible if there are inner caches
transition(S_INVALID, S_TIP, m) // MMIO read
transition(S_INVALID, S_TIP_C, false) // we would go S_TRUNK_C instead
transition(S_INVALID, S_TIP_CD, false) // acquire does not cause dirty immediately
transition(S_INVALID, S_TIP_D, m) // MMIO write
transition(S_INVALID, S_TRUNK_C, c) // acquire
transition(S_INVALID, S_TRUNK_CD, false) // acquire does not cause dirty immediately
transition(S_BRANCH, S_INVALID, b && p) // probe can do this (flushes run as evictions)
transition(S_BRANCH, S_BRANCH_C, b && c) // acquire
transition(S_BRANCH, S_TIP, b && m) // prefetch write
transition(S_BRANCH, S_TIP_C, false) // we would go S_TRUNK_C instead
transition(S_BRANCH, S_TIP_CD, false) // acquire does not cause dirty immediately
transition(S_BRANCH, S_TIP_D, b && m) // MMIO write
transition(S_BRANCH, S_TRUNK_C, b && c) // acquire
transition(S_BRANCH, S_TRUNK_CD, false) // acquire does not cause dirty immediately
transition(S_BRANCH_C, S_INVALID, b && c && p)
transition(S_BRANCH_C, S_BRANCH, b && c) // clean release (optional)
transition(S_BRANCH_C, S_TIP, b && c && m) // prefetch write
transition(S_BRANCH_C, S_TIP_C, false) // we would go S_TRUNK_C instead
transition(S_BRANCH_C, S_TIP_D, b && c && m) // MMIO write
transition(S_BRANCH_C, S_TIP_CD, false) // going dirty means we must shoot down clients
transition(S_BRANCH_C, S_TRUNK_C, b && c) // acquire
transition(S_BRANCH_C, S_TRUNK_CD, false) // acquire does not cause dirty immediately
transition(S_TIP, S_INVALID, p)
transition(S_TIP, S_BRANCH, p) // losing TIP only possible via probe
transition(S_TIP, S_BRANCH_C, false) // we would go S_TRUNK_C instead
transition(S_TIP, S_TIP_C, false) // we would go S_TRUNK_C instead
transition(S_TIP, S_TIP_D, m) // direct dirty only via MMIO write
transition(S_TIP, S_TIP_CD, false) // acquire does not make us dirty immediately
transition(S_TIP, S_TRUNK_C, c) // acquire
transition(S_TIP, S_TRUNK_CD, false) // acquire does not make us dirty immediately
transition(S_TIP_C, S_INVALID, c && p)
transition(S_TIP_C, S_BRANCH, c && p) // losing TIP only possible via probe
transition(S_TIP_C, S_BRANCH_C, c && p) // losing TIP only possible via probe
transition(S_TIP_C, S_TIP, c) // probed while MMIO read || clean release (optional)
transition(S_TIP_C, S_TIP_D, c && m) // direct dirty only via MMIO write
transition(S_TIP_C, S_TIP_CD, false) // going dirty means we must shoot down clients
transition(S_TIP_C, S_TRUNK_C, c) // acquire
transition(S_TIP_C, S_TRUNK_CD, false) // acquire does not make us immediately dirty
transition(S_TIP_D, S_INVALID, p)
transition(S_TIP_D, S_BRANCH, p) // losing D is only possible via probe
transition(S_TIP_D, S_BRANCH_C, p && c) // probed while acquire shared
transition(S_TIP_D, S_TIP, p) // probed while MMIO read || outer probe.toT (optional)
transition(S_TIP_D, S_TIP_C, false) // we would go S_TRUNK_C instead
transition(S_TIP_D, S_TIP_CD, false) // we would go S_TRUNK_CD instead
transition(S_TIP_D, S_TRUNK_C, p && c) // probed while acquired
transition(S_TIP_D, S_TRUNK_CD, c) // acquire
transition(S_TIP_CD, S_INVALID, c && p)
transition(S_TIP_CD, S_BRANCH, c && p) // losing D is only possible via probe
transition(S_TIP_CD, S_BRANCH_C, c && p) // losing D is only possible via probe
transition(S_TIP_CD, S_TIP, c && p) // probed while MMIO read || outer probe.toT (optional)
transition(S_TIP_CD, S_TIP_C, false) // we would go S_TRUNK_C instead
transition(S_TIP_CD, S_TIP_D, c) // MMIO write || clean release (optional)
transition(S_TIP_CD, S_TRUNK_C, c && p) // probed while acquire
transition(S_TIP_CD, S_TRUNK_CD, c) // acquire
transition(S_TRUNK_C, S_INVALID, c && p)
transition(S_TRUNK_C, S_BRANCH, c && p) // losing TIP only possible via probe
transition(S_TRUNK_C, S_BRANCH_C, c && p) // losing TIP only possible via probe
transition(S_TRUNK_C, S_TIP, c) // MMIO read || clean release (optional)
transition(S_TRUNK_C, S_TIP_C, c) // bounce shared
transition(S_TRUNK_C, S_TIP_D, c) // dirty release
transition(S_TRUNK_C, S_TIP_CD, c) // dirty bounce shared
transition(S_TRUNK_C, S_TRUNK_CD, c) // dirty bounce
transition(S_TRUNK_CD, S_INVALID, c && p)
transition(S_TRUNK_CD, S_BRANCH, c && p) // losing D only possible via probe
transition(S_TRUNK_CD, S_BRANCH_C, c && p) // losing D only possible via probe
transition(S_TRUNK_CD, S_TIP, c && p) // probed while MMIO read || outer probe.toT (optional)
transition(S_TRUNK_CD, S_TIP_C, false) // we would go S_TRUNK_C instead
transition(S_TRUNK_CD, S_TIP_D, c) // dirty release
transition(S_TRUNK_CD, S_TIP_CD, c) // bounce shared
transition(S_TRUNK_CD, S_TRUNK_C, c && p) // probed while acquire
}
// Handle response messages
val probe_bit = params.clientBit(io.sinkc.bits.source)
val last_probe = (probes_done | probe_bit) === (meta.clients & ~excluded_client)
val probe_toN = isToN(io.sinkc.bits.param)
if (!params.firstLevel) when (io.sinkc.valid) {
params.ccover( probe_toN && io.schedule.bits.b.bits.param === toB, "MSHR_PROBE_FULL", "Client downgraded to N when asked only to do B")
params.ccover(!probe_toN && io.schedule.bits.b.bits.param === toB, "MSHR_PROBE_HALF", "Client downgraded to B when asked only to do B")
// Caution: the probe matches us only in set.
// We would never allow an outer probe to nest until both w_[rp]probeack complete, so
// it is safe to just unguardedly update the probe FSM.
probes_done := probes_done | probe_bit
probes_toN := probes_toN | Mux(probe_toN, probe_bit, 0.U)
probes_noT := probes_noT || io.sinkc.bits.param =/= TtoT
w_rprobeackfirst := w_rprobeackfirst || last_probe
w_rprobeacklast := w_rprobeacklast || (last_probe && io.sinkc.bits.last)
w_pprobeackfirst := w_pprobeackfirst || last_probe
w_pprobeacklast := w_pprobeacklast || (last_probe && io.sinkc.bits.last)
// Allow wormhole routing from sinkC if the first request beat has offset 0
val set_pprobeack = last_probe && (io.sinkc.bits.last || request.offset === 0.U)
w_pprobeack := w_pprobeack || set_pprobeack
params.ccover(!set_pprobeack && w_rprobeackfirst, "MSHR_PROBE_SERIAL", "Sequential routing of probe response data")
params.ccover( set_pprobeack && w_rprobeackfirst, "MSHR_PROBE_WORMHOLE", "Wormhole routing of probe response data")
// However, meta-data updates need to be done more cautiously
when (meta.state =/= INVALID && io.sinkc.bits.tag === meta.tag && io.sinkc.bits.data) { meta.dirty := true.B } // !!!
}
when (io.sinkd.valid) {
when (io.sinkd.bits.opcode === Grant || io.sinkd.bits.opcode === GrantData) {
sink := io.sinkd.bits.sink
w_grantfirst := true.B
w_grantlast := io.sinkd.bits.last
// Record if we need to prevent taking ownership
bad_grant := io.sinkd.bits.denied
// Allow wormhole routing for requests whose first beat has offset 0
w_grant := request.offset === 0.U || io.sinkd.bits.last
params.ccover(io.sinkd.bits.opcode === GrantData && request.offset === 0.U, "MSHR_GRANT_WORMHOLE", "Wormhole routing of grant response data")
params.ccover(io.sinkd.bits.opcode === GrantData && request.offset =/= 0.U, "MSHR_GRANT_SERIAL", "Sequential routing of grant response data")
gotT := io.sinkd.bits.param === toT
}
.elsewhen (io.sinkd.bits.opcode === ReleaseAck) {
w_releaseack := true.B
}
}
when (io.sinke.valid) {
w_grantack := true.B
}
// Bootstrap new requests
val allocate_as_full = WireInit(new FullRequest(params), init = io.allocate.bits)
val new_meta = Mux(io.allocate.valid && io.allocate.bits.repeat, final_meta_writeback, io.directory.bits)
val new_request = Mux(io.allocate.valid, allocate_as_full, request)
val new_needT = needT(new_request.opcode, new_request.param)
val new_clientBit = params.clientBit(new_request.source)
val new_skipProbe = Mux(skipProbeN(new_request.opcode, params.cache.hintsSkipProbe), new_clientBit, 0.U)
val prior = cacheState(final_meta_writeback, true.B)
def bypass(from: CacheState, cover: Boolean)(implicit sourceInfo: SourceInfo): Unit = {
if (cover) {
params.ccover(prior === from.code, s"MSHR_${from}_BYPASS", s"State bypass transition from ${from} ${cfg}")
} else {
assert(!(prior === from.code), cf"State bypass from ${from} should be impossible ${cfg}")
}
}
when (io.allocate.valid && io.allocate.bits.repeat) {
bypass(S_INVALID, f || p) // Can lose permissions (probe/flush)
bypass(S_BRANCH, b) // MMIO read to read-only device
bypass(S_BRANCH_C, b && c) // you need children to become C
bypass(S_TIP, true) // MMIO read || clean release can lead to this state
bypass(S_TIP_C, c) // needs two clients || client + mmio || downgrading client
bypass(S_TIP_CD, c) // needs two clients || client + mmio || downgrading client
bypass(S_TIP_D, true) // MMIO write || dirty release lead here
bypass(S_TRUNK_C, c) // acquire for write
bypass(S_TRUNK_CD, c) // dirty release then reacquire
}
when (io.allocate.valid) {
assert (!request_valid || (no_wait && io.schedule.fire))
request_valid := true.B
request := io.allocate.bits
}
// Create execution plan
when (io.directory.valid || (io.allocate.valid && io.allocate.bits.repeat)) {
meta_valid := true.B
meta := new_meta
probes_done := 0.U
probes_toN := 0.U
probes_noT := false.B
gotT := false.B
bad_grant := false.B
// These should already be either true or turning true
// We clear them here explicitly to simplify the mux tree
s_rprobe := true.B
w_rprobeackfirst := true.B
w_rprobeacklast := true.B
s_release := true.B
w_releaseack := true.B
s_pprobe := true.B
s_acquire := true.B
s_flush := true.B
w_grantfirst := true.B
w_grantlast := true.B
w_grant := true.B
w_pprobeackfirst := true.B
w_pprobeacklast := true.B
w_pprobeack := true.B
s_probeack := true.B
s_grantack := true.B
s_execute := true.B
w_grantack := true.B
s_writeback := true.B
// For C channel requests (ie: Release[Data])
when (new_request.prio(2) && (!params.firstLevel).B) {
s_execute := false.B
// Do we need to go dirty?
when (new_request.opcode(0) && !new_meta.dirty) {
s_writeback := false.B
}
// Does our state change?
when (isToB(new_request.param) && new_meta.state === TRUNK) {
s_writeback := false.B
}
// Do our clients change?
when (isToN(new_request.param) && (new_meta.clients & new_clientBit) =/= 0.U) {
s_writeback := false.B
}
assert (new_meta.hit)
}
// For X channel requests (ie: flush)
.elsewhen (new_request.control && params.control.B) { // new_request.prio(0)
s_flush := false.B
// Do we need to actually do something?
when (new_meta.hit) {
s_release := false.B
w_releaseack := false.B
// Do we need to shoot-down inner caches?
when ((!params.firstLevel).B && (new_meta.clients =/= 0.U)) {
s_rprobe := false.B
w_rprobeackfirst := false.B
w_rprobeacklast := false.B
}
}
}
// For A channel requests
.otherwise { // new_request.prio(0) && !new_request.control
s_execute := false.B
// Do we need an eviction?
when (!new_meta.hit && new_meta.state =/= INVALID) {
s_release := false.B
w_releaseack := false.B
// Do we need to shoot-down inner caches?
when ((!params.firstLevel).B && (new_meta.clients =/= 0.U)) {
s_rprobe := false.B
w_rprobeackfirst := false.B
w_rprobeacklast := false.B
}
}
// Do we need an acquire?
when (!new_meta.hit || (new_meta.state === BRANCH && new_needT)) {
s_acquire := false.B
w_grantfirst := false.B
w_grantlast := false.B
w_grant := false.B
s_grantack := false.B
s_writeback := false.B
}
// Do we need a probe?
when ((!params.firstLevel).B && (new_meta.hit &&
(new_needT || new_meta.state === TRUNK) &&
(new_meta.clients & ~new_skipProbe) =/= 0.U)) {
s_pprobe := false.B
w_pprobeackfirst := false.B
w_pprobeacklast := false.B
w_pprobeack := false.B
s_writeback := false.B
}
// Do we need a grantack?
when (new_request.opcode === AcquireBlock || new_request.opcode === AcquirePerm) {
w_grantack := false.B
s_writeback := false.B
}
// Becomes dirty?
when (!new_request.opcode(2) && new_meta.hit && !new_meta.dirty) {
s_writeback := false.B
}
}
}
}
File Parameters.scala:
/*
* Copyright 2019 SiFive, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You should have received a copy of LICENSE.Apache2 along with
* this software. If not, you may obtain a copy at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package sifive.blocks.inclusivecache
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config._
import freechips.rocketchip.diplomacy._
import freechips.rocketchip.tilelink._
import freechips.rocketchip.util._
import freechips.rocketchip.util.property.cover
import scala.math.{min,max}
case class CacheParameters(
level: Int,
ways: Int,
sets: Int,
blockBytes: Int,
beatBytes: Int, // inner
hintsSkipProbe: Boolean)
{
require (ways > 0)
require (sets > 0)
require (blockBytes > 0 && isPow2(blockBytes))
require (beatBytes > 0 && isPow2(beatBytes))
require (blockBytes >= beatBytes)
val blocks = ways * sets
val sizeBytes = blocks * blockBytes
val blockBeats = blockBytes/beatBytes
}
case class InclusiveCachePortParameters(
a: BufferParams,
b: BufferParams,
c: BufferParams,
d: BufferParams,
e: BufferParams)
{
def apply()(implicit p: Parameters, valName: ValName) = LazyModule(new TLBuffer(a, b, c, d, e))
}
object InclusiveCachePortParameters
{
val none = InclusiveCachePortParameters(
a = BufferParams.none,
b = BufferParams.none,
c = BufferParams.none,
d = BufferParams.none,
e = BufferParams.none)
val full = InclusiveCachePortParameters(
a = BufferParams.default,
b = BufferParams.default,
c = BufferParams.default,
d = BufferParams.default,
e = BufferParams.default)
// This removes feed-through paths from C=>A and A=>C
val fullC = InclusiveCachePortParameters(
a = BufferParams.none,
b = BufferParams.none,
c = BufferParams.default,
d = BufferParams.none,
e = BufferParams.none)
val flowAD = InclusiveCachePortParameters(
a = BufferParams.flow,
b = BufferParams.none,
c = BufferParams.none,
d = BufferParams.flow,
e = BufferParams.none)
val flowAE = InclusiveCachePortParameters(
a = BufferParams.flow,
b = BufferParams.none,
c = BufferParams.none,
d = BufferParams.none,
e = BufferParams.flow)
// For innerBuf:
// SinkA: no restrictions, flows into scheduler+putbuffer
// SourceB: no restrictions, flows out of scheduler
// sinkC: no restrictions, flows into scheduler+putbuffer & buffered to bankedStore
// SourceD: no restrictions, flows out of bankedStore/regout
// SinkE: no restrictions, flows into scheduler
//
// ... so while none is possible, you probably want at least flowAC to cut ready
// from the scheduler delay and flowD to ease SourceD back-pressure
// For outerBuf:
// SourceA: must not be pipe, flows out of scheduler
// SinkB: no restrictions, flows into scheduler
// SourceC: pipe is useless, flows out of bankedStore/regout, parameter depth ignored
// SinkD: no restrictions, flows into scheduler & bankedStore
// SourceE: must not be pipe, flows out of scheduler
//
// ... AE take the channel ready into the scheduler, so you need at least flowAE
}
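// Illustrative example (assumed values, not from the original source): following the
// guidance above, a configuration might instantiate
//   InclusiveCacheMicroParameters(writeBytes = 8,
//                                 innerBuf   = InclusiveCachePortParameters.fullC,
//                                 outerBuf   = InclusiveCachePortParameters.flowAE)
// i.e. buffer inner C to break the C=>A feed-through, and keep at least flow buffers on
// outer A/E so their ready signals do not reach combinationally into the scheduler.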
case class InclusiveCacheMicroParameters(
writeBytes: Int, // backing store update granularity
memCycles: Int = 40, // # of L2 clock cycles for a memory round-trip (50ns @ 800MHz)
portFactor: Int = 4, // numSubBanks = (widest TL port * portFactor) / writeBytes
dirReg: Boolean = false,
innerBuf: InclusiveCachePortParameters = InclusiveCachePortParameters.fullC, // or none
outerBuf: InclusiveCachePortParameters = InclusiveCachePortParameters.full) // or flowAE
{
require (writeBytes > 0 && isPow2(writeBytes))
require (memCycles > 0)
require (portFactor >= 2) // for inner RMW and concurrent outer Release + Grant
}
case class InclusiveCacheControlParameters(
address: BigInt,
beatBytes: Int,
bankedControl: Boolean)
case class InclusiveCacheParameters(
cache: CacheParameters,
micro: InclusiveCacheMicroParameters,
control: Boolean,
inner: TLEdgeIn,
outer: TLEdgeOut)(implicit val p: Parameters)
{
require (cache.ways > 1)
require (cache.sets > 1 && isPow2(cache.sets))
require (micro.writeBytes <= inner.manager.beatBytes)
require (micro.writeBytes <= outer.manager.beatBytes)
require (inner.manager.beatBytes <= cache.blockBytes)
require (outer.manager.beatBytes <= cache.blockBytes)
// Require that all cached address ranges have contiguous blocks
outer.manager.managers.flatMap(_.address).foreach { a =>
require (a.alignment >= cache.blockBytes)
}
// If we are the first level cache, we do not need to support inner-BCE
val firstLevel = !inner.client.clients.exists(_.supports.probe)
// If we are the last level cache, we do not need to support outer-B
val lastLevel = !outer.manager.managers.exists(_.regionType > RegionType.UNCACHED)
require (lastLevel)
// Provision enough resources to achieve full throughput with missing single-beat accesses
val mshrs = InclusiveCacheParameters.all_mshrs(cache, micro)
val secondary = max(mshrs, micro.memCycles - mshrs)
val putLists = micro.memCycles // allow every request to be single beat
val putBeats = max(2*cache.blockBeats, micro.memCycles)
val relLists = 2
val relBeats = relLists*cache.blockBeats
val flatAddresses = AddressSet.unify(outer.manager.managers.flatMap(_.address))
val pickMask = AddressDecoder(flatAddresses.map(Seq(_)), flatAddresses.map(_.mask).reduce(_|_))
def bitOffsets(x: BigInt, offset: Int = 0, tail: List[Int] = List.empty[Int]): List[Int] =
if (x == 0) tail.reverse else bitOffsets(x >> 1, offset + 1, if ((x & 1) == 1) offset :: tail else tail)
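// Worked example (illustration only): bitOffsets(0x14) == List(2, 4), i.e. the offsets of
// the set bits of the mask, least-significant first.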
val addressMapping = bitOffsets(pickMask)
val addressBits = addressMapping.size
// println(s"addresses: ${flatAddresses} => ${pickMask} => ${addressBits}")
val allClients = inner.client.clients.size
val clientBitsRaw = inner.client.clients.filter(_.supports.probe).size
val clientBits = max(1, clientBitsRaw)
val stateBits = 2
val wayBits = log2Ceil(cache.ways)
val setBits = log2Ceil(cache.sets)
val offsetBits = log2Ceil(cache.blockBytes)
val tagBits = addressBits - setBits - offsetBits
val putBits = log2Ceil(max(putLists, relLists))
require (tagBits > 0)
require (offsetBits > 0)
val innerBeatBits = (offsetBits - log2Ceil(inner.manager.beatBytes)) max 1
val outerBeatBits = (offsetBits - log2Ceil(outer.manager.beatBytes)) max 1
val innerMaskBits = inner.manager.beatBytes / micro.writeBytes
val outerMaskBits = outer.manager.beatBytes / micro.writeBytes
def clientBit(source: UInt): UInt = {
if (clientBitsRaw == 0) {
0.U
} else {
Cat(inner.client.clients.filter(_.supports.probe).map(_.sourceId.contains(source)).reverse)
}
}
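// Added note: clientBit returns a one-hot (or all-zero) vector over the probe-capable inner
// clients, indicating which client's sourceId range contains `source`; in the single-client
// configuration elaborated in the Verilog below it collapses to a single bit.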
def clientSource(bit: UInt): UInt = {
if (clientBitsRaw == 0) {
0.U
} else {
Mux1H(bit, inner.client.clients.filter(_.supports.probe).map(c => c.sourceId.start.U))
}
}
def parseAddress(x: UInt): (UInt, UInt, UInt) = {
val offset = Cat(addressMapping.map(o => x(o,o)).reverse)
val set = offset >> offsetBits
val tag = set >> setBits
(tag(tagBits-1, 0), set(setBits-1, 0), offset(offsetBits-1, 0))
}
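// Worked example (assuming the address mapping is the identity, i.e. no address holes):
// with offsetBits = 6, setBits = 11 and tagBits = 9 -- the widths seen in the generated
// Verilog below -- parseAddress(x) returns (x(25,17), x(16,6), x(5,0)) as (tag, set, offset).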
def widen(x: UInt, width: Int): UInt = {
val y = x | 0.U(width.W)
assert (y >> width === 0.U)
y(width-1, 0)
}
def expandAddress(tag: UInt, set: UInt, offset: UInt): UInt = {
val base = Cat(widen(tag, tagBits), widen(set, setBits), widen(offset, offsetBits))
val bits = Array.fill(outer.bundle.addressBits) { 0.U(1.W) }
addressMapping.zipWithIndex.foreach { case (a, i) => bits(a) = base(i,i) }
Cat(bits.reverse)
}
def restoreAddress(expanded: UInt): UInt = {
val missingBits = flatAddresses
.map { a => (a.widen(pickMask).base, a.widen(~pickMask)) } // key is the bits to restore on match
.groupBy(_._1)
.view
.mapValues(_.map(_._2))
val muxMask = AddressDecoder(missingBits.values.toList)
val mux = missingBits.toList.map { case (bits, addrs) =>
val widen = addrs.map(_.widen(~muxMask))
val matches = AddressSet
.unify(widen.distinct)
.map(_.contains(expanded))
.reduce(_ || _)
(matches, bits.U)
}
expanded | Mux1H(mux)
}
def dirReg[T <: Data](x: T, en: Bool = true.B): T = {
if (micro.dirReg) RegEnable(x, en) else x
}
def ccover(cond: Bool, label: String, desc: String)(implicit sourceInfo: SourceInfo) =
cover(cond, "CCACHE_L" + cache.level + "_" + label, "MemorySystem;;" + desc)
}
object MetaData
{
val stateBits = 2
def INVALID: UInt = 0.U(stateBits.W) // way is empty
def BRANCH: UInt = 1.U(stateBits.W) // outer slave cache is trunk
def TRUNK: UInt = 2.U(stateBits.W) // unique inner master cache is trunk
def TIP: UInt = 3.U(stateBits.W) // we are trunk, inner masters are branch
// Does a request need trunk?
def needT(opcode: UInt, param: UInt): Bool = {
!opcode(2) ||
(opcode === TLMessages.Hint && param === TLHints.PREFETCH_WRITE) ||
((opcode === TLMessages.AcquireBlock || opcode === TLMessages.AcquirePerm) && param =/= TLPermissions.NtoB)
}
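// Examples: needT is true for Put*/Arithmetic/Logical (opcode(2) === 0), for
// Hint(PREFETCH_WRITE), and for AcquireBlock/AcquirePerm with NtoT or BtoT;
// it is false for Get and for Acquire(NtoB).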
// Does a request prove the client need not be probed?
def skipProbeN(opcode: UInt, hintsSkipProbe: Boolean): Bool = {
// Acquire(toB) and Get => is N, so no probe
// Acquire(*toT) => is N or B, but need T, so no probe
// Hint => could be anything, so a probe IS needed; if hintsSkipProbe is enabled, skip probing the requesting client
// Put* => is N or B, so probe IS needed
opcode === TLMessages.AcquireBlock || opcode === TLMessages.AcquirePerm || opcode === TLMessages.Get || (opcode === TLMessages.Hint && hintsSkipProbe.B)
}
def isToN(param: UInt): Bool = {
param === TLPermissions.TtoN || param === TLPermissions.BtoN || param === TLPermissions.NtoN
}
def isToB(param: UInt): Bool = {
param === TLPermissions.TtoB || param === TLPermissions.BtoB
}
}
object InclusiveCacheParameters
{
val lfsrBits = 10
val L2ControlAddress = 0x2010000
val L2ControlSize = 0x1000
def out_mshrs(cache: CacheParameters, micro: InclusiveCacheMicroParameters): Int = {
// We need 2-3 normal MSHRs to cover the Directory latency
// To fully exploit the memory bandwidth-delay product, we need memCycles/blockBeats MSHRs
max(if (micro.dirReg) 3 else 2, (micro.memCycles + cache.blockBeats - 1) / cache.blockBeats)
}
def all_mshrs(cache: CacheParameters, micro: InclusiveCacheMicroParameters): Int =
// We need a dedicated MSHR for B+C each
2 + out_mshrs(cache, micro)
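// Worked example (illustrative parameters): blockBytes = 64, beatBytes = 8 (blockBeats = 8),
// memCycles = 40 and dirReg = false give out_mshrs = max(2, ceil(40/8)) = 5 and
// all_mshrs = 2 + 5 = 7.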
}
class InclusiveCacheBundle(params: InclusiveCacheParameters) extends Bundle
module MSHR_71( // @[MSHR.scala:84:7]
input clock, // @[MSHR.scala:84:7]
input reset, // @[MSHR.scala:84:7]
input io_allocate_valid, // @[MSHR.scala:86:14]
input io_allocate_bits_prio_2, // @[MSHR.scala:86:14]
input io_allocate_bits_control, // @[MSHR.scala:86:14]
input [2:0] io_allocate_bits_opcode, // @[MSHR.scala:86:14]
input [2:0] io_allocate_bits_param, // @[MSHR.scala:86:14]
input [2:0] io_allocate_bits_size, // @[MSHR.scala:86:14]
input [5:0] io_allocate_bits_source, // @[MSHR.scala:86:14]
input [8:0] io_allocate_bits_tag, // @[MSHR.scala:86:14]
input [5:0] io_allocate_bits_offset, // @[MSHR.scala:86:14]
input [5:0] io_allocate_bits_put, // @[MSHR.scala:86:14]
input [10:0] io_allocate_bits_set, // @[MSHR.scala:86:14]
input io_allocate_bits_repeat, // @[MSHR.scala:86:14]
input io_directory_valid, // @[MSHR.scala:86:14]
input io_directory_bits_dirty, // @[MSHR.scala:86:14]
input [1:0] io_directory_bits_state, // @[MSHR.scala:86:14]
input io_directory_bits_clients, // @[MSHR.scala:86:14]
input [8:0] io_directory_bits_tag, // @[MSHR.scala:86:14]
input io_directory_bits_hit, // @[MSHR.scala:86:14]
input [3:0] io_directory_bits_way, // @[MSHR.scala:86:14]
output io_status_valid, // @[MSHR.scala:86:14]
output [10:0] io_status_bits_set, // @[MSHR.scala:86:14]
output [8:0] io_status_bits_tag, // @[MSHR.scala:86:14]
output [3:0] io_status_bits_way, // @[MSHR.scala:86:14]
output io_status_bits_blockB, // @[MSHR.scala:86:14]
output io_status_bits_nestB, // @[MSHR.scala:86:14]
output io_status_bits_blockC, // @[MSHR.scala:86:14]
output io_status_bits_nestC, // @[MSHR.scala:86:14]
input io_schedule_ready, // @[MSHR.scala:86:14]
output io_schedule_valid, // @[MSHR.scala:86:14]
output io_schedule_bits_a_valid, // @[MSHR.scala:86:14]
output [8:0] io_schedule_bits_a_bits_tag, // @[MSHR.scala:86:14]
output [10:0] io_schedule_bits_a_bits_set, // @[MSHR.scala:86:14]
output [2:0] io_schedule_bits_a_bits_param, // @[MSHR.scala:86:14]
output io_schedule_bits_a_bits_block, // @[MSHR.scala:86:14]
output io_schedule_bits_b_valid, // @[MSHR.scala:86:14]
output [2:0] io_schedule_bits_b_bits_param, // @[MSHR.scala:86:14]
output [8:0] io_schedule_bits_b_bits_tag, // @[MSHR.scala:86:14]
output [10:0] io_schedule_bits_b_bits_set, // @[MSHR.scala:86:14]
output io_schedule_bits_b_bits_clients, // @[MSHR.scala:86:14]
output io_schedule_bits_c_valid, // @[MSHR.scala:86:14]
output [2:0] io_schedule_bits_c_bits_opcode, // @[MSHR.scala:86:14]
output [2:0] io_schedule_bits_c_bits_param, // @[MSHR.scala:86:14]
output [8:0] io_schedule_bits_c_bits_tag, // @[MSHR.scala:86:14]
output [10:0] io_schedule_bits_c_bits_set, // @[MSHR.scala:86:14]
output [3:0] io_schedule_bits_c_bits_way, // @[MSHR.scala:86:14]
output io_schedule_bits_c_bits_dirty, // @[MSHR.scala:86:14]
output io_schedule_bits_d_valid, // @[MSHR.scala:86:14]
output io_schedule_bits_d_bits_prio_2, // @[MSHR.scala:86:14]
output io_schedule_bits_d_bits_control, // @[MSHR.scala:86:14]
output [2:0] io_schedule_bits_d_bits_opcode, // @[MSHR.scala:86:14]
output [2:0] io_schedule_bits_d_bits_param, // @[MSHR.scala:86:14]
output [2:0] io_schedule_bits_d_bits_size, // @[MSHR.scala:86:14]
output [5:0] io_schedule_bits_d_bits_source, // @[MSHR.scala:86:14]
output [8:0] io_schedule_bits_d_bits_tag, // @[MSHR.scala:86:14]
output [5:0] io_schedule_bits_d_bits_offset, // @[MSHR.scala:86:14]
output [5:0] io_schedule_bits_d_bits_put, // @[MSHR.scala:86:14]
output [10:0] io_schedule_bits_d_bits_set, // @[MSHR.scala:86:14]
output [3:0] io_schedule_bits_d_bits_way, // @[MSHR.scala:86:14]
output io_schedule_bits_d_bits_bad, // @[MSHR.scala:86:14]
output io_schedule_bits_e_valid, // @[MSHR.scala:86:14]
output [2:0] io_schedule_bits_e_bits_sink, // @[MSHR.scala:86:14]
output io_schedule_bits_x_valid, // @[MSHR.scala:86:14]
output io_schedule_bits_dir_valid, // @[MSHR.scala:86:14]
output [10:0] io_schedule_bits_dir_bits_set, // @[MSHR.scala:86:14]
output [3:0] io_schedule_bits_dir_bits_way, // @[MSHR.scala:86:14]
output io_schedule_bits_dir_bits_data_dirty, // @[MSHR.scala:86:14]
output [1:0] io_schedule_bits_dir_bits_data_state, // @[MSHR.scala:86:14]
output io_schedule_bits_dir_bits_data_clients, // @[MSHR.scala:86:14]
output [8:0] io_schedule_bits_dir_bits_data_tag, // @[MSHR.scala:86:14]
output io_schedule_bits_reload, // @[MSHR.scala:86:14]
input io_sinkc_valid, // @[MSHR.scala:86:14]
input io_sinkc_bits_last, // @[MSHR.scala:86:14]
input [10:0] io_sinkc_bits_set, // @[MSHR.scala:86:14]
input [8:0] io_sinkc_bits_tag, // @[MSHR.scala:86:14]
input [5:0] io_sinkc_bits_source, // @[MSHR.scala:86:14]
input [2:0] io_sinkc_bits_param, // @[MSHR.scala:86:14]
input io_sinkc_bits_data, // @[MSHR.scala:86:14]
input io_sinkd_valid, // @[MSHR.scala:86:14]
input io_sinkd_bits_last, // @[MSHR.scala:86:14]
input [2:0] io_sinkd_bits_opcode, // @[MSHR.scala:86:14]
input [2:0] io_sinkd_bits_param, // @[MSHR.scala:86:14]
input [3:0] io_sinkd_bits_source, // @[MSHR.scala:86:14]
input [2:0] io_sinkd_bits_sink, // @[MSHR.scala:86:14]
input io_sinkd_bits_denied, // @[MSHR.scala:86:14]
input io_sinke_valid, // @[MSHR.scala:86:14]
input [3:0] io_sinke_bits_sink, // @[MSHR.scala:86:14]
input [10:0] io_nestedwb_set, // @[MSHR.scala:86:14]
input [8:0] io_nestedwb_tag, // @[MSHR.scala:86:14]
input io_nestedwb_b_toN, // @[MSHR.scala:86:14]
input io_nestedwb_b_toB, // @[MSHR.scala:86:14]
input io_nestedwb_b_clr_dirty, // @[MSHR.scala:86:14]
input io_nestedwb_c_set_dirty // @[MSHR.scala:86:14]
);
wire [8:0] final_meta_writeback_tag; // @[MSHR.scala:215:38]
wire final_meta_writeback_clients; // @[MSHR.scala:215:38]
wire [1:0] final_meta_writeback_state; // @[MSHR.scala:215:38]
wire final_meta_writeback_dirty; // @[MSHR.scala:215:38]
wire io_allocate_valid_0 = io_allocate_valid; // @[MSHR.scala:84:7]
wire io_allocate_bits_prio_2_0 = io_allocate_bits_prio_2; // @[MSHR.scala:84:7]
wire io_allocate_bits_control_0 = io_allocate_bits_control; // @[MSHR.scala:84:7]
wire [2:0] io_allocate_bits_opcode_0 = io_allocate_bits_opcode; // @[MSHR.scala:84:7]
wire [2:0] io_allocate_bits_param_0 = io_allocate_bits_param; // @[MSHR.scala:84:7]
wire [2:0] io_allocate_bits_size_0 = io_allocate_bits_size; // @[MSHR.scala:84:7]
wire [5:0] io_allocate_bits_source_0 = io_allocate_bits_source; // @[MSHR.scala:84:7]
wire [8:0] io_allocate_bits_tag_0 = io_allocate_bits_tag; // @[MSHR.scala:84:7]
wire [5:0] io_allocate_bits_offset_0 = io_allocate_bits_offset; // @[MSHR.scala:84:7]
wire [5:0] io_allocate_bits_put_0 = io_allocate_bits_put; // @[MSHR.scala:84:7]
wire [10:0] io_allocate_bits_set_0 = io_allocate_bits_set; // @[MSHR.scala:84:7]
wire io_allocate_bits_repeat_0 = io_allocate_bits_repeat; // @[MSHR.scala:84:7]
wire io_directory_valid_0 = io_directory_valid; // @[MSHR.scala:84:7]
wire io_directory_bits_dirty_0 = io_directory_bits_dirty; // @[MSHR.scala:84:7]
wire [1:0] io_directory_bits_state_0 = io_directory_bits_state; // @[MSHR.scala:84:7]
wire io_directory_bits_clients_0 = io_directory_bits_clients; // @[MSHR.scala:84:7]
wire [8:0] io_directory_bits_tag_0 = io_directory_bits_tag; // @[MSHR.scala:84:7]
wire io_directory_bits_hit_0 = io_directory_bits_hit; // @[MSHR.scala:84:7]
wire [3:0] io_directory_bits_way_0 = io_directory_bits_way; // @[MSHR.scala:84:7]
wire io_schedule_ready_0 = io_schedule_ready; // @[MSHR.scala:84:7]
wire io_sinkc_valid_0 = io_sinkc_valid; // @[MSHR.scala:84:7]
wire io_sinkc_bits_last_0 = io_sinkc_bits_last; // @[MSHR.scala:84:7]
wire [10:0] io_sinkc_bits_set_0 = io_sinkc_bits_set; // @[MSHR.scala:84:7]
wire [8:0] io_sinkc_bits_tag_0 = io_sinkc_bits_tag; // @[MSHR.scala:84:7]
wire [5:0] io_sinkc_bits_source_0 = io_sinkc_bits_source; // @[MSHR.scala:84:7]
wire [2:0] io_sinkc_bits_param_0 = io_sinkc_bits_param; // @[MSHR.scala:84:7]
wire io_sinkc_bits_data_0 = io_sinkc_bits_data; // @[MSHR.scala:84:7]
wire io_sinkd_valid_0 = io_sinkd_valid; // @[MSHR.scala:84:7]
wire io_sinkd_bits_last_0 = io_sinkd_bits_last; // @[MSHR.scala:84:7]
wire [2:0] io_sinkd_bits_opcode_0 = io_sinkd_bits_opcode; // @[MSHR.scala:84:7]
wire [2:0] io_sinkd_bits_param_0 = io_sinkd_bits_param; // @[MSHR.scala:84:7]
wire [3:0] io_sinkd_bits_source_0 = io_sinkd_bits_source; // @[MSHR.scala:84:7]
wire [2:0] io_sinkd_bits_sink_0 = io_sinkd_bits_sink; // @[MSHR.scala:84:7]
wire io_sinkd_bits_denied_0 = io_sinkd_bits_denied; // @[MSHR.scala:84:7]
wire io_sinke_valid_0 = io_sinke_valid; // @[MSHR.scala:84:7]
wire [3:0] io_sinke_bits_sink_0 = io_sinke_bits_sink; // @[MSHR.scala:84:7]
wire [10:0] io_nestedwb_set_0 = io_nestedwb_set; // @[MSHR.scala:84:7]
wire [8:0] io_nestedwb_tag_0 = io_nestedwb_tag; // @[MSHR.scala:84:7]
wire io_nestedwb_b_toN_0 = io_nestedwb_b_toN; // @[MSHR.scala:84:7]
wire io_nestedwb_b_toB_0 = io_nestedwb_b_toB; // @[MSHR.scala:84:7]
wire io_nestedwb_b_clr_dirty_0 = io_nestedwb_b_clr_dirty; // @[MSHR.scala:84:7]
wire io_nestedwb_c_set_dirty_0 = io_nestedwb_c_set_dirty; // @[MSHR.scala:84:7]
wire io_allocate_bits_prio_0 = 1'h0; // @[MSHR.scala:84:7]
wire io_allocate_bits_prio_1 = 1'h0; // @[MSHR.scala:84:7]
wire io_schedule_bits_d_bits_prio_0 = 1'h0; // @[MSHR.scala:84:7]
wire io_schedule_bits_d_bits_prio_1 = 1'h0; // @[MSHR.scala:84:7]
wire io_schedule_bits_x_bits_fail = 1'h0; // @[MSHR.scala:84:7]
wire _io_schedule_bits_c_valid_T_2 = 1'h0; // @[MSHR.scala:186:68]
wire _io_schedule_bits_c_valid_T_3 = 1'h0; // @[MSHR.scala:186:80]
wire invalid_dirty = 1'h0; // @[MSHR.scala:268:21]
wire invalid_clients = 1'h0; // @[MSHR.scala:268:21]
wire _excluded_client_T = 1'h0; // @[MSHR.scala:279:38]
wire _excluded_client_T_7 = 1'h0; // @[Parameters.scala:279:137]
wire _excluded_client_T_9 = 1'h0; // @[MSHR.scala:279:57]
wire excluded_client = 1'h0; // @[MSHR.scala:279:28]
wire _after_T_4 = 1'h0; // @[MSHR.scala:323:11]
wire allocate_as_full_prio_0 = 1'h0; // @[MSHR.scala:504:34]
wire allocate_as_full_prio_1 = 1'h0; // @[MSHR.scala:504:34]
wire new_request_prio_0 = 1'h0; // @[MSHR.scala:506:24]
wire new_request_prio_1 = 1'h0; // @[MSHR.scala:506:24]
wire _new_skipProbe_T_6 = 1'h0; // @[Parameters.scala:279:137]
wire _prior_T_4 = 1'h0; // @[MSHR.scala:323:11]
wire _io_schedule_bits_b_bits_clients_T = 1'h1; // @[MSHR.scala:289:53]
wire _last_probe_T_1 = 1'h1; // @[MSHR.scala:459:66]
wire [3:0] io_schedule_bits_a_bits_source = 4'h0; // @[MSHR.scala:84:7]
wire [3:0] io_schedule_bits_c_bits_source = 4'h0; // @[MSHR.scala:84:7]
wire [3:0] io_schedule_bits_d_bits_sink = 4'h0; // @[MSHR.scala:84:7]
wire [8:0] invalid_tag = 9'h0; // @[MSHR.scala:268:21]
wire [1:0] invalid_state = 2'h0; // @[MSHR.scala:268:21]
wire [1:0] _final_meta_writeback_state_T_11 = 2'h1; // @[MSHR.scala:240:70]
wire allocate_as_full_prio_2 = io_allocate_bits_prio_2_0; // @[MSHR.scala:84:7, :504:34]
wire allocate_as_full_control = io_allocate_bits_control_0; // @[MSHR.scala:84:7, :504:34]
wire [2:0] allocate_as_full_opcode = io_allocate_bits_opcode_0; // @[MSHR.scala:84:7, :504:34]
wire [2:0] allocate_as_full_param = io_allocate_bits_param_0; // @[MSHR.scala:84:7, :504:34]
wire [2:0] allocate_as_full_size = io_allocate_bits_size_0; // @[MSHR.scala:84:7, :504:34]
wire [5:0] allocate_as_full_source = io_allocate_bits_source_0; // @[MSHR.scala:84:7, :504:34]
wire [8:0] allocate_as_full_tag = io_allocate_bits_tag_0; // @[MSHR.scala:84:7, :504:34]
wire [5:0] allocate_as_full_offset = io_allocate_bits_offset_0; // @[MSHR.scala:84:7, :504:34]
wire [5:0] allocate_as_full_put = io_allocate_bits_put_0; // @[MSHR.scala:84:7, :504:34]
wire [10:0] allocate_as_full_set = io_allocate_bits_set_0; // @[MSHR.scala:84:7, :504:34]
wire _io_status_bits_blockB_T_8; // @[MSHR.scala:168:40]
wire _io_status_bits_nestB_T_4; // @[MSHR.scala:169:93]
wire _io_status_bits_blockC_T; // @[MSHR.scala:172:28]
wire _io_status_bits_nestC_T_5; // @[MSHR.scala:173:39]
wire _io_schedule_valid_T_5; // @[MSHR.scala:193:105]
wire _io_schedule_bits_a_valid_T_2; // @[MSHR.scala:184:55]
wire _io_schedule_bits_a_bits_block_T_5; // @[MSHR.scala:283:91]
wire _io_schedule_bits_b_valid_T_2; // @[MSHR.scala:185:41]
wire [2:0] _io_schedule_bits_b_bits_param_T_3; // @[MSHR.scala:286:41]
wire [8:0] _io_schedule_bits_b_bits_tag_T_1; // @[MSHR.scala:287:41]
wire _io_schedule_bits_b_bits_clients_T_1; // @[MSHR.scala:289:51]
wire _io_schedule_bits_c_valid_T_4; // @[MSHR.scala:186:64]
wire [2:0] _io_schedule_bits_c_bits_opcode_T; // @[MSHR.scala:290:41]
wire [2:0] _io_schedule_bits_c_bits_param_T_1; // @[MSHR.scala:291:41]
wire _io_schedule_bits_d_valid_T_2; // @[MSHR.scala:187:57]
wire [2:0] _io_schedule_bits_d_bits_param_T_9; // @[MSHR.scala:298:41]
wire _io_schedule_bits_e_valid_T_1; // @[MSHR.scala:188:43]
wire _io_schedule_bits_x_valid_T_1; // @[MSHR.scala:189:40]
wire _io_schedule_bits_dir_valid_T_4; // @[MSHR.scala:190:66]
wire _io_schedule_bits_dir_bits_data_T_1_dirty; // @[MSHR.scala:310:41]
wire [1:0] _io_schedule_bits_dir_bits_data_T_1_state; // @[MSHR.scala:310:41]
wire _io_schedule_bits_dir_bits_data_T_1_clients; // @[MSHR.scala:310:41]
wire [8:0] _io_schedule_bits_dir_bits_data_T_1_tag; // @[MSHR.scala:310:41]
wire no_wait; // @[MSHR.scala:183:83]
wire [10:0] io_status_bits_set_0; // @[MSHR.scala:84:7]
wire [8:0] io_status_bits_tag_0; // @[MSHR.scala:84:7]
wire [3:0] io_status_bits_way_0; // @[MSHR.scala:84:7]
wire io_status_bits_blockB_0; // @[MSHR.scala:84:7]
wire io_status_bits_nestB_0; // @[MSHR.scala:84:7]
wire io_status_bits_blockC_0; // @[MSHR.scala:84:7]
wire io_status_bits_nestC_0; // @[MSHR.scala:84:7]
wire io_status_valid_0; // @[MSHR.scala:84:7]
wire [8:0] io_schedule_bits_a_bits_tag_0; // @[MSHR.scala:84:7]
wire [10:0] io_schedule_bits_a_bits_set_0; // @[MSHR.scala:84:7]
wire [2:0] io_schedule_bits_a_bits_param_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_a_bits_block_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_a_valid_0; // @[MSHR.scala:84:7]
wire [2:0] io_schedule_bits_b_bits_param_0; // @[MSHR.scala:84:7]
wire [8:0] io_schedule_bits_b_bits_tag_0; // @[MSHR.scala:84:7]
wire [10:0] io_schedule_bits_b_bits_set_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_b_bits_clients_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_b_valid_0; // @[MSHR.scala:84:7]
wire [2:0] io_schedule_bits_c_bits_opcode_0; // @[MSHR.scala:84:7]
wire [2:0] io_schedule_bits_c_bits_param_0; // @[MSHR.scala:84:7]
wire [8:0] io_schedule_bits_c_bits_tag_0; // @[MSHR.scala:84:7]
wire [10:0] io_schedule_bits_c_bits_set_0; // @[MSHR.scala:84:7]
wire [3:0] io_schedule_bits_c_bits_way_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_c_bits_dirty_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_c_valid_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_d_bits_prio_2_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_d_bits_control_0; // @[MSHR.scala:84:7]
wire [2:0] io_schedule_bits_d_bits_opcode_0; // @[MSHR.scala:84:7]
wire [2:0] io_schedule_bits_d_bits_param_0; // @[MSHR.scala:84:7]
wire [2:0] io_schedule_bits_d_bits_size_0; // @[MSHR.scala:84:7]
wire [5:0] io_schedule_bits_d_bits_source_0; // @[MSHR.scala:84:7]
wire [8:0] io_schedule_bits_d_bits_tag_0; // @[MSHR.scala:84:7]
wire [5:0] io_schedule_bits_d_bits_offset_0; // @[MSHR.scala:84:7]
wire [5:0] io_schedule_bits_d_bits_put_0; // @[MSHR.scala:84:7]
wire [10:0] io_schedule_bits_d_bits_set_0; // @[MSHR.scala:84:7]
wire [3:0] io_schedule_bits_d_bits_way_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_d_bits_bad_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_d_valid_0; // @[MSHR.scala:84:7]
wire [2:0] io_schedule_bits_e_bits_sink_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_e_valid_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_x_valid_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_dir_bits_data_dirty_0; // @[MSHR.scala:84:7]
wire [1:0] io_schedule_bits_dir_bits_data_state_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_dir_bits_data_clients_0; // @[MSHR.scala:84:7]
wire [8:0] io_schedule_bits_dir_bits_data_tag_0; // @[MSHR.scala:84:7]
wire [10:0] io_schedule_bits_dir_bits_set_0; // @[MSHR.scala:84:7]
wire [3:0] io_schedule_bits_dir_bits_way_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_dir_valid_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_reload_0; // @[MSHR.scala:84:7]
wire io_schedule_valid_0; // @[MSHR.scala:84:7]
reg request_valid; // @[MSHR.scala:97:30]
assign io_status_valid_0 = request_valid; // @[MSHR.scala:84:7, :97:30]
reg request_prio_2; // @[MSHR.scala:98:20]
assign io_schedule_bits_d_bits_prio_2_0 = request_prio_2; // @[MSHR.scala:84:7, :98:20]
reg request_control; // @[MSHR.scala:98:20]
assign io_schedule_bits_d_bits_control_0 = request_control; // @[MSHR.scala:84:7, :98:20]
reg [2:0] request_opcode; // @[MSHR.scala:98:20]
assign io_schedule_bits_d_bits_opcode_0 = request_opcode; // @[MSHR.scala:84:7, :98:20]
reg [2:0] request_param; // @[MSHR.scala:98:20]
reg [2:0] request_size; // @[MSHR.scala:98:20]
assign io_schedule_bits_d_bits_size_0 = request_size; // @[MSHR.scala:84:7, :98:20]
reg [5:0] request_source; // @[MSHR.scala:98:20]
assign io_schedule_bits_d_bits_source_0 = request_source; // @[MSHR.scala:84:7, :98:20]
reg [8:0] request_tag; // @[MSHR.scala:98:20]
assign io_status_bits_tag_0 = request_tag; // @[MSHR.scala:84:7, :98:20]
assign io_schedule_bits_a_bits_tag_0 = request_tag; // @[MSHR.scala:84:7, :98:20]
assign io_schedule_bits_d_bits_tag_0 = request_tag; // @[MSHR.scala:84:7, :98:20]
reg [5:0] request_offset; // @[MSHR.scala:98:20]
assign io_schedule_bits_d_bits_offset_0 = request_offset; // @[MSHR.scala:84:7, :98:20]
reg [5:0] request_put; // @[MSHR.scala:98:20]
assign io_schedule_bits_d_bits_put_0 = request_put; // @[MSHR.scala:84:7, :98:20]
reg [10:0] request_set; // @[MSHR.scala:98:20]
assign io_status_bits_set_0 = request_set; // @[MSHR.scala:84:7, :98:20]
assign io_schedule_bits_a_bits_set_0 = request_set; // @[MSHR.scala:84:7, :98:20]
assign io_schedule_bits_b_bits_set_0 = request_set; // @[MSHR.scala:84:7, :98:20]
assign io_schedule_bits_c_bits_set_0 = request_set; // @[MSHR.scala:84:7, :98:20]
assign io_schedule_bits_d_bits_set_0 = request_set; // @[MSHR.scala:84:7, :98:20]
assign io_schedule_bits_dir_bits_set_0 = request_set; // @[MSHR.scala:84:7, :98:20]
reg meta_valid; // @[MSHR.scala:99:27]
reg meta_dirty; // @[MSHR.scala:100:17]
assign io_schedule_bits_c_bits_dirty_0 = meta_dirty; // @[MSHR.scala:84:7, :100:17]
reg [1:0] meta_state; // @[MSHR.scala:100:17]
reg meta_clients; // @[MSHR.scala:100:17]
wire _meta_no_clients_T = meta_clients; // @[MSHR.scala:100:17, :220:39]
assign _io_schedule_bits_b_bits_clients_T_1 = meta_clients; // @[MSHR.scala:100:17, :289:51]
wire evict_c = meta_clients; // @[MSHR.scala:100:17, :315:27]
wire before_c = meta_clients; // @[MSHR.scala:100:17, :315:27]
wire _last_probe_T_2 = meta_clients; // @[MSHR.scala:100:17, :459:64]
reg [8:0] meta_tag; // @[MSHR.scala:100:17]
assign io_schedule_bits_c_bits_tag_0 = meta_tag; // @[MSHR.scala:84:7, :100:17]
reg meta_hit; // @[MSHR.scala:100:17]
reg [3:0] meta_way; // @[MSHR.scala:100:17]
assign io_status_bits_way_0 = meta_way; // @[MSHR.scala:84:7, :100:17]
assign io_schedule_bits_c_bits_way_0 = meta_way; // @[MSHR.scala:84:7, :100:17]
assign io_schedule_bits_d_bits_way_0 = meta_way; // @[MSHR.scala:84:7, :100:17]
assign io_schedule_bits_dir_bits_way_0 = meta_way; // @[MSHR.scala:84:7, :100:17]
wire [3:0] final_meta_writeback_way = meta_way; // @[MSHR.scala:100:17, :215:38]
reg s_rprobe; // @[MSHR.scala:121:33]
reg w_rprobeackfirst; // @[MSHR.scala:122:33]
reg w_rprobeacklast; // @[MSHR.scala:123:33]
reg s_release; // @[MSHR.scala:124:33]
reg w_releaseack; // @[MSHR.scala:125:33]
reg s_pprobe; // @[MSHR.scala:126:33]
reg s_acquire; // @[MSHR.scala:127:33]
reg s_flush; // @[MSHR.scala:128:33]
reg w_grantfirst; // @[MSHR.scala:129:33]
reg w_grantlast; // @[MSHR.scala:130:33]
reg w_grant; // @[MSHR.scala:131:33]
reg w_pprobeackfirst; // @[MSHR.scala:132:33]
reg w_pprobeacklast; // @[MSHR.scala:133:33]
reg w_pprobeack; // @[MSHR.scala:134:33]
reg s_grantack; // @[MSHR.scala:136:33]
reg s_execute; // @[MSHR.scala:137:33]
reg w_grantack; // @[MSHR.scala:138:33]
reg s_writeback; // @[MSHR.scala:139:33]
reg [2:0] sink; // @[MSHR.scala:147:17]
assign io_schedule_bits_e_bits_sink_0 = sink; // @[MSHR.scala:84:7, :147:17]
reg gotT; // @[MSHR.scala:148:17]
reg bad_grant; // @[MSHR.scala:149:22]
assign io_schedule_bits_d_bits_bad_0 = bad_grant; // @[MSHR.scala:84:7, :149:22]
reg probes_done; // @[MSHR.scala:150:24]
reg probes_toN; // @[MSHR.scala:151:23]
reg probes_noT; // @[MSHR.scala:152:23]
wire _io_status_bits_blockB_T = ~meta_valid; // @[MSHR.scala:99:27, :168:28]
wire _io_status_bits_blockB_T_1 = ~w_releaseack; // @[MSHR.scala:125:33, :168:45]
wire _io_status_bits_blockB_T_2 = ~w_rprobeacklast; // @[MSHR.scala:123:33, :168:62]
wire _io_status_bits_blockB_T_3 = _io_status_bits_blockB_T_1 | _io_status_bits_blockB_T_2; // @[MSHR.scala:168:{45,59,62}]
wire _io_status_bits_blockB_T_4 = ~w_pprobeacklast; // @[MSHR.scala:133:33, :168:82]
wire _io_status_bits_blockB_T_5 = _io_status_bits_blockB_T_3 | _io_status_bits_blockB_T_4; // @[MSHR.scala:168:{59,79,82}]
wire _io_status_bits_blockB_T_6 = ~w_grantfirst; // @[MSHR.scala:129:33, :168:103]
wire _io_status_bits_blockB_T_7 = _io_status_bits_blockB_T_5 & _io_status_bits_blockB_T_6; // @[MSHR.scala:168:{79,100,103}]
assign _io_status_bits_blockB_T_8 = _io_status_bits_blockB_T | _io_status_bits_blockB_T_7; // @[MSHR.scala:168:{28,40,100}]
assign io_status_bits_blockB_0 = _io_status_bits_blockB_T_8; // @[MSHR.scala:84:7, :168:40]
wire _io_status_bits_nestB_T = meta_valid & w_releaseack; // @[MSHR.scala:99:27, :125:33, :169:39]
wire _io_status_bits_nestB_T_1 = _io_status_bits_nestB_T & w_rprobeacklast; // @[MSHR.scala:123:33, :169:{39,55}]
wire _io_status_bits_nestB_T_2 = _io_status_bits_nestB_T_1 & w_pprobeacklast; // @[MSHR.scala:133:33, :169:{55,74}]
wire _io_status_bits_nestB_T_3 = ~w_grantfirst; // @[MSHR.scala:129:33, :168:103, :169:96]
assign _io_status_bits_nestB_T_4 = _io_status_bits_nestB_T_2 & _io_status_bits_nestB_T_3; // @[MSHR.scala:169:{74,93,96}]
assign io_status_bits_nestB_0 = _io_status_bits_nestB_T_4; // @[MSHR.scala:84:7, :169:93]
assign _io_status_bits_blockC_T = ~meta_valid; // @[MSHR.scala:99:27, :168:28, :172:28]
assign io_status_bits_blockC_0 = _io_status_bits_blockC_T; // @[MSHR.scala:84:7, :172:28]
wire _io_status_bits_nestC_T = ~w_rprobeackfirst; // @[MSHR.scala:122:33, :173:43]
wire _io_status_bits_nestC_T_1 = ~w_pprobeackfirst; // @[MSHR.scala:132:33, :173:64]
wire _io_status_bits_nestC_T_2 = _io_status_bits_nestC_T | _io_status_bits_nestC_T_1; // @[MSHR.scala:173:{43,61,64}]
wire _io_status_bits_nestC_T_3 = ~w_grantfirst; // @[MSHR.scala:129:33, :168:103, :173:85]
wire _io_status_bits_nestC_T_4 = _io_status_bits_nestC_T_2 | _io_status_bits_nestC_T_3; // @[MSHR.scala:173:{61,82,85}]
assign _io_status_bits_nestC_T_5 = meta_valid & _io_status_bits_nestC_T_4; // @[MSHR.scala:99:27, :173:{39,82}]
assign io_status_bits_nestC_0 = _io_status_bits_nestC_T_5; // @[MSHR.scala:84:7, :173:39]
wire _no_wait_T = w_rprobeacklast & w_releaseack; // @[MSHR.scala:123:33, :125:33, :183:33]
wire _no_wait_T_1 = _no_wait_T & w_grantlast; // @[MSHR.scala:130:33, :183:{33,49}]
wire _no_wait_T_2 = _no_wait_T_1 & w_pprobeacklast; // @[MSHR.scala:133:33, :183:{49,64}]
assign no_wait = _no_wait_T_2 & w_grantack; // @[MSHR.scala:138:33, :183:{64,83}]
assign io_schedule_bits_reload_0 = no_wait; // @[MSHR.scala:84:7, :183:83]
wire _io_schedule_bits_a_valid_T = ~s_acquire; // @[MSHR.scala:127:33, :184:31]
wire _io_schedule_bits_a_valid_T_1 = _io_schedule_bits_a_valid_T & s_release; // @[MSHR.scala:124:33, :184:{31,42}]
assign _io_schedule_bits_a_valid_T_2 = _io_schedule_bits_a_valid_T_1 & s_pprobe; // @[MSHR.scala:126:33, :184:{42,55}]
assign io_schedule_bits_a_valid_0 = _io_schedule_bits_a_valid_T_2; // @[MSHR.scala:84:7, :184:55]
wire _io_schedule_bits_b_valid_T = ~s_rprobe; // @[MSHR.scala:121:33, :185:31]
wire _io_schedule_bits_b_valid_T_1 = ~s_pprobe; // @[MSHR.scala:126:33, :185:44]
assign _io_schedule_bits_b_valid_T_2 = _io_schedule_bits_b_valid_T | _io_schedule_bits_b_valid_T_1; // @[MSHR.scala:185:{31,41,44}]
assign io_schedule_bits_b_valid_0 = _io_schedule_bits_b_valid_T_2; // @[MSHR.scala:84:7, :185:41]
wire _io_schedule_bits_c_valid_T = ~s_release; // @[MSHR.scala:124:33, :186:32]
wire _io_schedule_bits_c_valid_T_1 = _io_schedule_bits_c_valid_T & w_rprobeackfirst; // @[MSHR.scala:122:33, :186:{32,43}]
assign _io_schedule_bits_c_valid_T_4 = _io_schedule_bits_c_valid_T_1; // @[MSHR.scala:186:{43,64}]
assign io_schedule_bits_c_valid_0 = _io_schedule_bits_c_valid_T_4; // @[MSHR.scala:84:7, :186:64]
wire _io_schedule_bits_d_valid_T = ~s_execute; // @[MSHR.scala:137:33, :187:31]
wire _io_schedule_bits_d_valid_T_1 = _io_schedule_bits_d_valid_T & w_pprobeack; // @[MSHR.scala:134:33, :187:{31,42}]
assign _io_schedule_bits_d_valid_T_2 = _io_schedule_bits_d_valid_T_1 & w_grant; // @[MSHR.scala:131:33, :187:{42,57}]
assign io_schedule_bits_d_valid_0 = _io_schedule_bits_d_valid_T_2; // @[MSHR.scala:84:7, :187:57]
wire _io_schedule_bits_e_valid_T = ~s_grantack; // @[MSHR.scala:136:33, :188:31]
assign _io_schedule_bits_e_valid_T_1 = _io_schedule_bits_e_valid_T & w_grantfirst; // @[MSHR.scala:129:33, :188:{31,43}]
assign io_schedule_bits_e_valid_0 = _io_schedule_bits_e_valid_T_1; // @[MSHR.scala:84:7, :188:43]
wire _io_schedule_bits_x_valid_T = ~s_flush; // @[MSHR.scala:128:33, :189:31]
assign _io_schedule_bits_x_valid_T_1 = _io_schedule_bits_x_valid_T & w_releaseack; // @[MSHR.scala:125:33, :189:{31,40}]
assign io_schedule_bits_x_valid_0 = _io_schedule_bits_x_valid_T_1; // @[MSHR.scala:84:7, :189:40]
wire _io_schedule_bits_dir_valid_T = ~s_release; // @[MSHR.scala:124:33, :186:32, :190:34]
wire _io_schedule_bits_dir_valid_T_1 = _io_schedule_bits_dir_valid_T & w_rprobeackfirst; // @[MSHR.scala:122:33, :190:{34,45}]
wire _io_schedule_bits_dir_valid_T_2 = ~s_writeback; // @[MSHR.scala:139:33, :190:70]
wire _io_schedule_bits_dir_valid_T_3 = _io_schedule_bits_dir_valid_T_2 & no_wait; // @[MSHR.scala:183:83, :190:{70,83}]
assign _io_schedule_bits_dir_valid_T_4 = _io_schedule_bits_dir_valid_T_1 | _io_schedule_bits_dir_valid_T_3; // @[MSHR.scala:190:{45,66,83}]
assign io_schedule_bits_dir_valid_0 = _io_schedule_bits_dir_valid_T_4; // @[MSHR.scala:84:7, :190:66]
wire _io_schedule_valid_T = io_schedule_bits_a_valid_0 | io_schedule_bits_b_valid_0; // @[MSHR.scala:84:7, :192:49]
wire _io_schedule_valid_T_1 = _io_schedule_valid_T | io_schedule_bits_c_valid_0; // @[MSHR.scala:84:7, :192:{49,77}]
wire _io_schedule_valid_T_2 = _io_schedule_valid_T_1 | io_schedule_bits_d_valid_0; // @[MSHR.scala:84:7, :192:{77,105}]
wire _io_schedule_valid_T_3 = _io_schedule_valid_T_2 | io_schedule_bits_e_valid_0; // @[MSHR.scala:84:7, :192:105, :193:49]
wire _io_schedule_valid_T_4 = _io_schedule_valid_T_3 | io_schedule_bits_x_valid_0; // @[MSHR.scala:84:7, :193:{49,77}]
assign _io_schedule_valid_T_5 = _io_schedule_valid_T_4 | io_schedule_bits_dir_valid_0; // @[MSHR.scala:84:7, :193:{77,105}]
assign io_schedule_valid_0 = _io_schedule_valid_T_5; // @[MSHR.scala:84:7, :193:105]
wire _io_schedule_bits_dir_bits_data_WIRE_dirty = final_meta_writeback_dirty; // @[MSHR.scala:215:38, :310:71]
wire [1:0] _io_schedule_bits_dir_bits_data_WIRE_state = final_meta_writeback_state; // @[MSHR.scala:215:38, :310:71]
wire _io_schedule_bits_dir_bits_data_WIRE_clients = final_meta_writeback_clients; // @[MSHR.scala:215:38, :310:71]
wire after_c = final_meta_writeback_clients; // @[MSHR.scala:215:38, :315:27]
wire prior_c = final_meta_writeback_clients; // @[MSHR.scala:215:38, :315:27]
wire [8:0] _io_schedule_bits_dir_bits_data_WIRE_tag = final_meta_writeback_tag; // @[MSHR.scala:215:38, :310:71]
wire final_meta_writeback_hit; // @[MSHR.scala:215:38]
wire req_clientBit = request_source == 6'h28; // @[Parameters.scala:46:9]
wire _req_needT_T = request_opcode[2]; // @[Parameters.scala:269:12]
wire _final_meta_writeback_dirty_T_3 = request_opcode[2]; // @[Parameters.scala:269:12]
wire _req_needT_T_1 = ~_req_needT_T; // @[Parameters.scala:269:{5,12}]
wire _GEN = request_opcode == 3'h5; // @[Parameters.scala:270:13]
wire _req_needT_T_2; // @[Parameters.scala:270:13]
assign _req_needT_T_2 = _GEN; // @[Parameters.scala:270:13]
wire _excluded_client_T_6; // @[Parameters.scala:279:117]
assign _excluded_client_T_6 = _GEN; // @[Parameters.scala:270:13, :279:117]
wire _GEN_0 = request_param == 3'h1; // @[Parameters.scala:270:42]
wire _req_needT_T_3; // @[Parameters.scala:270:42]
assign _req_needT_T_3 = _GEN_0; // @[Parameters.scala:270:42]
wire _final_meta_writeback_clients_T; // @[Parameters.scala:282:11]
assign _final_meta_writeback_clients_T = _GEN_0; // @[Parameters.scala:270:42, :282:11]
wire _io_schedule_bits_d_bits_param_T_7; // @[MSHR.scala:299:79]
assign _io_schedule_bits_d_bits_param_T_7 = _GEN_0; // @[Parameters.scala:270:42]
wire _req_needT_T_4 = _req_needT_T_2 & _req_needT_T_3; // @[Parameters.scala:270:{13,33,42}]
wire _req_needT_T_5 = _req_needT_T_1 | _req_needT_T_4; // @[Parameters.scala:269:{5,16}, :270:33]
wire _GEN_1 = request_opcode == 3'h6; // @[Parameters.scala:271:14]
wire _req_needT_T_6; // @[Parameters.scala:271:14]
assign _req_needT_T_6 = _GEN_1; // @[Parameters.scala:271:14]
wire _req_acquire_T; // @[MSHR.scala:219:36]
assign _req_acquire_T = _GEN_1; // @[Parameters.scala:271:14]
wire _excluded_client_T_1; // @[Parameters.scala:279:12]
assign _excluded_client_T_1 = _GEN_1; // @[Parameters.scala:271:14, :279:12]
wire _req_needT_T_7 = &request_opcode; // @[Parameters.scala:271:52]
wire _req_needT_T_8 = _req_needT_T_6 | _req_needT_T_7; // @[Parameters.scala:271:{14,42,52}]
wire _req_needT_T_9 = |request_param; // @[Parameters.scala:271:89]
wire _req_needT_T_10 = _req_needT_T_8 & _req_needT_T_9; // @[Parameters.scala:271:{42,80,89}]
wire req_needT = _req_needT_T_5 | _req_needT_T_10; // @[Parameters.scala:269:16, :270:70, :271:80]
wire _req_acquire_T_1 = &request_opcode; // @[Parameters.scala:271:52]
wire req_acquire = _req_acquire_T | _req_acquire_T_1; // @[MSHR.scala:219:{36,53,71}]
wire meta_no_clients = ~_meta_no_clients_T; // @[MSHR.scala:220:{25,39}]
wire _req_promoteT_T = &meta_state; // @[MSHR.scala:100:17, :221:81]
wire _req_promoteT_T_1 = meta_no_clients & _req_promoteT_T; // @[MSHR.scala:220:25, :221:{67,81}]
wire _req_promoteT_T_2 = meta_hit ? _req_promoteT_T_1 : gotT; // @[MSHR.scala:100:17, :148:17, :221:{40,67}]
wire req_promoteT = req_acquire & _req_promoteT_T_2; // @[MSHR.scala:219:53, :221:{34,40}]
wire _final_meta_writeback_dirty_T = request_opcode[0]; // @[MSHR.scala:98:20, :224:65]
wire _final_meta_writeback_dirty_T_1 = meta_dirty | _final_meta_writeback_dirty_T; // @[MSHR.scala:100:17, :224:{48,65}]
wire _final_meta_writeback_state_T = request_param != 3'h3; // @[MSHR.scala:98:20, :225:55]
wire _GEN_2 = meta_state == 2'h2; // @[MSHR.scala:100:17, :225:78]
wire _final_meta_writeback_state_T_1; // @[MSHR.scala:225:78]
assign _final_meta_writeback_state_T_1 = _GEN_2; // @[MSHR.scala:225:78]
wire _final_meta_writeback_state_T_12; // @[MSHR.scala:240:70]
assign _final_meta_writeback_state_T_12 = _GEN_2; // @[MSHR.scala:225:78, :240:70]
wire _evict_T_2; // @[MSHR.scala:317:26]
assign _evict_T_2 = _GEN_2; // @[MSHR.scala:225:78, :317:26]
wire _before_T_1; // @[MSHR.scala:317:26]
assign _before_T_1 = _GEN_2; // @[MSHR.scala:225:78, :317:26]
wire _final_meta_writeback_state_T_2 = _final_meta_writeback_state_T & _final_meta_writeback_state_T_1; // @[MSHR.scala:225:{55,64,78}]
wire [1:0] _final_meta_writeback_state_T_3 = _final_meta_writeback_state_T_2 ? 2'h3 : meta_state; // @[MSHR.scala:100:17, :225:{40,64}]
wire _GEN_3 = request_param == 3'h2; // @[Parameters.scala:282:43]
wire _final_meta_writeback_clients_T_1; // @[Parameters.scala:282:43]
assign _final_meta_writeback_clients_T_1 = _GEN_3; // @[Parameters.scala:282:43]
wire _io_schedule_bits_d_bits_param_T_5; // @[MSHR.scala:299:79]
assign _io_schedule_bits_d_bits_param_T_5 = _GEN_3; // @[Parameters.scala:282:43]
wire _final_meta_writeback_clients_T_2 = _final_meta_writeback_clients_T | _final_meta_writeback_clients_T_1; // @[Parameters.scala:282:{11,34,43}]
wire _final_meta_writeback_clients_T_3 = request_param == 3'h5; // @[Parameters.scala:282:75]
wire _final_meta_writeback_clients_T_4 = _final_meta_writeback_clients_T_2 | _final_meta_writeback_clients_T_3; // @[Parameters.scala:282:{34,66,75}]
wire _final_meta_writeback_clients_T_5 = _final_meta_writeback_clients_T_4 & req_clientBit; // @[Parameters.scala:46:9]
wire _final_meta_writeback_clients_T_6 = ~_final_meta_writeback_clients_T_5; // @[MSHR.scala:226:{52,56}]
wire _final_meta_writeback_clients_T_7 = meta_clients & _final_meta_writeback_clients_T_6; // @[MSHR.scala:100:17, :226:{50,52}]
wire _final_meta_writeback_clients_T_8 = ~probes_toN; // @[MSHR.scala:151:23, :232:54]
wire _final_meta_writeback_clients_T_9 = meta_clients & _final_meta_writeback_clients_T_8; // @[MSHR.scala:100:17, :232:{52,54}]
wire _final_meta_writeback_dirty_T_2 = meta_hit & meta_dirty; // @[MSHR.scala:100:17, :236:45]
wire _final_meta_writeback_dirty_T_4 = ~_final_meta_writeback_dirty_T_3; // @[MSHR.scala:236:{63,78}]
wire _final_meta_writeback_dirty_T_5 = _final_meta_writeback_dirty_T_2 | _final_meta_writeback_dirty_T_4; // @[MSHR.scala:236:{45,60,63}]
wire [1:0] _GEN_4 = {1'h1, ~req_acquire}; // @[MSHR.scala:219:53, :238:40]
wire [1:0] _final_meta_writeback_state_T_4; // @[MSHR.scala:238:40]
assign _final_meta_writeback_state_T_4 = _GEN_4; // @[MSHR.scala:238:40]
wire [1:0] _final_meta_writeback_state_T_6; // @[MSHR.scala:239:65]
assign _final_meta_writeback_state_T_6 = _GEN_4; // @[MSHR.scala:238:40, :239:65]
wire _final_meta_writeback_state_T_5 = ~meta_hit; // @[MSHR.scala:100:17, :239:41]
wire [1:0] _final_meta_writeback_state_T_7 = gotT ? _final_meta_writeback_state_T_6 : 2'h1; // @[MSHR.scala:148:17, :239:{55,65}]
wire _final_meta_writeback_state_T_8 = meta_no_clients & req_acquire; // @[MSHR.scala:219:53, :220:25, :244:72]
wire [1:0] _final_meta_writeback_state_T_9 = {1'h1, ~_final_meta_writeback_state_T_8}; // @[MSHR.scala:244:{55,72}]
wire _GEN_5 = meta_state == 2'h1; // @[MSHR.scala:100:17, :240:70]
wire _final_meta_writeback_state_T_10; // @[MSHR.scala:240:70]
assign _final_meta_writeback_state_T_10 = _GEN_5; // @[MSHR.scala:240:70]
wire _io_schedule_bits_c_bits_param_T; // @[MSHR.scala:291:53]
assign _io_schedule_bits_c_bits_param_T = _GEN_5; // @[MSHR.scala:240:70, :291:53]
wire _evict_T_1; // @[MSHR.scala:317:26]
assign _evict_T_1 = _GEN_5; // @[MSHR.scala:240:70, :317:26]
wire _before_T; // @[MSHR.scala:317:26]
assign _before_T = _GEN_5; // @[MSHR.scala:240:70, :317:26]
wire [1:0] _final_meta_writeback_state_T_13 = {_final_meta_writeback_state_T_12, 1'h1}; // @[MSHR.scala:240:70]
wire _final_meta_writeback_state_T_14 = &meta_state; // @[MSHR.scala:100:17, :221:81, :240:70]
wire [1:0] _final_meta_writeback_state_T_15 = _final_meta_writeback_state_T_14 ? _final_meta_writeback_state_T_9 : _final_meta_writeback_state_T_13; // @[MSHR.scala:240:70, :244:55]
wire [1:0] _final_meta_writeback_state_T_16 = _final_meta_writeback_state_T_5 ? _final_meta_writeback_state_T_7 : _final_meta_writeback_state_T_15; // @[MSHR.scala:239:{40,41,55}, :240:70]
wire [1:0] _final_meta_writeback_state_T_17 = req_needT ? _final_meta_writeback_state_T_4 : _final_meta_writeback_state_T_16; // @[Parameters.scala:270:70]
wire _final_meta_writeback_clients_T_10 = ~probes_toN; // @[MSHR.scala:151:23, :232:54, :245:66]
wire _final_meta_writeback_clients_T_11 = meta_clients & _final_meta_writeback_clients_T_10; // @[MSHR.scala:100:17, :245:{64,66}]
wire _final_meta_writeback_clients_T_12 = meta_hit & _final_meta_writeback_clients_T_11; // @[MSHR.scala:100:17, :245:{40,64}]
wire _final_meta_writeback_clients_T_13 = req_acquire & req_clientBit; // @[Parameters.scala:46:9]
wire _final_meta_writeback_clients_T_14 = _final_meta_writeback_clients_T_12 | _final_meta_writeback_clients_T_13; // @[MSHR.scala:245:{40,84}, :246:40]
assign final_meta_writeback_tag = request_prio_2 | request_control ? meta_tag : request_tag; // @[MSHR.scala:98:20, :100:17, :215:38, :223:52, :228:53, :247:30]
wire _final_meta_writeback_clients_T_15 = ~probes_toN; // @[MSHR.scala:151:23, :232:54, :258:54]
wire _final_meta_writeback_clients_T_16 = meta_clients & _final_meta_writeback_clients_T_15; // @[MSHR.scala:100:17, :258:{52,54}]
assign final_meta_writeback_hit = bad_grant ? meta_hit : request_prio_2 | ~request_control; // @[MSHR.scala:98:20, :100:17, :149:22, :215:38, :223:52, :227:34, :228:53, :234:30, :248:30, :251:20, :252:21]
assign final_meta_writeback_dirty = ~bad_grant & (request_prio_2 ? _final_meta_writeback_dirty_T_1 : request_control ? ~meta_hit & meta_dirty : _final_meta_writeback_dirty_T_5); // @[MSHR.scala:98:20, :100:17, :149:22, :215:38, :223:52, :224:{34,48}, :228:53, :229:21, :230:36, :236:{32,60}, :251:20, :252:21]
assign final_meta_writeback_state = bad_grant ? {1'h0, meta_hit} : request_prio_2 ? _final_meta_writeback_state_T_3 : request_control ? (meta_hit ? 2'h0 : meta_state) : _final_meta_writeback_state_T_17; // @[MSHR.scala:98:20, :100:17, :149:22, :215:38, :223:52, :225:{34,40}, :228:53, :229:21, :231:36, :237:{32,38}, :251:20, :252:21, :257:36, :263:36]
assign final_meta_writeback_clients = bad_grant ? meta_hit & _final_meta_writeback_clients_T_16 : request_prio_2 ? _final_meta_writeback_clients_T_7 : request_control ? (meta_hit ? _final_meta_writeback_clients_T_9 : meta_clients) : _final_meta_writeback_clients_T_14; // @[MSHR.scala:98:20, :100:17, :149:22, :215:38, :223:52, :226:{34,50}, :228:53, :229:21, :232:{36,52}, :245:{34,84}, :251:20, :252:21, :258:{36,52}, :264:36]
wire _honour_BtoT_T = meta_clients & req_clientBit; // @[Parameters.scala:46:9]
wire _honour_BtoT_T_1 = _honour_BtoT_T; // @[MSHR.scala:276:{47,64}]
wire honour_BtoT = meta_hit & _honour_BtoT_T_1; // @[MSHR.scala:100:17, :276:{30,64}]
wire _excluded_client_T_2 = &request_opcode; // @[Parameters.scala:271:52, :279:50]
wire _excluded_client_T_3 = _excluded_client_T_1 | _excluded_client_T_2; // @[Parameters.scala:279:{12,40,50}]
wire _excluded_client_T_4 = request_opcode == 3'h4; // @[Parameters.scala:279:87]
wire _excluded_client_T_5 = _excluded_client_T_3 | _excluded_client_T_4; // @[Parameters.scala:279:{40,77,87}]
wire _excluded_client_T_8 = _excluded_client_T_5; // @[Parameters.scala:279:{77,106}]
wire [1:0] _io_schedule_bits_a_bits_param_T = meta_hit ? 2'h2 : 2'h1; // @[MSHR.scala:100:17, :282:56]
wire [1:0] _io_schedule_bits_a_bits_param_T_1 = req_needT ? _io_schedule_bits_a_bits_param_T : 2'h0; // @[Parameters.scala:270:70]
assign io_schedule_bits_a_bits_param_0 = {1'h0, _io_schedule_bits_a_bits_param_T_1}; // @[MSHR.scala:84:7, :282:{35,41}]
wire _io_schedule_bits_a_bits_block_T = request_size != 3'h6; // @[MSHR.scala:98:20, :283:51]
wire _io_schedule_bits_a_bits_block_T_1 = request_opcode == 3'h0; // @[MSHR.scala:98:20, :284:55]
wire _io_schedule_bits_a_bits_block_T_2 = &request_opcode; // @[Parameters.scala:271:52]
wire _io_schedule_bits_a_bits_block_T_3 = _io_schedule_bits_a_bits_block_T_1 | _io_schedule_bits_a_bits_block_T_2; // @[MSHR.scala:284:{55,71,89}]
wire _io_schedule_bits_a_bits_block_T_4 = ~_io_schedule_bits_a_bits_block_T_3; // @[MSHR.scala:284:{38,71}]
assign _io_schedule_bits_a_bits_block_T_5 = _io_schedule_bits_a_bits_block_T | _io_schedule_bits_a_bits_block_T_4; // @[MSHR.scala:283:{51,91}, :284:38]
assign io_schedule_bits_a_bits_block_0 = _io_schedule_bits_a_bits_block_T_5; // @[MSHR.scala:84:7, :283:91]
wire _io_schedule_bits_b_bits_param_T = ~s_rprobe; // @[MSHR.scala:121:33, :185:31, :286:42]
wire [1:0] _io_schedule_bits_b_bits_param_T_1 = req_needT ? 2'h2 : 2'h1; // @[Parameters.scala:270:70]
wire [2:0] _io_schedule_bits_b_bits_param_T_2 = {1'h0, _io_schedule_bits_b_bits_param_T_1}; // @[MSHR.scala:286:{61,97}]
assign _io_schedule_bits_b_bits_param_T_3 = _io_schedule_bits_b_bits_param_T ? 3'h2 : _io_schedule_bits_b_bits_param_T_2; // @[MSHR.scala:286:{41,42,61}]
assign io_schedule_bits_b_bits_param_0 = _io_schedule_bits_b_bits_param_T_3; // @[MSHR.scala:84:7, :286:41]
wire _io_schedule_bits_b_bits_tag_T = ~s_rprobe; // @[MSHR.scala:121:33, :185:31, :287:42]
assign _io_schedule_bits_b_bits_tag_T_1 = _io_schedule_bits_b_bits_tag_T ? meta_tag : request_tag; // @[MSHR.scala:98:20, :100:17, :287:{41,42}]
assign io_schedule_bits_b_bits_tag_0 = _io_schedule_bits_b_bits_tag_T_1; // @[MSHR.scala:84:7, :287:41]
assign io_schedule_bits_b_bits_clients_0 = _io_schedule_bits_b_bits_clients_T_1; // @[MSHR.scala:84:7, :289:51]
assign _io_schedule_bits_c_bits_opcode_T = {2'h3, meta_dirty}; // @[MSHR.scala:100:17, :290:41]
assign io_schedule_bits_c_bits_opcode_0 = _io_schedule_bits_c_bits_opcode_T; // @[MSHR.scala:84:7, :290:41]
assign _io_schedule_bits_c_bits_param_T_1 = _io_schedule_bits_c_bits_param_T ? 3'h2 : 3'h1; // @[MSHR.scala:291:{41,53}]
assign io_schedule_bits_c_bits_param_0 = _io_schedule_bits_c_bits_param_T_1; // @[MSHR.scala:84:7, :291:41]
wire _io_schedule_bits_d_bits_param_T = ~req_acquire; // @[MSHR.scala:219:53, :298:42]
wire [1:0] _io_schedule_bits_d_bits_param_T_1 = {1'h0, req_promoteT}; // @[MSHR.scala:221:34, :300:53]
wire [1:0] _io_schedule_bits_d_bits_param_T_2 = honour_BtoT ? 2'h2 : 2'h1; // @[MSHR.scala:276:30, :301:53]
wire _io_schedule_bits_d_bits_param_T_3 = ~(|request_param); // @[Parameters.scala:271:89]
wire [2:0] _io_schedule_bits_d_bits_param_T_4 = _io_schedule_bits_d_bits_param_T_3 ? {1'h0, _io_schedule_bits_d_bits_param_T_1} : request_param; // @[MSHR.scala:98:20, :299:79, :300:53]
wire [2:0] _io_schedule_bits_d_bits_param_T_6 = _io_schedule_bits_d_bits_param_T_5 ? {1'h0, _io_schedule_bits_d_bits_param_T_2} : _io_schedule_bits_d_bits_param_T_4; // @[MSHR.scala:299:79, :301:53]
wire [2:0] _io_schedule_bits_d_bits_param_T_8 = _io_schedule_bits_d_bits_param_T_7 ? 3'h1 : _io_schedule_bits_d_bits_param_T_6; // @[MSHR.scala:299:79]
assign _io_schedule_bits_d_bits_param_T_9 = _io_schedule_bits_d_bits_param_T ? request_param : _io_schedule_bits_d_bits_param_T_8; // @[MSHR.scala:98:20, :298:{41,42}, :299:79]
assign io_schedule_bits_d_bits_param_0 = _io_schedule_bits_d_bits_param_T_9; // @[MSHR.scala:84:7, :298:41]
wire _io_schedule_bits_dir_bits_data_T = ~s_release; // @[MSHR.scala:124:33, :186:32, :310:42]
assign _io_schedule_bits_dir_bits_data_T_1_dirty = ~_io_schedule_bits_dir_bits_data_T & _io_schedule_bits_dir_bits_data_WIRE_dirty; // @[MSHR.scala:310:{41,42,71}]
assign _io_schedule_bits_dir_bits_data_T_1_state = _io_schedule_bits_dir_bits_data_T ? 2'h0 : _io_schedule_bits_dir_bits_data_WIRE_state; // @[MSHR.scala:310:{41,42,71}]
assign _io_schedule_bits_dir_bits_data_T_1_clients = ~_io_schedule_bits_dir_bits_data_T & _io_schedule_bits_dir_bits_data_WIRE_clients; // @[MSHR.scala:310:{41,42,71}]
assign _io_schedule_bits_dir_bits_data_T_1_tag = _io_schedule_bits_dir_bits_data_T ? 9'h0 : _io_schedule_bits_dir_bits_data_WIRE_tag; // @[MSHR.scala:310:{41,42,71}]
assign io_schedule_bits_dir_bits_data_dirty_0 = _io_schedule_bits_dir_bits_data_T_1_dirty; // @[MSHR.scala:84:7, :310:41]
assign io_schedule_bits_dir_bits_data_state_0 = _io_schedule_bits_dir_bits_data_T_1_state; // @[MSHR.scala:84:7, :310:41]
assign io_schedule_bits_dir_bits_data_clients_0 = _io_schedule_bits_dir_bits_data_T_1_clients; // @[MSHR.scala:84:7, :310:41]
assign io_schedule_bits_dir_bits_data_tag_0 = _io_schedule_bits_dir_bits_data_T_1_tag; // @[MSHR.scala:84:7, :310:41]
wire _evict_T = ~meta_hit; // @[MSHR.scala:100:17, :239:41, :338:32]
wire [3:0] evict; // @[MSHR.scala:314:26]
wire _evict_out_T = ~evict_c; // @[MSHR.scala:315:27, :318:32]
wire [1:0] _GEN_6 = {1'h1, ~meta_dirty}; // @[MSHR.scala:100:17, :319:32]
wire [1:0] _evict_out_T_1; // @[MSHR.scala:319:32]
assign _evict_out_T_1 = _GEN_6; // @[MSHR.scala:319:32]
wire [1:0] _before_out_T_1; // @[MSHR.scala:319:32]
assign _before_out_T_1 = _GEN_6; // @[MSHR.scala:319:32]
wire _evict_T_3 = &meta_state; // @[MSHR.scala:100:17, :221:81, :317:26]
wire [2:0] _GEN_7 = {2'h2, ~meta_dirty}; // @[MSHR.scala:100:17, :319:32, :320:39]
wire [2:0] _evict_out_T_2; // @[MSHR.scala:320:39]
assign _evict_out_T_2 = _GEN_7; // @[MSHR.scala:320:39]
wire [2:0] _before_out_T_2; // @[MSHR.scala:320:39]
assign _before_out_T_2 = _GEN_7; // @[MSHR.scala:320:39]
wire [2:0] _GEN_8 = {2'h3, ~meta_dirty}; // @[MSHR.scala:100:17, :319:32, :320:76]
wire [2:0] _evict_out_T_3; // @[MSHR.scala:320:76]
assign _evict_out_T_3 = _GEN_8; // @[MSHR.scala:320:76]
wire [2:0] _before_out_T_3; // @[MSHR.scala:320:76]
assign _before_out_T_3 = _GEN_8; // @[MSHR.scala:320:76]
wire [2:0] _evict_out_T_4 = evict_c ? _evict_out_T_2 : _evict_out_T_3; // @[MSHR.scala:315:27, :320:{32,39,76}]
wire _evict_T_4 = ~(|meta_state); // @[MSHR.scala:100:17, :104:22, :317:26]
wire _evict_T_5 = ~_evict_T; // @[MSHR.scala:323:11, :338:32]
assign evict = _evict_T_5 ? 4'h8 : _evict_T_1 ? {3'h0, _evict_out_T} : _evict_T_2 ? {2'h0, _evict_out_T_1} : _evict_T_3 ? {1'h0, _evict_out_T_4} : {_evict_T_4, 3'h0}; // @[MSHR.scala:314:26, :317:26, :318:{26,32}, :319:{26,32}, :320:{26,32}, :321:26, :323:{11,17,23}]
wire [3:0] before_0; // @[MSHR.scala:314:26]
wire _before_out_T = ~before_c; // @[MSHR.scala:315:27, :318:32]
wire _before_T_2 = &meta_state; // @[MSHR.scala:100:17, :221:81, :317:26]
wire [2:0] _before_out_T_4 = before_c ? _before_out_T_2 : _before_out_T_3; // @[MSHR.scala:315:27, :320:{32,39,76}]
wire _before_T_3 = ~(|meta_state); // @[MSHR.scala:100:17, :104:22, :317:26]
wire _before_T_4 = ~meta_hit; // @[MSHR.scala:100:17, :239:41, :323:11]
assign before_0 = _before_T_4 ? 4'h8 : _before_T ? {3'h0, _before_out_T} : _before_T_1 ? {2'h0, _before_out_T_1} : _before_T_2 ? {1'h0, _before_out_T_4} : {_before_T_3, 3'h0}; // @[MSHR.scala:314:26, :317:26, :318:{26,32}, :319:{26,32}, :320:{26,32}, :321:26, :323:{11,17,23}]
wire [3:0] after; // @[MSHR.scala:314:26]
wire _GEN_9 = final_meta_writeback_state == 2'h1; // @[MSHR.scala:215:38, :317:26]
wire _after_T; // @[MSHR.scala:317:26]
assign _after_T = _GEN_9; // @[MSHR.scala:317:26]
wire _prior_T; // @[MSHR.scala:317:26]
assign _prior_T = _GEN_9; // @[MSHR.scala:317:26]
wire _after_out_T = ~after_c; // @[MSHR.scala:315:27, :318:32]
wire _GEN_10 = final_meta_writeback_state == 2'h2; // @[MSHR.scala:215:38, :317:26]
wire _after_T_1; // @[MSHR.scala:317:26]
assign _after_T_1 = _GEN_10; // @[MSHR.scala:317:26]
wire _prior_T_1; // @[MSHR.scala:317:26]
assign _prior_T_1 = _GEN_10; // @[MSHR.scala:317:26]
wire [1:0] _GEN_11 = {1'h1, ~final_meta_writeback_dirty}; // @[MSHR.scala:215:38, :319:32]
wire [1:0] _after_out_T_1; // @[MSHR.scala:319:32]
assign _after_out_T_1 = _GEN_11; // @[MSHR.scala:319:32]
wire [1:0] _prior_out_T_1; // @[MSHR.scala:319:32]
assign _prior_out_T_1 = _GEN_11; // @[MSHR.scala:319:32]
wire _after_T_2 = &final_meta_writeback_state; // @[MSHR.scala:215:38, :317:26]
wire [2:0] _GEN_12 = {2'h2, ~final_meta_writeback_dirty}; // @[MSHR.scala:215:38, :319:32, :320:39]
wire [2:0] _after_out_T_2; // @[MSHR.scala:320:39]
assign _after_out_T_2 = _GEN_12; // @[MSHR.scala:320:39]
wire [2:0] _prior_out_T_2; // @[MSHR.scala:320:39]
assign _prior_out_T_2 = _GEN_12; // @[MSHR.scala:320:39]
wire [2:0] _GEN_13 = {2'h3, ~final_meta_writeback_dirty}; // @[MSHR.scala:215:38, :319:32, :320:76]
wire [2:0] _after_out_T_3; // @[MSHR.scala:320:76]
assign _after_out_T_3 = _GEN_13; // @[MSHR.scala:320:76]
wire [2:0] _prior_out_T_3; // @[MSHR.scala:320:76]
assign _prior_out_T_3 = _GEN_13; // @[MSHR.scala:320:76]
wire [2:0] _after_out_T_4 = after_c ? _after_out_T_2 : _after_out_T_3; // @[MSHR.scala:315:27, :320:{32,39,76}]
wire _GEN_14 = final_meta_writeback_state == 2'h0; // @[MSHR.scala:215:38, :317:26]
wire _after_T_3; // @[MSHR.scala:317:26]
assign _after_T_3 = _GEN_14; // @[MSHR.scala:317:26]
wire _prior_T_3; // @[MSHR.scala:317:26]
assign _prior_T_3 = _GEN_14; // @[MSHR.scala:317:26]
assign after = _after_T ? {3'h0, _after_out_T} : _after_T_1 ? {2'h0, _after_out_T_1} : _after_T_2 ? {1'h0, _after_out_T_4} : {_after_T_3, 3'h0}; // @[MSHR.scala:314:26, :317:26, :318:{26,32}, :319:{26,32}, :320:{26,32}, :321:26]
wire probe_bit = io_sinkc_bits_source_0 == 6'h28; // @[Parameters.scala:46:9]
wire _GEN_15 = probes_done | probe_bit; // @[Parameters.scala:46:9]
wire _last_probe_T; // @[MSHR.scala:459:33]
assign _last_probe_T = _GEN_15; // @[MSHR.scala:459:33]
wire _probes_done_T; // @[MSHR.scala:467:32]
assign _probes_done_T = _GEN_15; // @[MSHR.scala:459:33, :467:32]
wire last_probe = _last_probe_T == _last_probe_T_2; // @[MSHR.scala:459:{33,46,64}]
wire _probe_toN_T = io_sinkc_bits_param_0 == 3'h1; // @[Parameters.scala:282:11]
wire _probe_toN_T_1 = io_sinkc_bits_param_0 == 3'h2; // @[Parameters.scala:282:43]
wire _probe_toN_T_2 = _probe_toN_T | _probe_toN_T_1; // @[Parameters.scala:282:{11,34,43}]
wire _probe_toN_T_3 = io_sinkc_bits_param_0 == 3'h5; // @[Parameters.scala:282:75]
wire probe_toN = _probe_toN_T_2 | _probe_toN_T_3; // @[Parameters.scala:282:{34,66,75}]
wire _probes_toN_T = probe_toN & probe_bit; // @[Parameters.scala:46:9]
wire _probes_toN_T_1 = probes_toN | _probes_toN_T; // @[MSHR.scala:151:23, :468:{30,35}]
wire _probes_noT_T = io_sinkc_bits_param_0 != 3'h3; // @[MSHR.scala:84:7, :469:53]
wire _probes_noT_T_1 = probes_noT | _probes_noT_T; // @[MSHR.scala:152:23, :469:{30,53}]
wire _w_rprobeackfirst_T = w_rprobeackfirst | last_probe; // @[MSHR.scala:122:33, :459:46, :470:42]
wire _GEN_16 = last_probe & io_sinkc_bits_last_0; // @[MSHR.scala:84:7, :459:46, :471:55]
wire _w_rprobeacklast_T; // @[MSHR.scala:471:55]
assign _w_rprobeacklast_T = _GEN_16; // @[MSHR.scala:471:55]
wire _w_pprobeacklast_T; // @[MSHR.scala:473:55]
assign _w_pprobeacklast_T = _GEN_16; // @[MSHR.scala:471:55, :473:55]
wire _w_rprobeacklast_T_1 = w_rprobeacklast | _w_rprobeacklast_T; // @[MSHR.scala:123:33, :471:{40,55}]
wire _w_pprobeackfirst_T = w_pprobeackfirst | last_probe; // @[MSHR.scala:132:33, :459:46, :472:42]
wire _w_pprobeacklast_T_1 = w_pprobeacklast | _w_pprobeacklast_T; // @[MSHR.scala:133:33, :473:{40,55}]
wire _set_pprobeack_T = ~(|request_offset); // @[MSHR.scala:98:20, :475:77]
wire _set_pprobeack_T_1 = io_sinkc_bits_last_0 | _set_pprobeack_T; // @[MSHR.scala:84:7, :475:{59,77}]
wire set_pprobeack = last_probe & _set_pprobeack_T_1; // @[MSHR.scala:459:46, :475:{36,59}]
wire _w_pprobeack_T = w_pprobeack | set_pprobeack; // @[MSHR.scala:134:33, :475:36, :476:32]
wire _w_grant_T = ~(|request_offset); // @[MSHR.scala:98:20, :475:77, :490:33]
wire _w_grant_T_1 = _w_grant_T | io_sinkd_bits_last_0; // @[MSHR.scala:84:7, :490:{33,41}]
wire _gotT_T = io_sinkd_bits_param_0 == 3'h0; // @[MSHR.scala:84:7, :493:35]
wire _new_meta_T = io_allocate_valid_0 & io_allocate_bits_repeat_0; // @[MSHR.scala:84:7, :505:40]
wire new_meta_dirty = _new_meta_T ? final_meta_writeback_dirty : io_directory_bits_dirty_0; // @[MSHR.scala:84:7, :215:38, :505:{21,40}]
wire [1:0] new_meta_state = _new_meta_T ? final_meta_writeback_state : io_directory_bits_state_0; // @[MSHR.scala:84:7, :215:38, :505:{21,40}]
wire new_meta_clients = _new_meta_T ? final_meta_writeback_clients : io_directory_bits_clients_0; // @[MSHR.scala:84:7, :215:38, :505:{21,40}]
wire [8:0] new_meta_tag = _new_meta_T ? final_meta_writeback_tag : io_directory_bits_tag_0; // @[MSHR.scala:84:7, :215:38, :505:{21,40}]
wire new_meta_hit = _new_meta_T ? final_meta_writeback_hit : io_directory_bits_hit_0; // @[MSHR.scala:84:7, :215:38, :505:{21,40}]
wire [3:0] new_meta_way = _new_meta_T ? final_meta_writeback_way : io_directory_bits_way_0; // @[MSHR.scala:84:7, :215:38, :505:{21,40}]
wire new_request_prio_2 = io_allocate_valid_0 ? allocate_as_full_prio_2 : request_prio_2; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24]
wire new_request_control = io_allocate_valid_0 ? allocate_as_full_control : request_control; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24]
wire [2:0] new_request_opcode = io_allocate_valid_0 ? allocate_as_full_opcode : request_opcode; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24]
wire [2:0] new_request_param = io_allocate_valid_0 ? allocate_as_full_param : request_param; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24]
wire [2:0] new_request_size = io_allocate_valid_0 ? allocate_as_full_size : request_size; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24]
wire [5:0] new_request_source = io_allocate_valid_0 ? allocate_as_full_source : request_source; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24]
wire [8:0] new_request_tag = io_allocate_valid_0 ? allocate_as_full_tag : request_tag; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24]
wire [5:0] new_request_offset = io_allocate_valid_0 ? allocate_as_full_offset : request_offset; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24]
wire [5:0] new_request_put = io_allocate_valid_0 ? allocate_as_full_put : request_put; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24]
wire [10:0] new_request_set = io_allocate_valid_0 ? allocate_as_full_set : request_set; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24]
wire _new_needT_T = new_request_opcode[2]; // @[Parameters.scala:269:12]
wire _new_needT_T_1 = ~_new_needT_T; // @[Parameters.scala:269:{5,12}]
wire _GEN_17 = new_request_opcode == 3'h5; // @[Parameters.scala:270:13]
wire _new_needT_T_2; // @[Parameters.scala:270:13]
assign _new_needT_T_2 = _GEN_17; // @[Parameters.scala:270:13]
wire _new_skipProbe_T_5; // @[Parameters.scala:279:117]
assign _new_skipProbe_T_5 = _GEN_17; // @[Parameters.scala:270:13, :279:117]
wire _new_needT_T_3 = new_request_param == 3'h1; // @[Parameters.scala:270:42]
wire _new_needT_T_4 = _new_needT_T_2 & _new_needT_T_3; // @[Parameters.scala:270:{13,33,42}]
wire _new_needT_T_5 = _new_needT_T_1 | _new_needT_T_4; // @[Parameters.scala:269:{5,16}, :270:33]
wire _T_615 = new_request_opcode == 3'h6; // @[Parameters.scala:271:14]
wire _new_needT_T_6; // @[Parameters.scala:271:14]
assign _new_needT_T_6 = _T_615; // @[Parameters.scala:271:14]
wire _new_skipProbe_T; // @[Parameters.scala:279:12]
assign _new_skipProbe_T = _T_615; // @[Parameters.scala:271:14, :279:12]
wire _new_needT_T_7 = &new_request_opcode; // @[Parameters.scala:271:52]
wire _new_needT_T_8 = _new_needT_T_6 | _new_needT_T_7; // @[Parameters.scala:271:{14,42,52}]
wire _new_needT_T_9 = |new_request_param; // @[Parameters.scala:271:89]
wire _new_needT_T_10 = _new_needT_T_8 & _new_needT_T_9; // @[Parameters.scala:271:{42,80,89}]
wire new_needT = _new_needT_T_5 | _new_needT_T_10; // @[Parameters.scala:269:16, :270:70, :271:80]
wire new_clientBit = new_request_source == 6'h28; // @[Parameters.scala:46:9]
wire _new_skipProbe_T_1 = &new_request_opcode; // @[Parameters.scala:271:52, :279:50]
wire _new_skipProbe_T_2 = _new_skipProbe_T | _new_skipProbe_T_1; // @[Parameters.scala:279:{12,40,50}]
wire _new_skipProbe_T_3 = new_request_opcode == 3'h4; // @[Parameters.scala:279:87]
wire _new_skipProbe_T_4 = _new_skipProbe_T_2 | _new_skipProbe_T_3; // @[Parameters.scala:279:{40,77,87}]
wire _new_skipProbe_T_7 = _new_skipProbe_T_4; // @[Parameters.scala:279:{77,106}]
wire new_skipProbe = _new_skipProbe_T_7 & new_clientBit; // @[Parameters.scala:46:9]
wire [3:0] prior; // @[MSHR.scala:314:26]
wire _prior_out_T = ~prior_c; // @[MSHR.scala:315:27, :318:32]
wire _prior_T_2 = &final_meta_writeback_state; // @[MSHR.scala:215:38, :317:26]
wire [2:0] _prior_out_T_4 = prior_c ? _prior_out_T_2 : _prior_out_T_3; // @[MSHR.scala:315:27, :320:{32,39,76}]
assign prior = _prior_T ? {3'h0, _prior_out_T} : _prior_T_1 ? {2'h0, _prior_out_T_1} : _prior_T_2 ? {1'h0, _prior_out_T_4} : {_prior_T_3, 3'h0}; // @[MSHR.scala:314:26, :317:26, :318:{26,32}, :319:{26,32}, :320:{26,32}, :321:26]
wire _T_574 = io_directory_valid_0 | _new_meta_T; // @[MSHR.scala:84:7, :505:40, :539:28]
Generate the Verilog code corresponding to the following Chisel files.
File PE.scala:
// See README.md for license details.
package gemmini
import chisel3._
import chisel3.util._
class PEControl[T <: Data : Arithmetic](accType: T) extends Bundle {
val dataflow = UInt(1.W) // TODO make this an Enum
val propagate = UInt(1.W) // Which register should be propagated (and which should be accumulated)?
val shift = UInt(log2Up(accType.getWidth).W) // TODO this isn't correct for Floats
}
class MacUnit[T <: Data](inputType: T, cType: T, dType: T) (implicit ev: Arithmetic[T]) extends Module {
import ev._
val io = IO(new Bundle {
val in_a = Input(inputType)
val in_b = Input(inputType)
val in_c = Input(cType)
val out_d = Output(dType)
})
io.out_d := io.in_c.mac(io.in_a, io.in_b)
}
// TODO update documentation
/**
* A PE implementing a MAC operation. Configured as fully combinational when integrated into a Mesh.
 * @param inputType Data type of the A input operand
 * @param outputType Data type of the B and D operands and of the b/c outputs
 * @param accType Accumulator data type (also sets the width of the shift control signal)
 * @param df Dataflow(s) supported by this PE (OS, WS, or BOTH)
 * @param max_simultaneous_matmuls Maximum number of in-flight matmuls (sets the width of the id signals)
*/
class PE[T <: Data](inputType: T, outputType: T, accType: T, df: Dataflow.Value, max_simultaneous_matmuls: Int)
(implicit ev: Arithmetic[T]) extends Module { // Debugging variables
import ev._
val io = IO(new Bundle {
val in_a = Input(inputType)
val in_b = Input(outputType)
val in_d = Input(outputType)
val out_a = Output(inputType)
val out_b = Output(outputType)
val out_c = Output(outputType)
val in_control = Input(new PEControl(accType))
val out_control = Output(new PEControl(accType))
val in_id = Input(UInt(log2Up(max_simultaneous_matmuls).W))
val out_id = Output(UInt(log2Up(max_simultaneous_matmuls).W))
val in_last = Input(Bool())
val out_last = Output(Bool())
val in_valid = Input(Bool())
val out_valid = Output(Bool())
val bad_dataflow = Output(Bool())
})
val cType = if (df == Dataflow.WS) inputType else accType
// When creating PEs that support multiple dataflows, the
// elaboration/synthesis tools often fail to consolidate and de-duplicate
// MAC units. To force mac circuitry to be re-used, we create a "mac_unit"
// module here which just performs a single MAC operation
val mac_unit = Module(new MacUnit(inputType,
if (df == Dataflow.WS) outputType else accType, outputType))
val a = io.in_a
val b = io.in_b
val d = io.in_d
val c1 = Reg(cType)
val c2 = Reg(cType)
val dataflow = io.in_control.dataflow
val prop = io.in_control.propagate
val shift = io.in_control.shift
val id = io.in_id
val last = io.in_last
val valid = io.in_valid
io.out_a := a
io.out_control.dataflow := dataflow
io.out_control.propagate := prop
io.out_control.shift := shift
io.out_id := id
io.out_last := last
io.out_valid := valid
mac_unit.io.in_a := a
val last_s = RegEnable(prop, valid)
val flip = last_s =/= prop
val shift_offset = Mux(flip, shift, 0.U)
// Which dataflow are we using?
val OUTPUT_STATIONARY = Dataflow.OS.id.U(1.W)
val WEIGHT_STATIONARY = Dataflow.WS.id.U(1.W)
// Is c1 being computed on, or propagated forward (in the output-stationary dataflow)?
val COMPUTE = 0.U(1.W)
val PROPAGATE = 1.U(1.W)
io.bad_dataflow := false.B
when ((df == Dataflow.OS).B || ((df == Dataflow.BOTH).B && dataflow === OUTPUT_STATIONARY)) {
when(prop === PROPAGATE) {
io.out_c := (c1 >> shift_offset).clippedToWidthOf(outputType)
io.out_b := b
mac_unit.io.in_b := b.asTypeOf(inputType)
mac_unit.io.in_c := c2
c2 := mac_unit.io.out_d
c1 := d.withWidthOf(cType)
}.otherwise {
io.out_c := (c2 >> shift_offset).clippedToWidthOf(outputType)
io.out_b := b
mac_unit.io.in_b := b.asTypeOf(inputType)
mac_unit.io.in_c := c1
c1 := mac_unit.io.out_d
c2 := d.withWidthOf(cType)
}
}.elsewhen ((df == Dataflow.WS).B || ((df == Dataflow.BOTH).B && dataflow === WEIGHT_STATIONARY)) {
when(prop === PROPAGATE) {
io.out_c := c1
mac_unit.io.in_b := c2.asTypeOf(inputType)
mac_unit.io.in_c := b
io.out_b := mac_unit.io.out_d
c1 := d
}.otherwise {
io.out_c := c2
mac_unit.io.in_b := c1.asTypeOf(inputType)
mac_unit.io.in_c := b
io.out_b := mac_unit.io.out_d
c2 := d
}
}.otherwise {
io.bad_dataflow := true.B
//assert(false.B, "unknown dataflow")
io.out_c := DontCare
io.out_b := DontCare
mac_unit.io.in_b := b.asTypeOf(inputType)
mac_unit.io.in_c := c2
}
when (!valid) {
c1 := c1
c2 := c2
mac_unit.io.in_b := DontCare
mac_unit.io.in_c := DontCare
}
}
File Arithmetic.scala:
// A simple type class for Chisel datatypes that can add and multiply. To add your own type, simply create your own:
// implicit MyTypeArithmetic extends Arithmetic[MyType] { ... }
package gemmini
import chisel3._
import chisel3.util._
import hardfloat._
// Bundles that represent the raw bits of custom datatypes
case class Float(expWidth: Int, sigWidth: Int) extends Bundle {
val bits = UInt((expWidth + sigWidth).W)
val bias: Int = (1 << (expWidth-1)) - 1
}
case class DummySInt(w: Int) extends Bundle {
val bits = UInt(w.W)
def dontCare: DummySInt = {
val o = Wire(new DummySInt(w))
o.bits := 0.U
o
}
}
// The Arithmetic typeclass which implements various arithmetic operations on custom datatypes
abstract class Arithmetic[T <: Data] {
implicit def cast(t: T): ArithmeticOps[T]
}
abstract class ArithmeticOps[T <: Data](self: T) {
def *(t: T): T
def mac(m1: T, m2: T): T // Returns (m1 * m2 + self)
def +(t: T): T
def -(t: T): T
  def >>(u: UInt): T // This is a rounding shift! Rounds to nearest rather than truncating (ties go to the even value)
def >(t: T): Bool
def identity: T
def withWidthOf(t: T): T
def clippedToWidthOf(t: T): T // Like "withWidthOf", except that it saturates
def relu: T
def zero: T
def minimum: T
// Optional parameters, which only need to be defined if you want to enable various optimizations for transformers
def divider(denom_t: UInt, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[T])] = None
def sqrt: Option[(DecoupledIO[UInt], DecoupledIO[T])] = None
def reciprocal[U <: Data](u: U, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[U])] = None
def mult_with_reciprocal[U <: Data](reciprocal: U) = self
}
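// Editor's sketch (not part of the original file): the header comment above says a new type only needs
// its own implicit Arithmetic instance. Below is a minimal, hypothetical example for an 8-bit wrapper
// type "Fixed8"; only a few of the required ArithmeticOps methods are shown, and a real instance must
// implement all of them (see the concrete instances inside `object Arithmetic` below for full examples).
//
//   case class Fixed8() extends Bundle { val bits = UInt(8.W) }
//
//   implicit object Fixed8Arithmetic extends Arithmetic[Fixed8] {
//     override implicit def cast(self: Fixed8) = new ArithmeticOps(self) {
//       override def +(t: Fixed8) = { val o = Wire(Fixed8()); o.bits := self.bits + t.bits; o }
//       override def *(t: Fixed8) = { val o = Wire(Fixed8()); o.bits := self.bits * t.bits; o }
//       override def mac(m1: Fixed8, m2: Fixed8) = { val o = Wire(Fixed8()); o.bits := m1.bits * m2.bits + self.bits; o }
//       override def zero = 0.U.asTypeOf(Fixed8())
//       // ... the remaining methods (-, >>, >, identity, withWidthOf, clippedToWidthOf, relu, minimum) are elided ...
//     }
//   }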
object Arithmetic {
implicit object UIntArithmetic extends Arithmetic[UInt] {
override implicit def cast(self: UInt) = new ArithmeticOps(self) {
override def *(t: UInt) = self * t
override def mac(m1: UInt, m2: UInt) = m1 * m2 + self
override def +(t: UInt) = self + t
override def -(t: UInt) = self - t
override def >>(u: UInt) = {
// The equation we use can be found here: https://riscv.github.io/documents/riscv-v-spec/#_vector_fixed_point_rounding_mode_register_vxrm
// TODO Do we need to explicitly handle the cases where "u" is a small number (like 0)? What is the default behavior here?
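        // Worked example (illustrative, not part of the original comment): for self = 11 (0b1011) and u = 2,
        // point_five = bit 1 = 1, zeros = 1 (bit 0 is set), ones_digit = bit 2 = 0, so r = 1 and the result
        // is (11 >> 2) + 1 = 3 (2.75 rounds up to 3). For self = 10 (0b1010), r = 0 and the result is 2,
        // i.e. the 2.5 tie rounds to the even value, matching the round-to-nearest equation referenced above.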
val point_five = Mux(u === 0.U, 0.U, self(u - 1.U))
val zeros = Mux(u <= 1.U, 0.U, self.asUInt & ((1.U << (u - 1.U)).asUInt - 1.U)) =/= 0.U
val ones_digit = self(u)
val r = point_five & (zeros | ones_digit)
(self >> u).asUInt + r
}
override def >(t: UInt): Bool = self > t
override def withWidthOf(t: UInt) = self.asTypeOf(t)
override def clippedToWidthOf(t: UInt) = {
val sat = ((1 << (t.getWidth-1))-1).U
Mux(self > sat, sat, self)(t.getWidth-1, 0)
}
override def relu: UInt = self
override def zero: UInt = 0.U
override def identity: UInt = 1.U
override def minimum: UInt = 0.U
}
}
implicit object SIntArithmetic extends Arithmetic[SInt] {
override implicit def cast(self: SInt) = new ArithmeticOps(self) {
override def *(t: SInt) = self * t
override def mac(m1: SInt, m2: SInt) = m1 * m2 + self
override def +(t: SInt) = self + t
override def -(t: SInt) = self - t
override def >>(u: UInt) = {
// The equation we use can be found here: https://riscv.github.io/documents/riscv-v-spec/#_vector_fixed_point_rounding_mode_register_vxrm
// TODO Do we need to explicitly handle the cases where "u" is a small number (like 0)? What is the default behavior here?
val point_five = Mux(u === 0.U, 0.U, self(u - 1.U))
val zeros = Mux(u <= 1.U, 0.U, self.asUInt & ((1.U << (u - 1.U)).asUInt - 1.U)) =/= 0.U
val ones_digit = self(u)
val r = (point_five & (zeros | ones_digit)).asBool
(self >> u).asSInt + Mux(r, 1.S, 0.S)
}
override def >(t: SInt): Bool = self > t
override def withWidthOf(t: SInt) = {
if (self.getWidth >= t.getWidth)
self(t.getWidth-1, 0).asSInt
else {
val sign_bits = t.getWidth - self.getWidth
val sign = self(self.getWidth-1)
Cat(Cat(Seq.fill(sign_bits)(sign)), self).asTypeOf(t)
}
}
override def clippedToWidthOf(t: SInt): SInt = {
val maxsat = ((1 << (t.getWidth-1))-1).S
val minsat = (-(1 << (t.getWidth-1))).S
MuxCase(self, Seq((self > maxsat) -> maxsat, (self < minsat) -> minsat))(t.getWidth-1, 0).asSInt
}
override def relu: SInt = Mux(self >= 0.S, self, 0.S)
override def zero: SInt = 0.S
override def identity: SInt = 1.S
override def minimum: SInt = (-(1 << (self.getWidth-1))).S
override def divider(denom_t: UInt, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[SInt])] = {
// TODO this uses a floating point divider, but we should use an integer divider instead
val input = Wire(Decoupled(denom_t.cloneType))
val output = Wire(Decoupled(self.cloneType))
// We translate our integer to floating-point form so that we can use the hardfloat divider
val expWidth = log2Up(self.getWidth) + 1
val sigWidth = self.getWidth
def sin_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def uin_to_float(x: UInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := false.B
in_to_rec_fn.io.in := x
in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def float_to_in(x: UInt) = {
val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth))
rec_fn_to_in.io.signedOut := true.B
rec_fn_to_in.io.in := x
rec_fn_to_in.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
rec_fn_to_in.io.out.asSInt
}
val self_rec = sin_to_float(self)
val denom_rec = uin_to_float(input.bits)
        // Instantiate the hardfloat divider
val divider = Module(new DivSqrtRecFN_small(expWidth, sigWidth, options))
input.ready := divider.io.inReady
divider.io.inValid := input.valid
divider.io.sqrtOp := false.B
divider.io.a := self_rec
divider.io.b := denom_rec
divider.io.roundingMode := consts.round_minMag
divider.io.detectTininess := consts.tininess_afterRounding
output.valid := divider.io.outValid_div
output.bits := float_to_in(divider.io.out)
assert(!output.valid || output.ready)
Some((input, output))
}
override def sqrt: Option[(DecoupledIO[UInt], DecoupledIO[SInt])] = {
        // TODO this uses a floating point sqrt unit, but we should use an integer sqrt unit instead
val input = Wire(Decoupled(UInt(0.W)))
val output = Wire(Decoupled(self.cloneType))
input.bits := DontCare
// We translate our integer to floating-point form so that we can use the hardfloat divider
val expWidth = log2Up(self.getWidth) + 1
val sigWidth = self.getWidth
def in_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def float_to_in(x: UInt) = {
val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth))
rec_fn_to_in.io.signedOut := true.B
rec_fn_to_in.io.in := x
rec_fn_to_in.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
rec_fn_to_in.io.out.asSInt
}
val self_rec = in_to_float(self)
        // Instantiate the hardfloat sqrt unit
val sqrter = Module(new DivSqrtRecFN_small(expWidth, sigWidth, 0))
input.ready := sqrter.io.inReady
sqrter.io.inValid := input.valid
sqrter.io.sqrtOp := true.B
sqrter.io.a := self_rec
sqrter.io.b := DontCare
sqrter.io.roundingMode := consts.round_minMag
sqrter.io.detectTininess := consts.tininess_afterRounding
output.valid := sqrter.io.outValid_sqrt
output.bits := float_to_in(sqrter.io.out)
assert(!output.valid || output.ready)
Some((input, output))
}
override def reciprocal[U <: Data](u: U, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[U])] = u match {
case Float(expWidth, sigWidth) =>
val input = Wire(Decoupled(UInt(0.W)))
val output = Wire(Decoupled(u.cloneType))
input.bits := DontCare
// We translate our integer to floating-point form so that we can use the hardfloat divider
def in_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
val self_rec = in_to_float(self)
val one_rec = in_to_float(1.S)
          // Instantiate the hardfloat divider
val divider = Module(new DivSqrtRecFN_small(expWidth, sigWidth, options))
input.ready := divider.io.inReady
divider.io.inValid := input.valid
divider.io.sqrtOp := false.B
divider.io.a := one_rec
divider.io.b := self_rec
divider.io.roundingMode := consts.round_near_even
divider.io.detectTininess := consts.tininess_afterRounding
output.valid := divider.io.outValid_div
output.bits := fNFromRecFN(expWidth, sigWidth, divider.io.out).asTypeOf(u)
assert(!output.valid || output.ready)
Some((input, output))
case _ => None
}
override def mult_with_reciprocal[U <: Data](reciprocal: U): SInt = reciprocal match {
case recip @ Float(expWidth, sigWidth) =>
def in_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def float_to_in(x: UInt) = {
val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth))
rec_fn_to_in.io.signedOut := true.B
rec_fn_to_in.io.in := x
rec_fn_to_in.io.roundingMode := consts.round_minMag
rec_fn_to_in.io.out.asSInt
}
val self_rec = in_to_float(self)
val reciprocal_rec = recFNFromFN(expWidth, sigWidth, recip.bits)
          // Instantiate the hardfloat multiplier
val muladder = Module(new MulRecFN(expWidth, sigWidth))
muladder.io.roundingMode := consts.round_near_even
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := self_rec
muladder.io.b := reciprocal_rec
float_to_in(muladder.io.out)
case _ => self
}
}
}
implicit object FloatArithmetic extends Arithmetic[Float] {
// TODO Floating point arithmetic currently switches between recoded and standard formats for every operation. However, it should stay in the recoded format as it travels through the systolic array
override implicit def cast(self: Float): ArithmeticOps[Float] = new ArithmeticOps(self) {
override def *(t: Float): Float = {
val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth))
t_resizer.io.in := t_rec
t_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
t_resizer.io.detectTininess := consts.tininess_afterRounding
val t_rec_resized = t_resizer.io.out
val muladder = Module(new MulRecFN(self.expWidth, self.sigWidth))
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := self_rec
muladder.io.b := t_rec_resized
val out = Wire(Float(self.expWidth, self.sigWidth))
out.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
out
}
override def mac(m1: Float, m2: Float): Float = {
// Recode all operands
val m1_rec = recFNFromFN(m1.expWidth, m1.sigWidth, m1.bits)
val m2_rec = recFNFromFN(m2.expWidth, m2.sigWidth, m2.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Resize m1 to self's width
val m1_resizer = Module(new RecFNToRecFN(m1.expWidth, m1.sigWidth, self.expWidth, self.sigWidth))
m1_resizer.io.in := m1_rec
m1_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
m1_resizer.io.detectTininess := consts.tininess_afterRounding
val m1_rec_resized = m1_resizer.io.out
// Resize m2 to self's width
val m2_resizer = Module(new RecFNToRecFN(m2.expWidth, m2.sigWidth, self.expWidth, self.sigWidth))
m2_resizer.io.in := m2_rec
m2_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
m2_resizer.io.detectTininess := consts.tininess_afterRounding
val m2_rec_resized = m2_resizer.io.out
// Perform multiply-add
val muladder = Module(new MulAddRecFN(self.expWidth, self.sigWidth))
muladder.io.op := 0.U
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := m1_rec_resized
muladder.io.b := m2_rec_resized
muladder.io.c := self_rec
// Convert result to standard format // TODO remove these intermediate recodings
val out = Wire(Float(self.expWidth, self.sigWidth))
out.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
out
}
override def +(t: Float): Float = {
require(self.getWidth >= t.getWidth) // This just makes it easier to write the resizing code
// Recode all operands
val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Generate 1 as a float
val in_to_rec_fn = Module(new INToRecFN(1, self.expWidth, self.sigWidth))
in_to_rec_fn.io.signedIn := false.B
in_to_rec_fn.io.in := 1.U
in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
val one_rec = in_to_rec_fn.io.out
// Resize t
val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth))
t_resizer.io.in := t_rec
t_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
t_resizer.io.detectTininess := consts.tininess_afterRounding
val t_rec_resized = t_resizer.io.out
// Perform addition
val muladder = Module(new MulAddRecFN(self.expWidth, self.sigWidth))
muladder.io.op := 0.U
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := t_rec_resized
muladder.io.b := one_rec
muladder.io.c := self_rec
val result = Wire(Float(self.expWidth, self.sigWidth))
result.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
result
}
override def -(t: Float): Float = {
val t_sgn = t.bits(t.getWidth-1)
val neg_t = Cat(~t_sgn, t.bits(t.getWidth-2,0)).asTypeOf(t)
self + neg_t
}
override def >>(u: UInt): Float = {
// Recode self
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Get 2^(-u) as a recoded float
val shift_exp = Wire(UInt(self.expWidth.W))
shift_exp := self.bias.U - u
val shift_fn = Cat(0.U(1.W), shift_exp, 0.U((self.sigWidth-1).W))
val shift_rec = recFNFromFN(self.expWidth, self.sigWidth, shift_fn)
assert(shift_exp =/= 0.U, "scaling by denormalized numbers is not currently supported")
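        // Illustrative note (not in the original source): with expWidth = 8 (bias = 127) and u = 3,
        // shift_exp is 124 and shift_fn encodes 2^(124-127) = 0.125, so the multiply below computes self / 8.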
// Multiply self and 2^(-u)
val muladder = Module(new MulRecFN(self.expWidth, self.sigWidth))
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := self_rec
muladder.io.b := shift_rec
val result = Wire(Float(self.expWidth, self.sigWidth))
result.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
result
}
override def >(t: Float): Bool = {
// Recode all operands
val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Resize t to self's width
val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth))
t_resizer.io.in := t_rec
t_resizer.io.roundingMode := consts.round_near_even
t_resizer.io.detectTininess := consts.tininess_afterRounding
val t_rec_resized = t_resizer.io.out
val comparator = Module(new CompareRecFN(self.expWidth, self.sigWidth))
comparator.io.a := self_rec
comparator.io.b := t_rec_resized
comparator.io.signaling := false.B
comparator.io.gt
}
override def withWidthOf(t: Float): Float = {
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
val resizer = Module(new RecFNToRecFN(self.expWidth, self.sigWidth, t.expWidth, t.sigWidth))
resizer.io.in := self_rec
resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
resizer.io.detectTininess := consts.tininess_afterRounding
val result = Wire(Float(t.expWidth, t.sigWidth))
result.bits := fNFromRecFN(t.expWidth, t.sigWidth, resizer.io.out)
result
}
override def clippedToWidthOf(t: Float): Float = {
// TODO check for overflow. Right now, we just assume that overflow doesn't happen
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
val resizer = Module(new RecFNToRecFN(self.expWidth, self.sigWidth, t.expWidth, t.sigWidth))
resizer.io.in := self_rec
resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
resizer.io.detectTininess := consts.tininess_afterRounding
val result = Wire(Float(t.expWidth, t.sigWidth))
result.bits := fNFromRecFN(t.expWidth, t.sigWidth, resizer.io.out)
result
}
override def relu: Float = {
val raw = rawFloatFromFN(self.expWidth, self.sigWidth, self.bits)
val result = Wire(Float(self.expWidth, self.sigWidth))
result.bits := Mux(!raw.isZero && raw.sign, 0.U, self.bits)
result
}
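// The three constants below are built directly from their IEEE-754-style bit patterns:
// `zero` is the all-zeros encoding, `identity` (the multiplicative identity 1.0) is sign 0,
// exponent equal to the bias (0111...1), fraction 0, and `minimum` is sign 1 with an
// all-ones exponent and zero fraction, i.e. negative infinity.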
override def zero: Float = 0.U.asTypeOf(self)
override def identity: Float = Cat(0.U(2.W), ~(0.U((self.expWidth-1).W)), 0.U((self.sigWidth-1).W)).asTypeOf(self)
override def minimum: Float = Cat(1.U, ~(0.U(self.expWidth.W)), 0.U((self.sigWidth-1).W)).asTypeOf(self)
}
}
implicit object DummySIntArithmetic extends Arithmetic[DummySInt] {
override implicit def cast(self: DummySInt) = new ArithmeticOps(self) {
override def *(t: DummySInt) = self.dontCare
override def mac(m1: DummySInt, m2: DummySInt) = self.dontCare
override def +(t: DummySInt) = self.dontCare
override def -(t: DummySInt) = self.dontCare
override def >>(t: UInt) = self.dontCare
override def >(t: DummySInt): Bool = false.B
override def identity = self.dontCare
override def withWidthOf(t: DummySInt) = self.dontCare
override def clippedToWidthOf(t: DummySInt) = self.dontCare
override def relu = self.dontCare
override def zero = self.dontCare
override def minimum: DummySInt = self.dontCare
}
}
}
| module MacUnit_95( // @[PE.scala:14:7]
input clock, // @[PE.scala:14:7]
input reset, // @[PE.scala:14:7]
input [7:0] io_in_a, // @[PE.scala:16:14]
input [7:0] io_in_b, // @[PE.scala:16:14]
input [19:0] io_in_c, // @[PE.scala:16:14]
output [19:0] io_out_d // @[PE.scala:16:14]
);
wire [7:0] io_in_a_0 = io_in_a; // @[PE.scala:14:7]
wire [7:0] io_in_b_0 = io_in_b; // @[PE.scala:14:7]
wire [19:0] io_in_c_0 = io_in_c; // @[PE.scala:14:7]
wire [19:0] _io_out_d_T_3; // @[Arithmetic.scala:93:54]
wire [19:0] io_out_d_0; // @[PE.scala:14:7]
wire [15:0] _io_out_d_T = {{8{io_in_a_0[7]}}, io_in_a_0} * {{8{io_in_b_0[7]}}, io_in_b_0}; // @[PE.scala:14:7]
wire [20:0] _io_out_d_T_1 = {{5{_io_out_d_T[15]}}, _io_out_d_T} + {io_in_c_0[19], io_in_c_0}; // @[PE.scala:14:7]
wire [19:0] _io_out_d_T_2 = _io_out_d_T_1[19:0]; // @[Arithmetic.scala:93:54]
assign _io_out_d_T_3 = _io_out_d_T_2; // @[Arithmetic.scala:93:54]
assign io_out_d_0 = _io_out_d_T_3; // @[PE.scala:14:7]
assign io_out_d = io_out_d_0; // @[PE.scala:14:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File PlusArg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.experimental._
import chisel3.util.HasBlackBoxResource
@deprecated("This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05")
case class PlusArgInfo(default: BigInt, docstring: String)
/** Case class for PlusArg information
*
* @tparam A scala type of the PlusArg value
* @param default optional default value
* @param docstring text to include in the help
* @param doctype description of the Verilog type of the PlusArg value (e.g. STRING, INT)
*/
private case class PlusArgContainer[A](default: Option[A], docstring: String, doctype: String)
/** Typeclass for converting a type to a doctype string
* @tparam A some type
*/
trait Doctypeable[A] {
/** Return the doctype string for some option */
def toDoctype(a: Option[A]): String
}
/** Object containing implementations of the Doctypeable typeclass */
object Doctypes {
/** Converts an Int => "INT" */
implicit val intToDoctype = new Doctypeable[Int] { def toDoctype(a: Option[Int]) = "INT" }
/** Converts a BigInt => "INT" */
implicit val bigIntToDoctype = new Doctypeable[BigInt] { def toDoctype(a: Option[BigInt]) = "INT" }
/** Converts a String => "STRING" */
implicit val stringToDoctype = new Doctypeable[String] { def toDoctype(a: Option[String]) = "STRING" }
}
class plusarg_reader(val format: String, val default: BigInt, val docstring: String, val width: Int) extends BlackBox(Map(
"FORMAT" -> StringParam(format),
"DEFAULT" -> IntParam(default),
"WIDTH" -> IntParam(width)
)) with HasBlackBoxResource {
val io = IO(new Bundle {
val out = Output(UInt(width.W))
})
addResource("/vsrc/plusarg_reader.v")
}
/* This wrapper class has no outputs, making it clear it is a simulation-only construct */
class PlusArgTimeout(val format: String, val default: BigInt, val docstring: String, val width: Int) extends Module {
val io = IO(new Bundle {
val count = Input(UInt(width.W))
})
val max = Module(new plusarg_reader(format, default, docstring, width)).io.out
when (max > 0.U) {
assert (io.count < max, s"Timeout exceeded: $docstring")
}
}
import Doctypes._
object PlusArg
{
/** PlusArg("foo") will return 42.U if the simulation is run with +foo=42
* Do not use this as an initial register value. The value is set in an
* initial block and thus accessing it from another initial is racy.
* Add a docstring to document the arg, which can be dumped in an elaboration
* pass.
*/
def apply(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32): UInt = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new plusarg_reader(name + "=%d", default, docstring, width)).io.out
}
/** PlusArg.timeout(name, default, docstring)(count) will use chisel.assert
* to kill the simulation when count exceeds the specified integer argument.
* Default 0 will never assert.
*/
def timeout(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32)(count: UInt): Unit = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new PlusArgTimeout(name + "=%d", default, docstring, width)).io.count := count
}
}
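// Illustrative usage sketch (added for clarity, not part of the upstream file): a module
// whose watchdog threshold is supplied at simulation time. `ExamplePlusArgUser` and
// `watchdog_cycles` are hypothetical names.
class ExamplePlusArgUser extends Module {
  val cycles = RegInit(0.U(32.W))
  cycles := cycles + 1.U
  // Running the simulator with +watchdog_cycles=1000 asserts once `cycles` reaches 1000;
  // the default of 0 disables the check, matching the documentation above.
  PlusArg.timeout("watchdog_cycles", default = 0, docstring = "simulation cycle limit")(cycles)
}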
object PlusArgArtefacts {
private var artefacts: Map[String, PlusArgContainer[_]] = Map.empty
/* Add a new PlusArg */
@deprecated(
"Use `Some(BigInt)` to specify a `default` value. This will be removed in Rocket Chip 2020.08",
"Rocket Chip 2020.05"
)
def append(name: String, default: BigInt, docstring: String): Unit = append(name, Some(default), docstring)
/** Add a new PlusArg
*
* @tparam A scala type of the PlusArg value
* @param name name for the PlusArg
* @param default optional default value
* @param docstring text to include in the help
*/
def append[A : Doctypeable](name: String, default: Option[A], docstring: String): Unit =
artefacts = artefacts ++
Map(name -> PlusArgContainer(default, docstring, implicitly[Doctypeable[A]].toDoctype(default)))
/* From plus args, generate help text */
private def serializeHelp_cHeader(tab: String = ""): String = artefacts
.map{ case(arg, info) =>
s"""|$tab+$arg=${info.doctype}\\n\\
|$tab${" "*20}${info.docstring}\\n\\
|""".stripMargin ++ info.default.map{ case default =>
s"$tab${" "*22}(default=${default})\\n\\\n"}.getOrElse("")
}.toSeq.mkString("\\n\\\n") ++ "\""
/* From plus args, generate a char array of their names */
private def serializeArray_cHeader(tab: String = ""): String = {
val prettyTab = tab + " " * 44 // Length of 'static const ...'
s"${tab}static const char * verilog_plusargs [] = {\\\n" ++
artefacts
.map{ case(arg, _) => s"""$prettyTab"$arg",\\\n""" }
.mkString("")++
s"${prettyTab}0};"
}
/* Generate C code to be included in emulator.cc that helps with
* argument parsing based on available Verilog PlusArgs */
def serialize_cHeader(): String =
s"""|#define PLUSARG_USAGE_OPTIONS \"EMULATOR VERILOG PLUSARGS\\n\\
|${serializeHelp_cHeader(" "*7)}
|${serializeArray_cHeader()}
|""".stripMargin
}
File Nodes.scala:
package constellation.channel
import chisel3._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.{Parameters, Field}
import freechips.rocketchip.diplomacy._
case class EmptyParams()
case class ChannelEdgeParams(cp: ChannelParams, p: Parameters)
object ChannelImp extends SimpleNodeImp[EmptyParams, ChannelParams, ChannelEdgeParams, Channel] {
def edge(pd: EmptyParams, pu: ChannelParams, p: Parameters, sourceInfo: SourceInfo) = {
ChannelEdgeParams(pu, p)
}
def bundle(e: ChannelEdgeParams) = new Channel(e.cp)(e.p)
def render(e: ChannelEdgeParams) = if (e.cp.possibleFlows.size == 0) {
RenderedEdge(colour = "ffffff", label = "X")
} else {
RenderedEdge(colour = "#0000ff", label = e.cp.payloadBits.toString)
}
override def monitor(bundle: Channel, edge: ChannelEdgeParams): Unit = {
val monitor = Module(new NoCMonitor(edge.cp)(edge.p))
monitor.io.in := bundle
}
// TODO: Add nodepath stuff? override def mixO, override def mixI
}
case class ChannelSourceNode(val destId: Int)(implicit valName: ValName) extends SourceNode(ChannelImp)(Seq(EmptyParams()))
case class ChannelDestNode(val destParams: ChannelParams)(implicit valName: ValName) extends SinkNode(ChannelImp)(Seq(destParams))
case class ChannelAdapterNode(
slaveFn: ChannelParams => ChannelParams = { d => d })(
implicit valName: ValName) extends AdapterNode(ChannelImp)((e: EmptyParams) => e, slaveFn)
case class ChannelIdentityNode()(implicit valName: ValName) extends IdentityNode(ChannelImp)()
case class ChannelEphemeralNode()(implicit valName: ValName) extends EphemeralNode(ChannelImp)()
case class IngressChannelEdgeParams(cp: IngressChannelParams, p: Parameters)
case class EgressChannelEdgeParams(cp: EgressChannelParams, p: Parameters)
object IngressChannelImp extends SimpleNodeImp[EmptyParams, IngressChannelParams, IngressChannelEdgeParams, IngressChannel] {
def edge(pd: EmptyParams, pu: IngressChannelParams, p: Parameters, sourceInfo: SourceInfo) = {
IngressChannelEdgeParams(pu, p)
}
def bundle(e: IngressChannelEdgeParams) = new IngressChannel(e.cp)(e.p)
def render(e: IngressChannelEdgeParams) = if (e.cp.possibleFlows.size == 0) {
RenderedEdge(colour = "ffffff", label = "X")
} else {
RenderedEdge(colour = "#00ff00", label = e.cp.payloadBits.toString)
}
}
object EgressChannelImp extends SimpleNodeImp[EmptyParams, EgressChannelParams, EgressChannelEdgeParams, EgressChannel] {
def edge(pd: EmptyParams, pu: EgressChannelParams, p: Parameters, sourceInfo: SourceInfo) = {
EgressChannelEdgeParams(pu, p)
}
def bundle(e: EgressChannelEdgeParams) = new EgressChannel(e.cp)(e.p)
def render(e: EgressChannelEdgeParams) = if (e.cp.possibleFlows.size == 0) {
RenderedEdge(colour = "ffffff", label = "X")
} else {
RenderedEdge(colour = "#ff0000", label = e.cp.payloadBits.toString)
}
}
case class IngressChannelSourceNode(val destId: Int)(implicit valName: ValName) extends SourceNode(IngressChannelImp)(Seq(EmptyParams()))
case class IngressChannelDestNode(val destParams: IngressChannelParams)(implicit valName: ValName) extends SinkNode(IngressChannelImp)(Seq(destParams))
case class EgressChannelSourceNode(val egressId: Int)(implicit valName: ValName) extends SourceNode(EgressChannelImp)(Seq(EmptyParams()))
case class EgressChannelDestNode(val destParams: EgressChannelParams)(implicit valName: ValName) extends SinkNode(EgressChannelImp)(Seq(destParams))
case class IngressChannelAdapterNode(
slaveFn: IngressChannelParams => IngressChannelParams = { d => d })(
implicit valName: ValName) extends AdapterNode(IngressChannelImp)(m => m, slaveFn)
case class EgressChannelAdapterNode(
slaveFn: EgressChannelParams => EgressChannelParams = { d => d })(
implicit valName: ValName) extends AdapterNode(EgressChannelImp)(m => m, slaveFn)
case class IngressChannelIdentityNode()(implicit valName: ValName) extends IdentityNode(IngressChannelImp)()
case class EgressChannelIdentityNode()(implicit valName: ValName) extends IdentityNode(EgressChannelImp)()
case class IngressChannelEphemeralNode()(implicit valName: ValName) extends EphemeralNode(IngressChannelImp)()
case class EgressChannelEphemeralNode()(implicit valName: ValName) extends EphemeralNode(EgressChannelImp)()
File Router.scala:
package constellation.router
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.{Field, Parameters}
import freechips.rocketchip.diplomacy._
import freechips.rocketchip.util._
import constellation.channel._
import constellation.routing.{RoutingRelation}
import constellation.noc.{HasNoCParams}
case class UserRouterParams(
// Payload width. Must match payload width on all channels attached to this routing node
payloadBits: Int = 64,
// Combines SA and ST stages (removes pipeline register)
combineSAST: Boolean = false,
// Combines RC and VA stages (removes pipeline register)
combineRCVA: Boolean = false,
// Adds combinational path from SA to VA
coupleSAVA: Boolean = false,
vcAllocator: VCAllocatorParams => Parameters => VCAllocator = (vP) => (p) => new RotatingSingleVCAllocator(vP)(p)
)
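// Illustrative configuration sketch (added for clarity, not part of the upstream file);
// `ExampleRouterConfigs` and its values are hypothetical. It shows how the knobs above
// compose: here the SA and ST stages are fused, removing one pipeline register per hop.
object ExampleRouterConfigs {
  val lowLatency = UserRouterParams(payloadBits = 64, combineSAST = true)
}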
case class RouterParams(
nodeId: Int,
nIngress: Int,
nEgress: Int,
user: UserRouterParams
)
trait HasRouterOutputParams {
def outParams: Seq[ChannelParams]
def egressParams: Seq[EgressChannelParams]
def allOutParams = outParams ++ egressParams
def nOutputs = outParams.size
def nEgress = egressParams.size
def nAllOutputs = allOutParams.size
}
trait HasRouterInputParams {
def inParams: Seq[ChannelParams]
def ingressParams: Seq[IngressChannelParams]
def allInParams = inParams ++ ingressParams
def nInputs = inParams.size
def nIngress = ingressParams.size
def nAllInputs = allInParams.size
}
trait HasRouterParams
{
def routerParams: RouterParams
def nodeId = routerParams.nodeId
def payloadBits = routerParams.user.payloadBits
}
class DebugBundle(val nIn: Int) extends Bundle {
val va_stall = Vec(nIn, UInt())
val sa_stall = Vec(nIn, UInt())
}
class Router(
val routerParams: RouterParams,
preDiplomaticInParams: Seq[ChannelParams],
preDiplomaticIngressParams: Seq[IngressChannelParams],
outDests: Seq[Int],
egressIds: Seq[Int]
)(implicit p: Parameters) extends LazyModule with HasNoCParams with HasRouterParams {
val allPreDiplomaticInParams = preDiplomaticInParams ++ preDiplomaticIngressParams
val destNodes = preDiplomaticInParams.map(u => ChannelDestNode(u))
val sourceNodes = outDests.map(u => ChannelSourceNode(u))
val ingressNodes = preDiplomaticIngressParams.map(u => IngressChannelDestNode(u))
val egressNodes = egressIds.map(u => EgressChannelSourceNode(u))
val debugNode = BundleBridgeSource(() => new DebugBundle(allPreDiplomaticInParams.size))
val ctrlNode = if (hasCtrl) Some(BundleBridgeSource(() => new RouterCtrlBundle)) else None
def inParams = module.inParams
def outParams = module.outParams
def ingressParams = module.ingressParams
def egressParams = module.egressParams
lazy val module = new LazyModuleImp(this) with HasRouterInputParams with HasRouterOutputParams {
val (io_in, edgesIn) = destNodes.map(_.in(0)).unzip
val (io_out, edgesOut) = sourceNodes.map(_.out(0)).unzip
val (io_ingress, edgesIngress) = ingressNodes.map(_.in(0)).unzip
val (io_egress, edgesEgress) = egressNodes.map(_.out(0)).unzip
val io_debug = debugNode.out(0)._1
val inParams = edgesIn.map(_.cp)
val outParams = edgesOut.map(_.cp)
val ingressParams = edgesIngress.map(_.cp)
val egressParams = edgesEgress.map(_.cp)
allOutParams.foreach(u => require(u.srcId == nodeId && u.payloadBits == routerParams.user.payloadBits))
allInParams.foreach(u => require(u.destId == nodeId && u.payloadBits == routerParams.user.payloadBits))
require(nIngress == routerParams.nIngress)
require(nEgress == routerParams.nEgress)
require(nAllInputs >= 1)
require(nAllOutputs >= 1)
require(nodeId < (1 << nodeIdBits))
val input_units = inParams.zipWithIndex.map { case (u,i) =>
Module(new InputUnit(u, outParams, egressParams,
routerParams.user.combineRCVA, routerParams.user.combineSAST))
.suggestName(s"input_unit_${i}_from_${u.srcId}") }
val ingress_units = ingressParams.zipWithIndex.map { case (u,i) =>
Module(new IngressUnit(i, u, outParams, egressParams,
routerParams.user.combineRCVA, routerParams.user.combineSAST))
.suggestName(s"ingress_unit_${i+nInputs}_from_${u.ingressId}") }
val all_input_units = input_units ++ ingress_units
val output_units = outParams.zipWithIndex.map { case (u,i) =>
Module(new OutputUnit(inParams, ingressParams, u))
.suggestName(s"output_unit_${i}_to_${u.destId}")}
val egress_units = egressParams.zipWithIndex.map { case (u,i) =>
Module(new EgressUnit(routerParams.user.coupleSAVA && all_input_units.size == 1,
routerParams.user.combineSAST,
inParams, ingressParams, u))
.suggestName(s"egress_unit_${i+nOutputs}_to_${u.egressId}")}
val all_output_units = output_units ++ egress_units
val switch = Module(new Switch(routerParams, inParams, outParams, ingressParams, egressParams))
val switch_allocator = Module(new SwitchAllocator(routerParams, inParams, outParams, ingressParams, egressParams))
val vc_allocator = Module(routerParams.user.vcAllocator(
VCAllocatorParams(routerParams, inParams, outParams, ingressParams, egressParams)
)(p))
val route_computer = Module(new RouteComputer(routerParams, inParams, outParams, ingressParams, egressParams))
val fires_count = WireInit(PopCount(vc_allocator.io.req.map(_.fire)))
dontTouch(fires_count)
(io_in zip input_units ).foreach { case (i,u) => u.io.in <> i }
(io_ingress zip ingress_units).foreach { case (i,u) => u.io.in <> i.flit }
(output_units zip io_out ).foreach { case (u,o) => o <> u.io.out }
(egress_units zip io_egress).foreach { case (u,o) => o.flit <> u.io.out }
(route_computer.io.req zip all_input_units).foreach {
case (i,u) => i <> u.io.router_req }
(all_input_units zip route_computer.io.resp).foreach {
case (u,o) => u.io.router_resp <> o }
(vc_allocator.io.req zip all_input_units).foreach {
case (i,u) => i <> u.io.vcalloc_req }
(all_input_units zip vc_allocator.io.resp).foreach {
case (u,o) => u.io.vcalloc_resp <> o }
(all_output_units zip vc_allocator.io.out_allocs).foreach {
case (u,a) => u.io.allocs <> a }
(vc_allocator.io.channel_status zip all_output_units).foreach {
case (a,u) => a := u.io.channel_status }
all_input_units.foreach(in => all_output_units.zipWithIndex.foreach { case (out,outIdx) =>
in.io.out_credit_available(outIdx) := out.io.credit_available
})
(all_input_units zip switch_allocator.io.req).foreach {
case (u,r) => r <> u.io.salloc_req }
(all_output_units zip switch_allocator.io.credit_alloc).foreach {
case (u,a) => u.io.credit_alloc := a }
(switch.io.in zip all_input_units).foreach {
case (i,u) => i <> u.io.out }
(all_output_units zip switch.io.out).foreach {
case (u,o) => u.io.in <> o }
switch.io.sel := (if (routerParams.user.combineSAST) {
switch_allocator.io.switch_sel
} else {
RegNext(switch_allocator.io.switch_sel)
})
if (hasCtrl) {
val io_ctrl = ctrlNode.get.out(0)._1
val ctrl = Module(new RouterControlUnit(routerParams, inParams, outParams, ingressParams, egressParams))
io_ctrl <> ctrl.io.ctrl
(all_input_units zip ctrl.io.in_block ).foreach { case (l,r) => l.io.block := r }
(all_input_units zip ctrl.io.in_fire ).foreach { case (l,r) => r := l.io.out.map(_.valid) }
} else {
input_units.foreach(_.io.block := false.B)
ingress_units.foreach(_.io.block := false.B)
}
(io_debug.va_stall zip all_input_units.map(_.io.debug.va_stall)).map { case (l,r) => l := r }
(io_debug.sa_stall zip all_input_units.map(_.io.debug.sa_stall)).map { case (l,r) => l := r }
val debug_tsc = RegInit(0.U(64.W))
debug_tsc := debug_tsc + 1.U
val debug_sample = RegInit(0.U(64.W))
debug_sample := debug_sample + 1.U
val sample_rate = PlusArg("noc_util_sample_rate", width=20)
when (debug_sample === sample_rate - 1.U) { debug_sample := 0.U }
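// Utilization sampling (added commentary): each call to sample() below keeps a cumulative
// counter of flit fires that is printed every `sample_rate` cycles, but only when the
// +noc_util_sample_rate plusarg is nonzero and the channel has fired since the last sample.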
def sample(fire: Bool, s: String) = {
val util_ctr = RegInit(0.U(64.W))
val fired = RegInit(false.B)
util_ctr := util_ctr + fire
fired := fired || fire
when (sample_rate =/= 0.U && debug_sample === sample_rate - 1.U && fired) {
val fmtStr = s"nocsample %d $s %d\n"
printf(fmtStr, debug_tsc, util_ctr);
fired := fire
}
}
destNodes.map(_.in(0)).foreach { case (in, edge) => in.flit.map { f =>
sample(f.fire, s"${edge.cp.srcId} $nodeId")
} }
ingressNodes.map(_.in(0)).foreach { case (in, edge) =>
sample(in.flit.fire, s"i${edge.cp.asInstanceOf[IngressChannelParams].ingressId} $nodeId")
}
egressNodes.map(_.out(0)).foreach { case (out, edge) =>
sample(out.flit.fire, s"$nodeId e${edge.cp.asInstanceOf[EgressChannelParams].egressId}")
}
}
}
File LazyModuleImp.scala:
package org.chipsalliance.diplomacy.lazymodule
import chisel3.{withClockAndReset, Module, RawModule, Reset, _}
import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo}
import firrtl.passes.InlineAnnotation
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.nodes.Dangle
import scala.collection.immutable.SortedMap
/** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]].
*
* This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy.
*/
sealed trait LazyModuleImpLike extends RawModule {
/** [[LazyModule]] that contains this instance. */
val wrapper: LazyModule
/** IOs that will be automatically "punched" for this instance. */
val auto: AutoBundle
/** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */
protected[diplomacy] val dangles: Seq[Dangle]
// [[wrapper.module]] had better not be accessed while LazyModules are still being built!
require(
LazyModule.scope.isEmpty,
s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}"
)
/** Set module name. Defaults to the containing LazyModule's desiredName. */
override def desiredName: String = wrapper.desiredName
suggestName(wrapper.suggestedName)
/** [[Parameters]] for chisel [[Module]]s. */
implicit val p: Parameters = wrapper.p
/** Instantiate this [[LazyModule]], returning the [[AutoBundle]] and the unconnected [[Dangle]]s from this module and
* submodules.
*/
protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = {
// 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]],
// 2. return [[Dangle]]s from each module.
val childDangles = wrapper.children.reverse.flatMap { c =>
implicit val sourceInfo: SourceInfo = c.info
c.cloneProto.map { cp =>
// If the child is a clone, then recursively set cloneProto of its children as well
def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = {
require(bases.size == clones.size)
(bases.zip(clones)).map { case (l, r) =>
require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}")
l.cloneProto = Some(r)
assignCloneProtos(l.children, r.children)
}
}
assignCloneProtos(c.children, cp.children)
// Clone the child module as a record, and get its [[AutoBundle]]
val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName)
val clonedAuto = clone("auto").asInstanceOf[AutoBundle]
// Get the empty [[Dangle]]s of the cloned child
val rawDangles = c.cloneDangles()
require(rawDangles.size == clonedAuto.elements.size)
// Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]s
val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) }
dangles
}.getOrElse {
// For non-clones, instantiate the child module
val mod = try {
Module(c.module)
} catch {
case e: ChiselException => {
println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}")
throw e
}
}
mod.dangles
}
}
// Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]].
// This will result in a sequence of [[Dangle]] from these [[BaseNode]]s.
val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate())
// Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]]
val allDangles = nodeDangles ++ childDangles
// Group [[allDangles]] by their [[source]].
val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*)
// For each [[source]] set of [[Dangle]]s of size 2, ensure that these
// can be connected as a source-sink pair (have opposite flipped value).
// Make the connection and mark them as [[done]].
val done = Set() ++ pairing.values.filter(_.size == 2).map {
case Seq(a, b) =>
require(a.flipped != b.flipped)
// @todo <> in chisel3 makes directionless connection.
if (a.flipped) {
a.data <> b.data
} else {
b.data <> a.data
}
a.source
case _ => None
}
// Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module.
val forward = allDangles.filter(d => !done(d.source))
// Generate [[AutoBundle]] IO from [[forward]].
val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*))
// Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]]
val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) =>
if (d.flipped) {
d.data <> io
} else {
io <> d.data
}
d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name)
}
// Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]].
wrapper.inModuleBody.reverse.foreach {
_()
}
if (wrapper.shouldBeInlined) {
chisel3.experimental.annotate(new ChiselAnnotation {
def toFirrtl = InlineAnnotation(toNamed)
})
}
// Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]].
(auto, dangles)
}
}
/** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike {
/** Instantiate hardware of this `Module`. */
val (auto, dangles) = instantiate()
}
/** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike {
// These wires are the default clock+reset for all LazyModule children.
// It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the
// [[LazyRawModuleImp]] children.
// Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly.
/** drive clock explicitly. */
val childClock: Clock = Wire(Clock())
/** drive reset explicitly. */
val childReset: Reset = Wire(Reset())
// the default is that these are disabled
childClock := false.B.asClock
childReset := chisel3.DontCare
def provideImplicitClockToLazyChildren: Boolean = false
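// Sketch of the usual subclass pattern (added commentary, hypothetical): a subclass with a
// real clock/reset re-drives childClock/childReset (the later connection wins, and the
// children reference the wires, not their current drivers) and overrides
// provideImplicitClockToLazyChildren so instantiate() elaborates children under them.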
val (auto, dangles) =
if (provideImplicitClockToLazyChildren) {
withClockAndReset(childClock, childReset) { instantiate() }
} else {
instantiate()
}
}
| module Router_5( // @[Router.scala:89:25]
input clock, // @[Router.scala:89:25]
input reset, // @[Router.scala:89:25]
output [1:0] auto_debug_out_va_stall_0, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_debug_out_va_stall_1, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_debug_out_va_stall_2, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_debug_out_va_stall_3, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_debug_out_va_stall_4, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_debug_out_sa_stall_0, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_debug_out_sa_stall_1, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_debug_out_sa_stall_2, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_debug_out_sa_stall_3, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_debug_out_sa_stall_4, // @[LazyModuleImp.scala:107:25]
input auto_egress_nodes_out_1_flit_ready, // @[LazyModuleImp.scala:107:25]
output auto_egress_nodes_out_1_flit_valid, // @[LazyModuleImp.scala:107:25]
output auto_egress_nodes_out_1_flit_bits_head, // @[LazyModuleImp.scala:107:25]
output auto_egress_nodes_out_1_flit_bits_tail, // @[LazyModuleImp.scala:107:25]
output [144:0] auto_egress_nodes_out_1_flit_bits_payload, // @[LazyModuleImp.scala:107:25]
input auto_egress_nodes_out_0_flit_ready, // @[LazyModuleImp.scala:107:25]
output auto_egress_nodes_out_0_flit_valid, // @[LazyModuleImp.scala:107:25]
output auto_egress_nodes_out_0_flit_bits_head, // @[LazyModuleImp.scala:107:25]
output auto_egress_nodes_out_0_flit_bits_tail, // @[LazyModuleImp.scala:107:25]
output [144:0] auto_egress_nodes_out_0_flit_bits_payload, // @[LazyModuleImp.scala:107:25]
output auto_ingress_nodes_in_flit_ready, // @[LazyModuleImp.scala:107:25]
input auto_ingress_nodes_in_flit_valid, // @[LazyModuleImp.scala:107:25]
input auto_ingress_nodes_in_flit_bits_head, // @[LazyModuleImp.scala:107:25]
input auto_ingress_nodes_in_flit_bits_tail, // @[LazyModuleImp.scala:107:25]
input [144:0] auto_ingress_nodes_in_flit_bits_payload, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_ingress_nodes_in_flit_bits_egress_id, // @[LazyModuleImp.scala:107:25]
output auto_source_nodes_out_3_flit_0_valid, // @[LazyModuleImp.scala:107:25]
output auto_source_nodes_out_3_flit_0_bits_head, // @[LazyModuleImp.scala:107:25]
output auto_source_nodes_out_3_flit_0_bits_tail, // @[LazyModuleImp.scala:107:25]
output [144:0] auto_source_nodes_out_3_flit_0_bits_payload, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_source_nodes_out_3_flit_0_bits_flow_vnet_id, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_source_nodes_out_3_flit_0_bits_flow_ingress_node, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_source_nodes_out_3_flit_0_bits_flow_ingress_node_id, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_source_nodes_out_3_flit_0_bits_flow_egress_node, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_source_nodes_out_3_flit_0_bits_flow_egress_node_id, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_source_nodes_out_3_flit_0_bits_virt_channel_id, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_source_nodes_out_3_credit_return, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_source_nodes_out_3_vc_free, // @[LazyModuleImp.scala:107:25]
output auto_source_nodes_out_2_flit_0_valid, // @[LazyModuleImp.scala:107:25]
output auto_source_nodes_out_2_flit_0_bits_head, // @[LazyModuleImp.scala:107:25]
output auto_source_nodes_out_2_flit_0_bits_tail, // @[LazyModuleImp.scala:107:25]
output [144:0] auto_source_nodes_out_2_flit_0_bits_payload, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_source_nodes_out_2_flit_0_bits_flow_vnet_id, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_source_nodes_out_2_flit_0_bits_flow_ingress_node, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_source_nodes_out_2_flit_0_bits_flow_ingress_node_id, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_source_nodes_out_2_flit_0_bits_flow_egress_node, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_source_nodes_out_2_flit_0_bits_flow_egress_node_id, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_source_nodes_out_2_flit_0_bits_virt_channel_id, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_source_nodes_out_2_credit_return, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_source_nodes_out_2_vc_free, // @[LazyModuleImp.scala:107:25]
output auto_source_nodes_out_1_flit_0_valid, // @[LazyModuleImp.scala:107:25]
output auto_source_nodes_out_1_flit_0_bits_head, // @[LazyModuleImp.scala:107:25]
output auto_source_nodes_out_1_flit_0_bits_tail, // @[LazyModuleImp.scala:107:25]
output [144:0] auto_source_nodes_out_1_flit_0_bits_payload, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_source_nodes_out_1_flit_0_bits_flow_vnet_id, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_source_nodes_out_1_flit_0_bits_flow_ingress_node, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_source_nodes_out_1_flit_0_bits_flow_ingress_node_id, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_source_nodes_out_1_flit_0_bits_flow_egress_node, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_source_nodes_out_1_flit_0_bits_flow_egress_node_id, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_source_nodes_out_1_flit_0_bits_virt_channel_id, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_source_nodes_out_1_credit_return, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_source_nodes_out_1_vc_free, // @[LazyModuleImp.scala:107:25]
output auto_source_nodes_out_0_flit_0_valid, // @[LazyModuleImp.scala:107:25]
output auto_source_nodes_out_0_flit_0_bits_head, // @[LazyModuleImp.scala:107:25]
output auto_source_nodes_out_0_flit_0_bits_tail, // @[LazyModuleImp.scala:107:25]
output [144:0] auto_source_nodes_out_0_flit_0_bits_payload, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_source_nodes_out_0_flit_0_bits_flow_vnet_id, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_source_nodes_out_0_flit_0_bits_flow_ingress_node, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_source_nodes_out_0_flit_0_bits_flow_ingress_node_id, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_source_nodes_out_0_flit_0_bits_flow_egress_node, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_source_nodes_out_0_flit_0_bits_flow_egress_node_id, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_source_nodes_out_0_flit_0_bits_virt_channel_id, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_source_nodes_out_0_credit_return, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_source_nodes_out_0_vc_free, // @[LazyModuleImp.scala:107:25]
input auto_dest_nodes_in_3_flit_0_valid, // @[LazyModuleImp.scala:107:25]
input auto_dest_nodes_in_3_flit_0_bits_head, // @[LazyModuleImp.scala:107:25]
input auto_dest_nodes_in_3_flit_0_bits_tail, // @[LazyModuleImp.scala:107:25]
input [144:0] auto_dest_nodes_in_3_flit_0_bits_payload, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_dest_nodes_in_3_flit_0_bits_flow_vnet_id, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_dest_nodes_in_3_flit_0_bits_flow_ingress_node, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_dest_nodes_in_3_flit_0_bits_flow_ingress_node_id, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_dest_nodes_in_3_flit_0_bits_flow_egress_node, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_dest_nodes_in_3_flit_0_bits_flow_egress_node_id, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_dest_nodes_in_3_flit_0_bits_virt_channel_id, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_dest_nodes_in_3_credit_return, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_dest_nodes_in_3_vc_free, // @[LazyModuleImp.scala:107:25]
input auto_dest_nodes_in_2_flit_0_valid, // @[LazyModuleImp.scala:107:25]
input auto_dest_nodes_in_2_flit_0_bits_head, // @[LazyModuleImp.scala:107:25]
input auto_dest_nodes_in_2_flit_0_bits_tail, // @[LazyModuleImp.scala:107:25]
input [144:0] auto_dest_nodes_in_2_flit_0_bits_payload, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_dest_nodes_in_2_flit_0_bits_flow_vnet_id, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_dest_nodes_in_2_flit_0_bits_flow_ingress_node, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_dest_nodes_in_2_flit_0_bits_flow_ingress_node_id, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_dest_nodes_in_2_flit_0_bits_flow_egress_node, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_dest_nodes_in_2_flit_0_bits_flow_egress_node_id, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_dest_nodes_in_2_flit_0_bits_virt_channel_id, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_dest_nodes_in_2_credit_return, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_dest_nodes_in_2_vc_free, // @[LazyModuleImp.scala:107:25]
input auto_dest_nodes_in_1_flit_0_valid, // @[LazyModuleImp.scala:107:25]
input auto_dest_nodes_in_1_flit_0_bits_head, // @[LazyModuleImp.scala:107:25]
input auto_dest_nodes_in_1_flit_0_bits_tail, // @[LazyModuleImp.scala:107:25]
input [144:0] auto_dest_nodes_in_1_flit_0_bits_payload, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_dest_nodes_in_1_flit_0_bits_flow_vnet_id, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_dest_nodes_in_1_flit_0_bits_flow_ingress_node, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_dest_nodes_in_1_flit_0_bits_flow_ingress_node_id, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_dest_nodes_in_1_flit_0_bits_flow_egress_node, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_dest_nodes_in_1_flit_0_bits_flow_egress_node_id, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_dest_nodes_in_1_flit_0_bits_virt_channel_id, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_dest_nodes_in_1_credit_return, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_dest_nodes_in_1_vc_free, // @[LazyModuleImp.scala:107:25]
input auto_dest_nodes_in_0_flit_0_valid, // @[LazyModuleImp.scala:107:25]
input auto_dest_nodes_in_0_flit_0_bits_head, // @[LazyModuleImp.scala:107:25]
input auto_dest_nodes_in_0_flit_0_bits_tail, // @[LazyModuleImp.scala:107:25]
input [144:0] auto_dest_nodes_in_0_flit_0_bits_payload, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_dest_nodes_in_0_flit_0_bits_flow_vnet_id, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_dest_nodes_in_0_flit_0_bits_flow_ingress_node, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_dest_nodes_in_0_flit_0_bits_flow_ingress_node_id, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_dest_nodes_in_0_flit_0_bits_flow_egress_node, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_dest_nodes_in_0_flit_0_bits_flow_egress_node_id, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_dest_nodes_in_0_flit_0_bits_virt_channel_id, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_dest_nodes_in_0_credit_return, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_dest_nodes_in_0_vc_free // @[LazyModuleImp.scala:107:25]
);
wire [19:0] _plusarg_reader_out; // @[PlusArg.scala:80:11]
wire _route_computer_io_resp_4_vc_sel_3_0; // @[Router.scala:136:32]
wire _route_computer_io_resp_4_vc_sel_3_1; // @[Router.scala:136:32]
wire _route_computer_io_resp_4_vc_sel_3_2; // @[Router.scala:136:32]
wire _route_computer_io_resp_4_vc_sel_2_0; // @[Router.scala:136:32]
wire _route_computer_io_resp_4_vc_sel_2_1; // @[Router.scala:136:32]
wire _route_computer_io_resp_4_vc_sel_2_2; // @[Router.scala:136:32]
wire _route_computer_io_resp_4_vc_sel_1_0; // @[Router.scala:136:32]
wire _route_computer_io_resp_4_vc_sel_1_1; // @[Router.scala:136:32]
wire _route_computer_io_resp_4_vc_sel_1_2; // @[Router.scala:136:32]
wire _route_computer_io_resp_4_vc_sel_0_0; // @[Router.scala:136:32]
wire _route_computer_io_resp_4_vc_sel_0_1; // @[Router.scala:136:32]
wire _route_computer_io_resp_4_vc_sel_0_2; // @[Router.scala:136:32]
wire _route_computer_io_resp_3_vc_sel_3_0; // @[Router.scala:136:32]
wire _route_computer_io_resp_3_vc_sel_3_1; // @[Router.scala:136:32]
wire _route_computer_io_resp_3_vc_sel_3_2; // @[Router.scala:136:32]
wire _route_computer_io_resp_3_vc_sel_2_0; // @[Router.scala:136:32]
wire _route_computer_io_resp_3_vc_sel_2_1; // @[Router.scala:136:32]
wire _route_computer_io_resp_3_vc_sel_2_2; // @[Router.scala:136:32]
wire _route_computer_io_resp_3_vc_sel_1_0; // @[Router.scala:136:32]
wire _route_computer_io_resp_3_vc_sel_1_1; // @[Router.scala:136:32]
wire _route_computer_io_resp_3_vc_sel_1_2; // @[Router.scala:136:32]
wire _route_computer_io_resp_3_vc_sel_0_0; // @[Router.scala:136:32]
wire _route_computer_io_resp_3_vc_sel_0_1; // @[Router.scala:136:32]
wire _route_computer_io_resp_3_vc_sel_0_2; // @[Router.scala:136:32]
wire _route_computer_io_resp_2_vc_sel_3_0; // @[Router.scala:136:32]
wire _route_computer_io_resp_2_vc_sel_3_1; // @[Router.scala:136:32]
wire _route_computer_io_resp_2_vc_sel_3_2; // @[Router.scala:136:32]
wire _route_computer_io_resp_2_vc_sel_2_0; // @[Router.scala:136:32]
wire _route_computer_io_resp_2_vc_sel_2_1; // @[Router.scala:136:32]
wire _route_computer_io_resp_2_vc_sel_2_2; // @[Router.scala:136:32]
wire _route_computer_io_resp_2_vc_sel_1_0; // @[Router.scala:136:32]
wire _route_computer_io_resp_2_vc_sel_1_1; // @[Router.scala:136:32]
wire _route_computer_io_resp_2_vc_sel_1_2; // @[Router.scala:136:32]
wire _route_computer_io_resp_2_vc_sel_0_0; // @[Router.scala:136:32]
wire _route_computer_io_resp_2_vc_sel_0_1; // @[Router.scala:136:32]
wire _route_computer_io_resp_2_vc_sel_0_2; // @[Router.scala:136:32]
wire _route_computer_io_resp_1_vc_sel_3_0; // @[Router.scala:136:32]
wire _route_computer_io_resp_1_vc_sel_3_1; // @[Router.scala:136:32]
wire _route_computer_io_resp_1_vc_sel_3_2; // @[Router.scala:136:32]
wire _route_computer_io_resp_1_vc_sel_2_0; // @[Router.scala:136:32]
wire _route_computer_io_resp_1_vc_sel_2_1; // @[Router.scala:136:32]
wire _route_computer_io_resp_1_vc_sel_2_2; // @[Router.scala:136:32]
wire _route_computer_io_resp_1_vc_sel_1_0; // @[Router.scala:136:32]
wire _route_computer_io_resp_1_vc_sel_1_1; // @[Router.scala:136:32]
wire _route_computer_io_resp_1_vc_sel_1_2; // @[Router.scala:136:32]
wire _route_computer_io_resp_1_vc_sel_0_0; // @[Router.scala:136:32]
wire _route_computer_io_resp_1_vc_sel_0_1; // @[Router.scala:136:32]
wire _route_computer_io_resp_1_vc_sel_0_2; // @[Router.scala:136:32]
wire _route_computer_io_resp_0_vc_sel_3_0; // @[Router.scala:136:32]
wire _route_computer_io_resp_0_vc_sel_3_1; // @[Router.scala:136:32]
wire _route_computer_io_resp_0_vc_sel_3_2; // @[Router.scala:136:32]
wire _route_computer_io_resp_0_vc_sel_2_0; // @[Router.scala:136:32]
wire _route_computer_io_resp_0_vc_sel_2_1; // @[Router.scala:136:32]
wire _route_computer_io_resp_0_vc_sel_2_2; // @[Router.scala:136:32]
wire _route_computer_io_resp_0_vc_sel_1_0; // @[Router.scala:136:32]
wire _route_computer_io_resp_0_vc_sel_1_1; // @[Router.scala:136:32]
wire _route_computer_io_resp_0_vc_sel_1_2; // @[Router.scala:136:32]
wire _route_computer_io_resp_0_vc_sel_0_0; // @[Router.scala:136:32]
wire _route_computer_io_resp_0_vc_sel_0_1; // @[Router.scala:136:32]
wire _route_computer_io_resp_0_vc_sel_0_2; // @[Router.scala:136:32]
wire _vc_allocator_io_req_4_ready; // @[Router.scala:133:30]
wire _vc_allocator_io_req_3_ready; // @[Router.scala:133:30]
wire _vc_allocator_io_req_2_ready; // @[Router.scala:133:30]
wire _vc_allocator_io_req_1_ready; // @[Router.scala:133:30]
wire _vc_allocator_io_req_0_ready; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_4_vc_sel_5_0; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_4_vc_sel_4_0; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_4_vc_sel_3_0; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_4_vc_sel_3_1; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_4_vc_sel_3_2; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_4_vc_sel_2_0; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_4_vc_sel_2_1; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_4_vc_sel_2_2; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_4_vc_sel_1_0; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_4_vc_sel_1_1; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_4_vc_sel_1_2; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_4_vc_sel_0_0; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_4_vc_sel_0_1; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_4_vc_sel_0_2; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_3_vc_sel_5_0; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_3_vc_sel_4_0; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_3_vc_sel_0_0; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_2_vc_sel_5_0; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_2_vc_sel_4_0; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_2_vc_sel_3_0; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_2_vc_sel_3_1; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_2_vc_sel_3_2; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_2_vc_sel_1_0; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_2_vc_sel_1_2; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_2_vc_sel_0_0; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_1_vc_sel_5_0; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_1_vc_sel_4_0; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_1_vc_sel_3_1; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_1_vc_sel_3_2; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_1_vc_sel_2_1; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_1_vc_sel_2_2; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_0_vc_sel_5_0; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_0_vc_sel_4_0; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_0_vc_sel_3_0; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_0_vc_sel_3_1; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_0_vc_sel_3_2; // @[Router.scala:133:30]
wire _vc_allocator_io_out_allocs_5_0_alloc; // @[Router.scala:133:30]
wire _vc_allocator_io_out_allocs_4_0_alloc; // @[Router.scala:133:30]
wire _vc_allocator_io_out_allocs_3_0_alloc; // @[Router.scala:133:30]
wire _vc_allocator_io_out_allocs_3_1_alloc; // @[Router.scala:133:30]
wire _vc_allocator_io_out_allocs_3_2_alloc; // @[Router.scala:133:30]
wire _vc_allocator_io_out_allocs_2_0_alloc; // @[Router.scala:133:30]
wire _vc_allocator_io_out_allocs_2_1_alloc; // @[Router.scala:133:30]
wire _vc_allocator_io_out_allocs_2_2_alloc; // @[Router.scala:133:30]
wire _vc_allocator_io_out_allocs_1_0_alloc; // @[Router.scala:133:30]
wire _vc_allocator_io_out_allocs_1_2_alloc; // @[Router.scala:133:30]
wire _vc_allocator_io_out_allocs_0_0_alloc; // @[Router.scala:133:30]
wire _switch_allocator_io_req_4_0_ready; // @[Router.scala:132:34]
wire _switch_allocator_io_req_3_0_ready; // @[Router.scala:132:34]
wire _switch_allocator_io_req_2_0_ready; // @[Router.scala:132:34]
wire _switch_allocator_io_req_1_0_ready; // @[Router.scala:132:34]
wire _switch_allocator_io_req_0_0_ready; // @[Router.scala:132:34]
wire _switch_allocator_io_credit_alloc_5_0_alloc; // @[Router.scala:132:34]
wire _switch_allocator_io_credit_alloc_5_0_tail; // @[Router.scala:132:34]
wire _switch_allocator_io_credit_alloc_4_0_alloc; // @[Router.scala:132:34]
wire _switch_allocator_io_credit_alloc_4_0_tail; // @[Router.scala:132:34]
wire _switch_allocator_io_credit_alloc_3_0_alloc; // @[Router.scala:132:34]
wire _switch_allocator_io_credit_alloc_3_1_alloc; // @[Router.scala:132:34]
wire _switch_allocator_io_credit_alloc_3_2_alloc; // @[Router.scala:132:34]
wire _switch_allocator_io_credit_alloc_2_0_alloc; // @[Router.scala:132:34]
wire _switch_allocator_io_credit_alloc_2_1_alloc; // @[Router.scala:132:34]
wire _switch_allocator_io_credit_alloc_2_2_alloc; // @[Router.scala:132:34]
wire _switch_allocator_io_credit_alloc_1_0_alloc; // @[Router.scala:132:34]
wire _switch_allocator_io_credit_alloc_1_2_alloc; // @[Router.scala:132:34]
wire _switch_allocator_io_credit_alloc_0_0_alloc; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_5_0_4_0; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_5_0_3_0; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_5_0_2_0; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_5_0_1_0; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_5_0_0_0; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_4_0_4_0; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_4_0_3_0; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_4_0_2_0; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_4_0_1_0; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_4_0_0_0; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_3_0_4_0; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_3_0_3_0; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_3_0_2_0; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_3_0_1_0; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_3_0_0_0; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_2_0_4_0; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_2_0_3_0; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_2_0_2_0; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_2_0_1_0; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_2_0_0_0; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_1_0_4_0; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_1_0_3_0; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_1_0_2_0; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_1_0_1_0; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_1_0_0_0; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_0_0_4_0; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_0_0_3_0; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_0_0_2_0; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_0_0_1_0; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_0_0_0_0; // @[Router.scala:132:34]
wire _switch_io_out_5_0_valid; // @[Router.scala:131:24]
wire _switch_io_out_5_0_bits_head; // @[Router.scala:131:24]
wire _switch_io_out_5_0_bits_tail; // @[Router.scala:131:24]
wire [144:0] _switch_io_out_5_0_bits_payload; // @[Router.scala:131:24]
wire [3:0] _switch_io_out_5_0_bits_flow_ingress_node; // @[Router.scala:131:24]
wire [2:0] _switch_io_out_5_0_bits_flow_ingress_node_id; // @[Router.scala:131:24]
wire _switch_io_out_4_0_valid; // @[Router.scala:131:24]
wire _switch_io_out_4_0_bits_head; // @[Router.scala:131:24]
wire _switch_io_out_4_0_bits_tail; // @[Router.scala:131:24]
wire [144:0] _switch_io_out_4_0_bits_payload; // @[Router.scala:131:24]
wire [3:0] _switch_io_out_4_0_bits_flow_ingress_node; // @[Router.scala:131:24]
wire [2:0] _switch_io_out_4_0_bits_flow_ingress_node_id; // @[Router.scala:131:24]
wire _switch_io_out_3_0_valid; // @[Router.scala:131:24]
wire _switch_io_out_3_0_bits_head; // @[Router.scala:131:24]
wire _switch_io_out_3_0_bits_tail; // @[Router.scala:131:24]
wire [144:0] _switch_io_out_3_0_bits_payload; // @[Router.scala:131:24]
wire [1:0] _switch_io_out_3_0_bits_flow_vnet_id; // @[Router.scala:131:24]
wire [3:0] _switch_io_out_3_0_bits_flow_ingress_node; // @[Router.scala:131:24]
wire [2:0] _switch_io_out_3_0_bits_flow_ingress_node_id; // @[Router.scala:131:24]
wire [3:0] _switch_io_out_3_0_bits_flow_egress_node; // @[Router.scala:131:24]
wire [1:0] _switch_io_out_3_0_bits_flow_egress_node_id; // @[Router.scala:131:24]
wire [1:0] _switch_io_out_3_0_bits_virt_channel_id; // @[Router.scala:131:24]
wire _switch_io_out_2_0_valid; // @[Router.scala:131:24]
wire _switch_io_out_2_0_bits_head; // @[Router.scala:131:24]
wire _switch_io_out_2_0_bits_tail; // @[Router.scala:131:24]
wire [144:0] _switch_io_out_2_0_bits_payload; // @[Router.scala:131:24]
wire [1:0] _switch_io_out_2_0_bits_flow_vnet_id; // @[Router.scala:131:24]
wire [3:0] _switch_io_out_2_0_bits_flow_ingress_node; // @[Router.scala:131:24]
wire [2:0] _switch_io_out_2_0_bits_flow_ingress_node_id; // @[Router.scala:131:24]
wire [3:0] _switch_io_out_2_0_bits_flow_egress_node; // @[Router.scala:131:24]
wire [1:0] _switch_io_out_2_0_bits_flow_egress_node_id; // @[Router.scala:131:24]
wire [1:0] _switch_io_out_2_0_bits_virt_channel_id; // @[Router.scala:131:24]
wire _switch_io_out_1_0_valid; // @[Router.scala:131:24]
wire _switch_io_out_1_0_bits_head; // @[Router.scala:131:24]
wire _switch_io_out_1_0_bits_tail; // @[Router.scala:131:24]
wire [144:0] _switch_io_out_1_0_bits_payload; // @[Router.scala:131:24]
wire [1:0] _switch_io_out_1_0_bits_flow_vnet_id; // @[Router.scala:131:24]
wire [3:0] _switch_io_out_1_0_bits_flow_ingress_node; // @[Router.scala:131:24]
wire [2:0] _switch_io_out_1_0_bits_flow_ingress_node_id; // @[Router.scala:131:24]
wire [3:0] _switch_io_out_1_0_bits_flow_egress_node; // @[Router.scala:131:24]
wire [1:0] _switch_io_out_1_0_bits_flow_egress_node_id; // @[Router.scala:131:24]
wire [1:0] _switch_io_out_1_0_bits_virt_channel_id; // @[Router.scala:131:24]
wire _switch_io_out_0_0_valid; // @[Router.scala:131:24]
wire _switch_io_out_0_0_bits_head; // @[Router.scala:131:24]
wire _switch_io_out_0_0_bits_tail; // @[Router.scala:131:24]
wire [144:0] _switch_io_out_0_0_bits_payload; // @[Router.scala:131:24]
wire [1:0] _switch_io_out_0_0_bits_flow_vnet_id; // @[Router.scala:131:24]
wire [3:0] _switch_io_out_0_0_bits_flow_ingress_node; // @[Router.scala:131:24]
wire [2:0] _switch_io_out_0_0_bits_flow_ingress_node_id; // @[Router.scala:131:24]
wire [3:0] _switch_io_out_0_0_bits_flow_egress_node; // @[Router.scala:131:24]
wire [1:0] _switch_io_out_0_0_bits_flow_egress_node_id; // @[Router.scala:131:24]
wire [1:0] _switch_io_out_0_0_bits_virt_channel_id; // @[Router.scala:131:24]
wire _egress_unit_5_to_16_io_credit_available_0; // @[Router.scala:125:13]
wire _egress_unit_5_to_16_io_channel_status_0_occupied; // @[Router.scala:125:13]
wire _egress_unit_5_to_16_io_out_valid; // @[Router.scala:125:13]
wire _egress_unit_4_to_15_io_credit_available_0; // @[Router.scala:125:13]
wire _egress_unit_4_to_15_io_channel_status_0_occupied; // @[Router.scala:125:13]
wire _egress_unit_4_to_15_io_out_valid; // @[Router.scala:125:13]
wire _output_unit_3_to_9_io_credit_available_0; // @[Router.scala:122:13]
wire _output_unit_3_to_9_io_credit_available_1; // @[Router.scala:122:13]
wire _output_unit_3_to_9_io_credit_available_2; // @[Router.scala:122:13]
wire _output_unit_3_to_9_io_channel_status_0_occupied; // @[Router.scala:122:13]
wire _output_unit_3_to_9_io_channel_status_1_occupied; // @[Router.scala:122:13]
wire _output_unit_3_to_9_io_channel_status_2_occupied; // @[Router.scala:122:13]
wire _output_unit_2_to_6_io_credit_available_0; // @[Router.scala:122:13]
wire _output_unit_2_to_6_io_credit_available_1; // @[Router.scala:122:13]
wire _output_unit_2_to_6_io_credit_available_2; // @[Router.scala:122:13]
wire _output_unit_2_to_6_io_channel_status_0_occupied; // @[Router.scala:122:13]
wire _output_unit_2_to_6_io_channel_status_1_occupied; // @[Router.scala:122:13]
wire _output_unit_2_to_6_io_channel_status_2_occupied; // @[Router.scala:122:13]
wire _output_unit_1_to_4_io_credit_available_0; // @[Router.scala:122:13]
wire _output_unit_1_to_4_io_credit_available_2; // @[Router.scala:122:13]
wire _output_unit_1_to_4_io_channel_status_0_occupied; // @[Router.scala:122:13]
wire _output_unit_1_to_4_io_channel_status_2_occupied; // @[Router.scala:122:13]
wire _output_unit_0_to_1_io_credit_available_0; // @[Router.scala:122:13]
wire _output_unit_0_to_1_io_channel_status_0_occupied; // @[Router.scala:122:13]
wire [3:0] _ingress_unit_4_from_27_io_router_req_bits_flow_egress_node; // @[Router.scala:116:13]
wire [1:0] _ingress_unit_4_from_27_io_router_req_bits_flow_egress_node_id; // @[Router.scala:116:13]
wire _ingress_unit_4_from_27_io_vcalloc_req_valid; // @[Router.scala:116:13]
wire _ingress_unit_4_from_27_io_vcalloc_req_bits_vc_sel_5_0; // @[Router.scala:116:13]
wire _ingress_unit_4_from_27_io_vcalloc_req_bits_vc_sel_4_0; // @[Router.scala:116:13]
wire _ingress_unit_4_from_27_io_vcalloc_req_bits_vc_sel_3_0; // @[Router.scala:116:13]
wire _ingress_unit_4_from_27_io_vcalloc_req_bits_vc_sel_3_1; // @[Router.scala:116:13]
wire _ingress_unit_4_from_27_io_vcalloc_req_bits_vc_sel_3_2; // @[Router.scala:116:13]
wire _ingress_unit_4_from_27_io_vcalloc_req_bits_vc_sel_2_0; // @[Router.scala:116:13]
wire _ingress_unit_4_from_27_io_vcalloc_req_bits_vc_sel_2_1; // @[Router.scala:116:13]
wire _ingress_unit_4_from_27_io_vcalloc_req_bits_vc_sel_2_2; // @[Router.scala:116:13]
wire _ingress_unit_4_from_27_io_vcalloc_req_bits_vc_sel_1_0; // @[Router.scala:116:13]
wire _ingress_unit_4_from_27_io_vcalloc_req_bits_vc_sel_1_1; // @[Router.scala:116:13]
wire _ingress_unit_4_from_27_io_vcalloc_req_bits_vc_sel_1_2; // @[Router.scala:116:13]
wire _ingress_unit_4_from_27_io_vcalloc_req_bits_vc_sel_0_0; // @[Router.scala:116:13]
wire _ingress_unit_4_from_27_io_vcalloc_req_bits_vc_sel_0_1; // @[Router.scala:116:13]
wire _ingress_unit_4_from_27_io_vcalloc_req_bits_vc_sel_0_2; // @[Router.scala:116:13]
wire _ingress_unit_4_from_27_io_salloc_req_0_valid; // @[Router.scala:116:13]
wire _ingress_unit_4_from_27_io_salloc_req_0_bits_vc_sel_5_0; // @[Router.scala:116:13]
wire _ingress_unit_4_from_27_io_salloc_req_0_bits_vc_sel_4_0; // @[Router.scala:116:13]
wire _ingress_unit_4_from_27_io_salloc_req_0_bits_vc_sel_3_0; // @[Router.scala:116:13]
wire _ingress_unit_4_from_27_io_salloc_req_0_bits_vc_sel_3_1; // @[Router.scala:116:13]
wire _ingress_unit_4_from_27_io_salloc_req_0_bits_vc_sel_3_2; // @[Router.scala:116:13]
wire _ingress_unit_4_from_27_io_salloc_req_0_bits_vc_sel_2_0; // @[Router.scala:116:13]
wire _ingress_unit_4_from_27_io_salloc_req_0_bits_vc_sel_2_1; // @[Router.scala:116:13]
wire _ingress_unit_4_from_27_io_salloc_req_0_bits_vc_sel_2_2; // @[Router.scala:116:13]
wire _ingress_unit_4_from_27_io_salloc_req_0_bits_vc_sel_1_0; // @[Router.scala:116:13]
wire _ingress_unit_4_from_27_io_salloc_req_0_bits_vc_sel_1_1; // @[Router.scala:116:13]
wire _ingress_unit_4_from_27_io_salloc_req_0_bits_vc_sel_1_2; // @[Router.scala:116:13]
wire _ingress_unit_4_from_27_io_salloc_req_0_bits_vc_sel_0_0; // @[Router.scala:116:13]
wire _ingress_unit_4_from_27_io_salloc_req_0_bits_vc_sel_0_1; // @[Router.scala:116:13]
wire _ingress_unit_4_from_27_io_salloc_req_0_bits_vc_sel_0_2; // @[Router.scala:116:13]
wire _ingress_unit_4_from_27_io_salloc_req_0_bits_tail; // @[Router.scala:116:13]
wire _ingress_unit_4_from_27_io_out_0_valid; // @[Router.scala:116:13]
wire _ingress_unit_4_from_27_io_out_0_bits_flit_head; // @[Router.scala:116:13]
wire _ingress_unit_4_from_27_io_out_0_bits_flit_tail; // @[Router.scala:116:13]
wire [144:0] _ingress_unit_4_from_27_io_out_0_bits_flit_payload; // @[Router.scala:116:13]
wire [1:0] _ingress_unit_4_from_27_io_out_0_bits_flit_flow_vnet_id; // @[Router.scala:116:13]
wire [3:0] _ingress_unit_4_from_27_io_out_0_bits_flit_flow_ingress_node; // @[Router.scala:116:13]
wire [2:0] _ingress_unit_4_from_27_io_out_0_bits_flit_flow_ingress_node_id; // @[Router.scala:116:13]
wire [3:0] _ingress_unit_4_from_27_io_out_0_bits_flit_flow_egress_node; // @[Router.scala:116:13]
wire [1:0] _ingress_unit_4_from_27_io_out_0_bits_flit_flow_egress_node_id; // @[Router.scala:116:13]
wire [1:0] _ingress_unit_4_from_27_io_out_0_bits_out_virt_channel; // @[Router.scala:116:13]
wire _ingress_unit_4_from_27_io_in_ready; // @[Router.scala:116:13]
wire [1:0] _input_unit_3_from_9_io_router_req_bits_src_virt_id; // @[Router.scala:112:13]
wire [1:0] _input_unit_3_from_9_io_router_req_bits_flow_vnet_id; // @[Router.scala:112:13]
wire [3:0] _input_unit_3_from_9_io_router_req_bits_flow_ingress_node; // @[Router.scala:112:13]
wire [2:0] _input_unit_3_from_9_io_router_req_bits_flow_ingress_node_id; // @[Router.scala:112:13]
wire [3:0] _input_unit_3_from_9_io_router_req_bits_flow_egress_node; // @[Router.scala:112:13]
wire [1:0] _input_unit_3_from_9_io_router_req_bits_flow_egress_node_id; // @[Router.scala:112:13]
wire _input_unit_3_from_9_io_vcalloc_req_valid; // @[Router.scala:112:13]
wire _input_unit_3_from_9_io_vcalloc_req_bits_vc_sel_5_0; // @[Router.scala:112:13]
wire _input_unit_3_from_9_io_vcalloc_req_bits_vc_sel_4_0; // @[Router.scala:112:13]
wire _input_unit_3_from_9_io_vcalloc_req_bits_vc_sel_3_0; // @[Router.scala:112:13]
wire _input_unit_3_from_9_io_vcalloc_req_bits_vc_sel_3_1; // @[Router.scala:112:13]
wire _input_unit_3_from_9_io_vcalloc_req_bits_vc_sel_3_2; // @[Router.scala:112:13]
wire _input_unit_3_from_9_io_vcalloc_req_bits_vc_sel_2_0; // @[Router.scala:112:13]
wire _input_unit_3_from_9_io_vcalloc_req_bits_vc_sel_2_1; // @[Router.scala:112:13]
wire _input_unit_3_from_9_io_vcalloc_req_bits_vc_sel_2_2; // @[Router.scala:112:13]
wire _input_unit_3_from_9_io_vcalloc_req_bits_vc_sel_1_0; // @[Router.scala:112:13]
wire _input_unit_3_from_9_io_vcalloc_req_bits_vc_sel_1_1; // @[Router.scala:112:13]
wire _input_unit_3_from_9_io_vcalloc_req_bits_vc_sel_1_2; // @[Router.scala:112:13]
wire _input_unit_3_from_9_io_vcalloc_req_bits_vc_sel_0_0; // @[Router.scala:112:13]
wire _input_unit_3_from_9_io_vcalloc_req_bits_vc_sel_0_1; // @[Router.scala:112:13]
wire _input_unit_3_from_9_io_vcalloc_req_bits_vc_sel_0_2; // @[Router.scala:112:13]
wire _input_unit_3_from_9_io_salloc_req_0_valid; // @[Router.scala:112:13]
wire _input_unit_3_from_9_io_salloc_req_0_bits_vc_sel_5_0; // @[Router.scala:112:13]
wire _input_unit_3_from_9_io_salloc_req_0_bits_vc_sel_4_0; // @[Router.scala:112:13]
wire _input_unit_3_from_9_io_salloc_req_0_bits_vc_sel_3_0; // @[Router.scala:112:13]
wire _input_unit_3_from_9_io_salloc_req_0_bits_vc_sel_3_1; // @[Router.scala:112:13]
wire _input_unit_3_from_9_io_salloc_req_0_bits_vc_sel_3_2; // @[Router.scala:112:13]
wire _input_unit_3_from_9_io_salloc_req_0_bits_vc_sel_2_0; // @[Router.scala:112:13]
wire _input_unit_3_from_9_io_salloc_req_0_bits_vc_sel_2_1; // @[Router.scala:112:13]
wire _input_unit_3_from_9_io_salloc_req_0_bits_vc_sel_2_2; // @[Router.scala:112:13]
wire _input_unit_3_from_9_io_salloc_req_0_bits_vc_sel_1_0; // @[Router.scala:112:13]
wire _input_unit_3_from_9_io_salloc_req_0_bits_vc_sel_1_1; // @[Router.scala:112:13]
wire _input_unit_3_from_9_io_salloc_req_0_bits_vc_sel_1_2; // @[Router.scala:112:13]
wire _input_unit_3_from_9_io_salloc_req_0_bits_vc_sel_0_0; // @[Router.scala:112:13]
wire _input_unit_3_from_9_io_salloc_req_0_bits_vc_sel_0_1; // @[Router.scala:112:13]
wire _input_unit_3_from_9_io_salloc_req_0_bits_vc_sel_0_2; // @[Router.scala:112:13]
wire _input_unit_3_from_9_io_salloc_req_0_bits_tail; // @[Router.scala:112:13]
wire _input_unit_3_from_9_io_out_0_valid; // @[Router.scala:112:13]
wire _input_unit_3_from_9_io_out_0_bits_flit_head; // @[Router.scala:112:13]
wire _input_unit_3_from_9_io_out_0_bits_flit_tail; // @[Router.scala:112:13]
wire [144:0] _input_unit_3_from_9_io_out_0_bits_flit_payload; // @[Router.scala:112:13]
wire [1:0] _input_unit_3_from_9_io_out_0_bits_flit_flow_vnet_id; // @[Router.scala:112:13]
wire [3:0] _input_unit_3_from_9_io_out_0_bits_flit_flow_ingress_node; // @[Router.scala:112:13]
wire [2:0] _input_unit_3_from_9_io_out_0_bits_flit_flow_ingress_node_id; // @[Router.scala:112:13]
wire [3:0] _input_unit_3_from_9_io_out_0_bits_flit_flow_egress_node; // @[Router.scala:112:13]
wire [1:0] _input_unit_3_from_9_io_out_0_bits_flit_flow_egress_node_id; // @[Router.scala:112:13]
wire [1:0] _input_unit_3_from_9_io_out_0_bits_out_virt_channel; // @[Router.scala:112:13]
wire [1:0] _input_unit_2_from_6_io_router_req_bits_src_virt_id; // @[Router.scala:112:13]
wire [1:0] _input_unit_2_from_6_io_router_req_bits_flow_vnet_id; // @[Router.scala:112:13]
wire [3:0] _input_unit_2_from_6_io_router_req_bits_flow_ingress_node; // @[Router.scala:112:13]
wire [2:0] _input_unit_2_from_6_io_router_req_bits_flow_ingress_node_id; // @[Router.scala:112:13]
wire [3:0] _input_unit_2_from_6_io_router_req_bits_flow_egress_node; // @[Router.scala:112:13]
wire [1:0] _input_unit_2_from_6_io_router_req_bits_flow_egress_node_id; // @[Router.scala:112:13]
wire _input_unit_2_from_6_io_vcalloc_req_valid; // @[Router.scala:112:13]
wire _input_unit_2_from_6_io_vcalloc_req_bits_vc_sel_5_0; // @[Router.scala:112:13]
wire _input_unit_2_from_6_io_vcalloc_req_bits_vc_sel_4_0; // @[Router.scala:112:13]
wire _input_unit_2_from_6_io_vcalloc_req_bits_vc_sel_3_0; // @[Router.scala:112:13]
wire _input_unit_2_from_6_io_vcalloc_req_bits_vc_sel_3_1; // @[Router.scala:112:13]
wire _input_unit_2_from_6_io_vcalloc_req_bits_vc_sel_3_2; // @[Router.scala:112:13]
wire _input_unit_2_from_6_io_vcalloc_req_bits_vc_sel_2_0; // @[Router.scala:112:13]
wire _input_unit_2_from_6_io_vcalloc_req_bits_vc_sel_2_1; // @[Router.scala:112:13]
wire _input_unit_2_from_6_io_vcalloc_req_bits_vc_sel_2_2; // @[Router.scala:112:13]
wire _input_unit_2_from_6_io_vcalloc_req_bits_vc_sel_1_0; // @[Router.scala:112:13]
wire _input_unit_2_from_6_io_vcalloc_req_bits_vc_sel_1_1; // @[Router.scala:112:13]
wire _input_unit_2_from_6_io_vcalloc_req_bits_vc_sel_1_2; // @[Router.scala:112:13]
wire _input_unit_2_from_6_io_vcalloc_req_bits_vc_sel_0_0; // @[Router.scala:112:13]
wire _input_unit_2_from_6_io_vcalloc_req_bits_vc_sel_0_1; // @[Router.scala:112:13]
wire _input_unit_2_from_6_io_vcalloc_req_bits_vc_sel_0_2; // @[Router.scala:112:13]
wire _input_unit_2_from_6_io_salloc_req_0_valid; // @[Router.scala:112:13]
wire _input_unit_2_from_6_io_salloc_req_0_bits_vc_sel_5_0; // @[Router.scala:112:13]
wire _input_unit_2_from_6_io_salloc_req_0_bits_vc_sel_4_0; // @[Router.scala:112:13]
wire _input_unit_2_from_6_io_salloc_req_0_bits_vc_sel_3_0; // @[Router.scala:112:13]
wire _input_unit_2_from_6_io_salloc_req_0_bits_vc_sel_3_1; // @[Router.scala:112:13]
wire _input_unit_2_from_6_io_salloc_req_0_bits_vc_sel_3_2; // @[Router.scala:112:13]
wire _input_unit_2_from_6_io_salloc_req_0_bits_vc_sel_2_0; // @[Router.scala:112:13]
wire _input_unit_2_from_6_io_salloc_req_0_bits_vc_sel_2_1; // @[Router.scala:112:13]
wire _input_unit_2_from_6_io_salloc_req_0_bits_vc_sel_2_2; // @[Router.scala:112:13]
wire _input_unit_2_from_6_io_salloc_req_0_bits_vc_sel_1_0; // @[Router.scala:112:13]
wire _input_unit_2_from_6_io_salloc_req_0_bits_vc_sel_1_1; // @[Router.scala:112:13]
wire _input_unit_2_from_6_io_salloc_req_0_bits_vc_sel_1_2; // @[Router.scala:112:13]
wire _input_unit_2_from_6_io_salloc_req_0_bits_vc_sel_0_0; // @[Router.scala:112:13]
wire _input_unit_2_from_6_io_salloc_req_0_bits_vc_sel_0_1; // @[Router.scala:112:13]
wire _input_unit_2_from_6_io_salloc_req_0_bits_vc_sel_0_2; // @[Router.scala:112:13]
wire _input_unit_2_from_6_io_salloc_req_0_bits_tail; // @[Router.scala:112:13]
wire _input_unit_2_from_6_io_out_0_valid; // @[Router.scala:112:13]
wire _input_unit_2_from_6_io_out_0_bits_flit_head; // @[Router.scala:112:13]
wire _input_unit_2_from_6_io_out_0_bits_flit_tail; // @[Router.scala:112:13]
wire [144:0] _input_unit_2_from_6_io_out_0_bits_flit_payload; // @[Router.scala:112:13]
wire [1:0] _input_unit_2_from_6_io_out_0_bits_flit_flow_vnet_id; // @[Router.scala:112:13]
wire [3:0] _input_unit_2_from_6_io_out_0_bits_flit_flow_ingress_node; // @[Router.scala:112:13]
wire [2:0] _input_unit_2_from_6_io_out_0_bits_flit_flow_ingress_node_id; // @[Router.scala:112:13]
wire [3:0] _input_unit_2_from_6_io_out_0_bits_flit_flow_egress_node; // @[Router.scala:112:13]
wire [1:0] _input_unit_2_from_6_io_out_0_bits_flit_flow_egress_node_id; // @[Router.scala:112:13]
wire [1:0] _input_unit_2_from_6_io_out_0_bits_out_virt_channel; // @[Router.scala:112:13]
wire [1:0] _input_unit_1_from_4_io_router_req_bits_src_virt_id; // @[Router.scala:112:13]
wire [1:0] _input_unit_1_from_4_io_router_req_bits_flow_vnet_id; // @[Router.scala:112:13]
wire [3:0] _input_unit_1_from_4_io_router_req_bits_flow_ingress_node; // @[Router.scala:112:13]
wire [2:0] _input_unit_1_from_4_io_router_req_bits_flow_ingress_node_id; // @[Router.scala:112:13]
wire [3:0] _input_unit_1_from_4_io_router_req_bits_flow_egress_node; // @[Router.scala:112:13]
wire [1:0] _input_unit_1_from_4_io_router_req_bits_flow_egress_node_id; // @[Router.scala:112:13]
wire _input_unit_1_from_4_io_vcalloc_req_valid; // @[Router.scala:112:13]
wire _input_unit_1_from_4_io_vcalloc_req_bits_vc_sel_5_0; // @[Router.scala:112:13]
wire _input_unit_1_from_4_io_vcalloc_req_bits_vc_sel_4_0; // @[Router.scala:112:13]
wire _input_unit_1_from_4_io_vcalloc_req_bits_vc_sel_3_0; // @[Router.scala:112:13]
wire _input_unit_1_from_4_io_vcalloc_req_bits_vc_sel_3_1; // @[Router.scala:112:13]
wire _input_unit_1_from_4_io_vcalloc_req_bits_vc_sel_3_2; // @[Router.scala:112:13]
wire _input_unit_1_from_4_io_vcalloc_req_bits_vc_sel_2_0; // @[Router.scala:112:13]
wire _input_unit_1_from_4_io_vcalloc_req_bits_vc_sel_2_1; // @[Router.scala:112:13]
wire _input_unit_1_from_4_io_vcalloc_req_bits_vc_sel_2_2; // @[Router.scala:112:13]
wire _input_unit_1_from_4_io_vcalloc_req_bits_vc_sel_1_0; // @[Router.scala:112:13]
wire _input_unit_1_from_4_io_vcalloc_req_bits_vc_sel_1_1; // @[Router.scala:112:13]
wire _input_unit_1_from_4_io_vcalloc_req_bits_vc_sel_1_2; // @[Router.scala:112:13]
wire _input_unit_1_from_4_io_vcalloc_req_bits_vc_sel_0_0; // @[Router.scala:112:13]
wire _input_unit_1_from_4_io_vcalloc_req_bits_vc_sel_0_1; // @[Router.scala:112:13]
wire _input_unit_1_from_4_io_vcalloc_req_bits_vc_sel_0_2; // @[Router.scala:112:13]
wire _input_unit_1_from_4_io_salloc_req_0_valid; // @[Router.scala:112:13]
wire _input_unit_1_from_4_io_salloc_req_0_bits_vc_sel_5_0; // @[Router.scala:112:13]
wire _input_unit_1_from_4_io_salloc_req_0_bits_vc_sel_4_0; // @[Router.scala:112:13]
wire _input_unit_1_from_4_io_salloc_req_0_bits_vc_sel_3_0; // @[Router.scala:112:13]
wire _input_unit_1_from_4_io_salloc_req_0_bits_vc_sel_3_1; // @[Router.scala:112:13]
wire _input_unit_1_from_4_io_salloc_req_0_bits_vc_sel_3_2; // @[Router.scala:112:13]
wire _input_unit_1_from_4_io_salloc_req_0_bits_vc_sel_2_0; // @[Router.scala:112:13]
wire _input_unit_1_from_4_io_salloc_req_0_bits_vc_sel_2_1; // @[Router.scala:112:13]
wire _input_unit_1_from_4_io_salloc_req_0_bits_vc_sel_2_2; // @[Router.scala:112:13]
wire _input_unit_1_from_4_io_salloc_req_0_bits_vc_sel_1_0; // @[Router.scala:112:13]
wire _input_unit_1_from_4_io_salloc_req_0_bits_vc_sel_1_1; // @[Router.scala:112:13]
wire _input_unit_1_from_4_io_salloc_req_0_bits_vc_sel_1_2; // @[Router.scala:112:13]
wire _input_unit_1_from_4_io_salloc_req_0_bits_vc_sel_0_0; // @[Router.scala:112:13]
wire _input_unit_1_from_4_io_salloc_req_0_bits_vc_sel_0_1; // @[Router.scala:112:13]
wire _input_unit_1_from_4_io_salloc_req_0_bits_vc_sel_0_2; // @[Router.scala:112:13]
wire _input_unit_1_from_4_io_salloc_req_0_bits_tail; // @[Router.scala:112:13]
wire _input_unit_1_from_4_io_out_0_valid; // @[Router.scala:112:13]
wire _input_unit_1_from_4_io_out_0_bits_flit_head; // @[Router.scala:112:13]
wire _input_unit_1_from_4_io_out_0_bits_flit_tail; // @[Router.scala:112:13]
wire [144:0] _input_unit_1_from_4_io_out_0_bits_flit_payload; // @[Router.scala:112:13]
wire [1:0] _input_unit_1_from_4_io_out_0_bits_flit_flow_vnet_id; // @[Router.scala:112:13]
wire [3:0] _input_unit_1_from_4_io_out_0_bits_flit_flow_ingress_node; // @[Router.scala:112:13]
wire [2:0] _input_unit_1_from_4_io_out_0_bits_flit_flow_ingress_node_id; // @[Router.scala:112:13]
wire [3:0] _input_unit_1_from_4_io_out_0_bits_flit_flow_egress_node; // @[Router.scala:112:13]
wire [1:0] _input_unit_1_from_4_io_out_0_bits_flit_flow_egress_node_id; // @[Router.scala:112:13]
wire [1:0] _input_unit_1_from_4_io_out_0_bits_out_virt_channel; // @[Router.scala:112:13]
wire [1:0] _input_unit_0_from_1_io_router_req_bits_src_virt_id; // @[Router.scala:112:13]
wire [1:0] _input_unit_0_from_1_io_router_req_bits_flow_vnet_id; // @[Router.scala:112:13]
wire [3:0] _input_unit_0_from_1_io_router_req_bits_flow_ingress_node; // @[Router.scala:112:13]
wire [2:0] _input_unit_0_from_1_io_router_req_bits_flow_ingress_node_id; // @[Router.scala:112:13]
wire [3:0] _input_unit_0_from_1_io_router_req_bits_flow_egress_node; // @[Router.scala:112:13]
wire [1:0] _input_unit_0_from_1_io_router_req_bits_flow_egress_node_id; // @[Router.scala:112:13]
wire _input_unit_0_from_1_io_vcalloc_req_valid; // @[Router.scala:112:13]
wire _input_unit_0_from_1_io_vcalloc_req_bits_vc_sel_5_0; // @[Router.scala:112:13]
wire _input_unit_0_from_1_io_vcalloc_req_bits_vc_sel_4_0; // @[Router.scala:112:13]
wire _input_unit_0_from_1_io_vcalloc_req_bits_vc_sel_3_0; // @[Router.scala:112:13]
wire _input_unit_0_from_1_io_vcalloc_req_bits_vc_sel_3_1; // @[Router.scala:112:13]
wire _input_unit_0_from_1_io_vcalloc_req_bits_vc_sel_3_2; // @[Router.scala:112:13]
wire _input_unit_0_from_1_io_vcalloc_req_bits_vc_sel_2_0; // @[Router.scala:112:13]
wire _input_unit_0_from_1_io_vcalloc_req_bits_vc_sel_2_1; // @[Router.scala:112:13]
wire _input_unit_0_from_1_io_vcalloc_req_bits_vc_sel_2_2; // @[Router.scala:112:13]
wire _input_unit_0_from_1_io_vcalloc_req_bits_vc_sel_1_0; // @[Router.scala:112:13]
wire _input_unit_0_from_1_io_vcalloc_req_bits_vc_sel_1_1; // @[Router.scala:112:13]
wire _input_unit_0_from_1_io_vcalloc_req_bits_vc_sel_1_2; // @[Router.scala:112:13]
wire _input_unit_0_from_1_io_vcalloc_req_bits_vc_sel_0_0; // @[Router.scala:112:13]
wire _input_unit_0_from_1_io_vcalloc_req_bits_vc_sel_0_1; // @[Router.scala:112:13]
wire _input_unit_0_from_1_io_vcalloc_req_bits_vc_sel_0_2; // @[Router.scala:112:13]
wire _input_unit_0_from_1_io_salloc_req_0_valid; // @[Router.scala:112:13]
wire _input_unit_0_from_1_io_salloc_req_0_bits_vc_sel_5_0; // @[Router.scala:112:13]
wire _input_unit_0_from_1_io_salloc_req_0_bits_vc_sel_4_0; // @[Router.scala:112:13]
wire _input_unit_0_from_1_io_salloc_req_0_bits_vc_sel_3_0; // @[Router.scala:112:13]
wire _input_unit_0_from_1_io_salloc_req_0_bits_vc_sel_3_1; // @[Router.scala:112:13]
wire _input_unit_0_from_1_io_salloc_req_0_bits_vc_sel_3_2; // @[Router.scala:112:13]
wire _input_unit_0_from_1_io_salloc_req_0_bits_vc_sel_2_0; // @[Router.scala:112:13]
wire _input_unit_0_from_1_io_salloc_req_0_bits_vc_sel_2_1; // @[Router.scala:112:13]
wire _input_unit_0_from_1_io_salloc_req_0_bits_vc_sel_2_2; // @[Router.scala:112:13]
wire _input_unit_0_from_1_io_salloc_req_0_bits_vc_sel_1_0; // @[Router.scala:112:13]
wire _input_unit_0_from_1_io_salloc_req_0_bits_vc_sel_1_1; // @[Router.scala:112:13]
wire _input_unit_0_from_1_io_salloc_req_0_bits_vc_sel_1_2; // @[Router.scala:112:13]
wire _input_unit_0_from_1_io_salloc_req_0_bits_vc_sel_0_0; // @[Router.scala:112:13]
wire _input_unit_0_from_1_io_salloc_req_0_bits_vc_sel_0_1; // @[Router.scala:112:13]
wire _input_unit_0_from_1_io_salloc_req_0_bits_vc_sel_0_2; // @[Router.scala:112:13]
wire _input_unit_0_from_1_io_salloc_req_0_bits_tail; // @[Router.scala:112:13]
wire _input_unit_0_from_1_io_out_0_valid; // @[Router.scala:112:13]
wire _input_unit_0_from_1_io_out_0_bits_flit_head; // @[Router.scala:112:13]
wire _input_unit_0_from_1_io_out_0_bits_flit_tail; // @[Router.scala:112:13]
wire [144:0] _input_unit_0_from_1_io_out_0_bits_flit_payload; // @[Router.scala:112:13]
wire [1:0] _input_unit_0_from_1_io_out_0_bits_flit_flow_vnet_id; // @[Router.scala:112:13]
wire [3:0] _input_unit_0_from_1_io_out_0_bits_flit_flow_ingress_node; // @[Router.scala:112:13]
wire [2:0] _input_unit_0_from_1_io_out_0_bits_flit_flow_ingress_node_id; // @[Router.scala:112:13]
wire [3:0] _input_unit_0_from_1_io_out_0_bits_flit_flow_egress_node; // @[Router.scala:112:13]
wire [1:0] _input_unit_0_from_1_io_out_0_bits_flit_flow_egress_node_id; // @[Router.scala:112:13]
wire [1:0] _input_unit_0_from_1_io_out_0_bits_out_virt_channel; // @[Router.scala:112:13]
wire [2:0] fires_count = {1'h0, {1'h0, _vc_allocator_io_req_0_ready & _input_unit_0_from_1_io_vcalloc_req_valid} + {1'h0, _vc_allocator_io_req_1_ready & _input_unit_1_from_4_io_vcalloc_req_valid}} + {1'h0, {1'h0, _vc_allocator_io_req_2_ready & _input_unit_2_from_6_io_vcalloc_req_valid} + {1'h0, _vc_allocator_io_req_3_ready & _input_unit_3_from_9_io_vcalloc_req_valid} + {1'h0, _vc_allocator_io_req_4_ready & _ingress_unit_4_from_27_io_vcalloc_req_valid}}; // @[Decoupled.scala:51:35]
reg [63:0] debug_tsc; // @[Router.scala:195:28]
reg [63:0] debug_sample; // @[Router.scala:197:31]
wire _GEN = debug_sample == {44'h0, _plusarg_reader_out - 20'h1}; // @[PlusArg.scala:80:11]
reg [63:0] util_ctr; // @[Router.scala:203:29]
reg fired; // @[Router.scala:204:26]
wire _GEN_0 = (|_plusarg_reader_out) & _GEN; // @[PlusArg.scala:80:11]
wire _GEN_1 = _GEN_0 & fired; // @[Router.scala:204:26, :207:{33,71}]
reg [63:0] util_ctr_1; // @[Router.scala:203:29]
reg fired_1; // @[Router.scala:204:26]
wire _GEN_2 = _GEN_0 & fired_1; // @[Router.scala:204:26, :207:{33,71}]
reg [63:0] util_ctr_2; // @[Router.scala:203:29]
reg fired_2; // @[Router.scala:204:26]
wire _GEN_3 = _GEN_0 & fired_2; // @[Router.scala:204:26, :207:{33,71}]
reg [63:0] util_ctr_3; // @[Router.scala:203:29]
reg fired_3; // @[Router.scala:204:26]
wire _GEN_4 = _GEN_0 & fired_3; // @[Router.scala:204:26, :207:{33,71}]
reg [63:0] util_ctr_4; // @[Router.scala:203:29]
reg fired_4; // @[Router.scala:204:26]
wire _GEN_5 = _GEN_0 & fired_4; // @[Router.scala:204:26, :207:{33,71}]
reg [63:0] util_ctr_5; // @[Router.scala:203:29]
reg fired_5; // @[Router.scala:204:26]
wire _GEN_6 = _GEN_0 & fired_5; // @[Router.scala:204:26, :207:{33,71}]
reg [63:0] util_ctr_6; // @[Router.scala:203:29]
reg fired_6; // @[Router.scala:204:26]
wire _GEN_7 = _GEN_0 & fired_6; // @[Router.scala:204:26, :207:{33,71}] |
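The fires_count expression a few lines up is just a population count: each term ANDs a vc_allocator ready signal with the matching unit's vcalloc_req valid (a Decoupled fire), and the zero-extended additions sum the five results; the util_ctr/fired registers that follow appear to be per-channel utilization counters sampled on a plusarg-defined period. A minimal Chisel sketch of the counting part, assuming nothing beyond what is visible above (the module and port names here are illustrative, not taken from the router generator):

import chisel3._
import chisel3.util._

class FireCount(nReqs: Int = 5) extends Module {
  val io = IO(new Bundle {
    val ready = Input(Vec(nReqs, Bool()))   // vc_allocator io_req_*_ready
    val valid = Input(Vec(nReqs, Bool()))   // *_io_vcalloc_req_valid
    val count = Output(UInt(log2Ceil(nReqs + 1).W))
  })
  // A request fires when ready and valid are both high; PopCount sums the fires,
  // which is what the generated zero-extended adder tree computes.
  io.count := PopCount((io.ready zip io.valid).map { case (r, v) => r && v })
}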
Generate the Verilog code corresponding to the following Chisel files.
File MulAddRecFN.scala:
/*============================================================================
This Chisel source file is part of a pre-release version of the HardFloat IEEE
Floating-Point Arithmetic Package, by John R. Hauser (with some contributions
from Yunsup Lee and Andrew Waterman, mainly concerning testing).
Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the
University of California. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions, and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions, and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the University nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
=============================================================================*/
package hardfloat
import chisel3._
import chisel3.util._
import consts._
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
class MulAddRecFN_interIo(expWidth: Int, sigWidth: Int) extends Bundle
{
//*** ENCODE SOME OF THESE CASES IN FEWER BITS?:
val isSigNaNAny = Bool()
val isNaNAOrB = Bool()
val isInfA = Bool()
val isZeroA = Bool()
val isInfB = Bool()
val isZeroB = Bool()
val signProd = Bool()
val isNaNC = Bool()
val isInfC = Bool()
val isZeroC = Bool()
val sExpSum = SInt((expWidth + 2).W)
val doSubMags = Bool()
val CIsDominant = Bool()
val CDom_CAlignDist = UInt(log2Ceil(sigWidth + 1).W)
val highAlignedSigC = UInt((sigWidth + 2).W)
val bit0AlignedSigC = UInt(1.W)
}
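// Added commentary (not in the original hardfloat source): this bundle carries the
// classification and alignment state that the post-multiply stage cannot recover from
// the raw product alone. MulAddRecFNToRaw_preMul exposes mulAddA/mulAddB/mulAddC so the
// sigWidth-by-sigWidth multiply-add itself can be performed outside the module (see how
// MulAddRecFN below forms mulAddA * mulAddB +& mulAddC), allowing an implementation to
// share or pipeline that multiplier; MulAddRecFNToRaw_postMul then combines this bundle
// with the product to produce the raw (unrounded) result.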
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
class MulAddRecFNToRaw_preMul(expWidth: Int, sigWidth: Int) extends RawModule
{
override def desiredName = s"MulAddRecFNToRaw_preMul_e${expWidth}_s${sigWidth}"
val io = IO(new Bundle {
val op = Input(Bits(2.W))
val a = Input(Bits((expWidth + sigWidth + 1).W))
val b = Input(Bits((expWidth + sigWidth + 1).W))
val c = Input(Bits((expWidth + sigWidth + 1).W))
val mulAddA = Output(UInt(sigWidth.W))
val mulAddB = Output(UInt(sigWidth.W))
val mulAddC = Output(UInt((sigWidth * 2).W))
val toPostMul = Output(new MulAddRecFN_interIo(expWidth, sigWidth))
})
//------------------------------------------------------------------------
//------------------------------------------------------------------------
//*** POSSIBLE TO REDUCE THIS BY 1 OR 2 BITS? (CURRENTLY 2 BITS BETWEEN
//*** UNSHIFTED C AND PRODUCT):
val sigSumWidth = sigWidth * 3 + 3
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val rawA = rawFloatFromRecFN(expWidth, sigWidth, io.a)
val rawB = rawFloatFromRecFN(expWidth, sigWidth, io.b)
val rawC = rawFloatFromRecFN(expWidth, sigWidth, io.c)
val signProd = rawA.sign ^ rawB.sign ^ io.op(1)
//*** REVIEW THE BIAS FOR 'sExpAlignedProd':
val sExpAlignedProd =
rawA.sExp +& rawB.sExp + (-(BigInt(1)<<expWidth) + sigWidth + 3).S
val doSubMags = signProd ^ rawC.sign ^ io.op(0)
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val sNatCAlignDist = sExpAlignedProd - rawC.sExp
val posNatCAlignDist = sNatCAlignDist(expWidth + 1, 0)
val isMinCAlign = rawA.isZero || rawB.isZero || (sNatCAlignDist < 0.S)
val CIsDominant =
! rawC.isZero && (isMinCAlign || (posNatCAlignDist <= sigWidth.U))
val CAlignDist =
Mux(isMinCAlign,
0.U,
Mux(posNatCAlignDist < (sigSumWidth - 1).U,
posNatCAlignDist(log2Ceil(sigSumWidth) - 1, 0),
(sigSumWidth - 1).U
)
)
val mainAlignedSigC =
(Mux(doSubMags, ~rawC.sig, rawC.sig) ## Fill(sigSumWidth - sigWidth + 2, doSubMags)).asSInt>>CAlignDist
val reduced4CExtra =
(orReduceBy4(rawC.sig<<((sigSumWidth - sigWidth - 1) & 3)) &
lowMask(
CAlignDist>>2,
//*** NOT NEEDED?:
// (sigSumWidth + 2)>>2,
(sigSumWidth - 1)>>2,
(sigSumWidth - sigWidth - 1)>>2
)
).orR
val alignedSigC =
Cat(mainAlignedSigC>>3,
Mux(doSubMags,
mainAlignedSigC(2, 0).andR && ! reduced4CExtra,
mainAlignedSigC(2, 0).orR || reduced4CExtra
)
)
//------------------------------------------------------------------------
//------------------------------------------------------------------------
io.mulAddA := rawA.sig
io.mulAddB := rawB.sig
io.mulAddC := alignedSigC(sigWidth * 2, 1)
io.toPostMul.isSigNaNAny :=
isSigNaNRawFloat(rawA) || isSigNaNRawFloat(rawB) ||
isSigNaNRawFloat(rawC)
io.toPostMul.isNaNAOrB := rawA.isNaN || rawB.isNaN
io.toPostMul.isInfA := rawA.isInf
io.toPostMul.isZeroA := rawA.isZero
io.toPostMul.isInfB := rawB.isInf
io.toPostMul.isZeroB := rawB.isZero
io.toPostMul.signProd := signProd
io.toPostMul.isNaNC := rawC.isNaN
io.toPostMul.isInfC := rawC.isInf
io.toPostMul.isZeroC := rawC.isZero
io.toPostMul.sExpSum :=
Mux(CIsDominant, rawC.sExp, sExpAlignedProd - sigWidth.S)
io.toPostMul.doSubMags := doSubMags
io.toPostMul.CIsDominant := CIsDominant
io.toPostMul.CDom_CAlignDist := CAlignDist(log2Ceil(sigWidth + 1) - 1, 0)
io.toPostMul.highAlignedSigC :=
alignedSigC(sigSumWidth - 1, sigWidth * 2 + 1)
io.toPostMul.bit0AlignedSigC := alignedSigC(0)
}
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
class MulAddRecFNToRaw_postMul(expWidth: Int, sigWidth: Int) extends RawModule
{
override def desiredName = s"MulAddRecFNToRaw_postMul_e${expWidth}_s${sigWidth}"
val io = IO(new Bundle {
val fromPreMul = Input(new MulAddRecFN_interIo(expWidth, sigWidth))
val mulAddResult = Input(UInt((sigWidth * 2 + 1).W))
val roundingMode = Input(UInt(3.W))
val invalidExc = Output(Bool())
val rawOut = Output(new RawFloat(expWidth, sigWidth + 2))
})
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val sigSumWidth = sigWidth * 3 + 3
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val roundingMode_min = (io.roundingMode === round_min)
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val opSignC = io.fromPreMul.signProd ^ io.fromPreMul.doSubMags
val sigSum =
Cat(Mux(io.mulAddResult(sigWidth * 2),
io.fromPreMul.highAlignedSigC + 1.U,
io.fromPreMul.highAlignedSigC
),
io.mulAddResult(sigWidth * 2 - 1, 0),
io.fromPreMul.bit0AlignedSigC
)
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val CDom_sign = opSignC
val CDom_sExp = io.fromPreMul.sExpSum - io.fromPreMul.doSubMags.zext
val CDom_absSigSum =
Mux(io.fromPreMul.doSubMags,
~sigSum(sigSumWidth - 1, sigWidth + 1),
0.U(1.W) ##
//*** IF GAP IS REDUCED TO 1 BIT, MUST REDUCE THIS COMPONENT TO 1 BIT TOO:
io.fromPreMul.highAlignedSigC(sigWidth + 1, sigWidth) ##
sigSum(sigSumWidth - 3, sigWidth + 2)
)
val CDom_absSigSumExtra =
Mux(io.fromPreMul.doSubMags,
(~sigSum(sigWidth, 1)).orR,
sigSum(sigWidth + 1, 1).orR
)
val CDom_mainSig =
(CDom_absSigSum<<io.fromPreMul.CDom_CAlignDist)(
sigWidth * 2 + 1, sigWidth - 3)
val CDom_reduced4SigExtra =
(orReduceBy4(CDom_absSigSum(sigWidth - 1, 0)<<(~sigWidth & 3)) &
lowMask(io.fromPreMul.CDom_CAlignDist>>2, 0, sigWidth>>2)).orR
val CDom_sig =
Cat(CDom_mainSig>>3,
CDom_mainSig(2, 0).orR || CDom_reduced4SigExtra ||
CDom_absSigSumExtra
)
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val notCDom_signSigSum = sigSum(sigWidth * 2 + 3)
val notCDom_absSigSum =
Mux(notCDom_signSigSum,
~sigSum(sigWidth * 2 + 2, 0),
sigSum(sigWidth * 2 + 2, 0) + io.fromPreMul.doSubMags
)
val notCDom_reduced2AbsSigSum = orReduceBy2(notCDom_absSigSum)
val notCDom_normDistReduced2 = countLeadingZeros(notCDom_reduced2AbsSigSum)
val notCDom_nearNormDist = notCDom_normDistReduced2<<1
val notCDom_sExp = io.fromPreMul.sExpSum - notCDom_nearNormDist.asUInt.zext
val notCDom_mainSig =
(notCDom_absSigSum<<notCDom_nearNormDist)(
sigWidth * 2 + 3, sigWidth - 1)
val notCDom_reduced4SigExtra =
(orReduceBy2(
notCDom_reduced2AbsSigSum(sigWidth>>1, 0)<<((sigWidth>>1) & 1)) &
lowMask(notCDom_normDistReduced2>>1, 0, (sigWidth + 2)>>2)
).orR
val notCDom_sig =
Cat(notCDom_mainSig>>3,
notCDom_mainSig(2, 0).orR || notCDom_reduced4SigExtra
)
val notCDom_completeCancellation =
(notCDom_sig(sigWidth + 2, sigWidth + 1) === 0.U)
val notCDom_sign =
Mux(notCDom_completeCancellation,
roundingMode_min,
io.fromPreMul.signProd ^ notCDom_signSigSum
)
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val notNaN_isInfProd = io.fromPreMul.isInfA || io.fromPreMul.isInfB
val notNaN_isInfOut = notNaN_isInfProd || io.fromPreMul.isInfC
val notNaN_addZeros =
(io.fromPreMul.isZeroA || io.fromPreMul.isZeroB) &&
io.fromPreMul.isZeroC
io.invalidExc :=
io.fromPreMul.isSigNaNAny ||
(io.fromPreMul.isInfA && io.fromPreMul.isZeroB) ||
(io.fromPreMul.isZeroA && io.fromPreMul.isInfB) ||
(! io.fromPreMul.isNaNAOrB &&
(io.fromPreMul.isInfA || io.fromPreMul.isInfB) &&
io.fromPreMul.isInfC &&
io.fromPreMul.doSubMags)
io.rawOut.isNaN := io.fromPreMul.isNaNAOrB || io.fromPreMul.isNaNC
io.rawOut.isInf := notNaN_isInfOut
//*** IMPROVE?:
io.rawOut.isZero :=
notNaN_addZeros ||
(! io.fromPreMul.CIsDominant && notCDom_completeCancellation)
io.rawOut.sign :=
(notNaN_isInfProd && io.fromPreMul.signProd) ||
(io.fromPreMul.isInfC && opSignC) ||
(notNaN_addZeros && ! roundingMode_min &&
io.fromPreMul.signProd && opSignC) ||
(notNaN_addZeros && roundingMode_min &&
(io.fromPreMul.signProd || opSignC)) ||
(! notNaN_isInfOut && ! notNaN_addZeros &&
Mux(io.fromPreMul.CIsDominant, CDom_sign, notCDom_sign))
io.rawOut.sExp := Mux(io.fromPreMul.CIsDominant, CDom_sExp, notCDom_sExp)
io.rawOut.sig := Mux(io.fromPreMul.CIsDominant, CDom_sig, notCDom_sig)
}
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
class MulAddRecFN(expWidth: Int, sigWidth: Int) extends RawModule
{
override def desiredName = s"MulAddRecFN_e${expWidth}_s${sigWidth}"
val io = IO(new Bundle {
val op = Input(Bits(2.W))
val a = Input(Bits((expWidth + sigWidth + 1).W))
val b = Input(Bits((expWidth + sigWidth + 1).W))
val c = Input(Bits((expWidth + sigWidth + 1).W))
val roundingMode = Input(UInt(3.W))
val detectTininess = Input(UInt(1.W))
val out = Output(Bits((expWidth + sigWidth + 1).W))
val exceptionFlags = Output(Bits(5.W))
})
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val mulAddRecFNToRaw_preMul =
Module(new MulAddRecFNToRaw_preMul(expWidth, sigWidth))
val mulAddRecFNToRaw_postMul =
Module(new MulAddRecFNToRaw_postMul(expWidth, sigWidth))
mulAddRecFNToRaw_preMul.io.op := io.op
mulAddRecFNToRaw_preMul.io.a := io.a
mulAddRecFNToRaw_preMul.io.b := io.b
mulAddRecFNToRaw_preMul.io.c := io.c
val mulAddResult =
(mulAddRecFNToRaw_preMul.io.mulAddA *
mulAddRecFNToRaw_preMul.io.mulAddB) +&
mulAddRecFNToRaw_preMul.io.mulAddC
mulAddRecFNToRaw_postMul.io.fromPreMul :=
mulAddRecFNToRaw_preMul.io.toPostMul
mulAddRecFNToRaw_postMul.io.mulAddResult := mulAddResult
mulAddRecFNToRaw_postMul.io.roundingMode := io.roundingMode
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val roundRawFNToRecFN =
Module(new RoundRawFNToRecFN(expWidth, sigWidth, 0))
roundRawFNToRecFN.io.invalidExc := mulAddRecFNToRaw_postMul.io.invalidExc
roundRawFNToRecFN.io.infiniteExc := false.B
roundRawFNToRecFN.io.in := mulAddRecFNToRaw_postMul.io.rawOut
roundRawFNToRecFN.io.roundingMode := io.roundingMode
roundRawFNToRecFN.io.detectTininess := io.detectTininess
io.out := roundRawFNToRecFN.io.out
io.exceptionFlags := roundRawFNToRecFN.io.exceptionFlags
}
| module MulAddRecFN_e8_s24_61( // @[MulAddRecFN.scala:300:7]
input [32:0] io_a, // @[MulAddRecFN.scala:303:16]
input [32:0] io_c, // @[MulAddRecFN.scala:303:16]
output [32:0] io_out // @[MulAddRecFN.scala:303:16]
);
wire _mulAddRecFNToRaw_postMul_io_invalidExc; // @[MulAddRecFN.scala:319:15]
wire _mulAddRecFNToRaw_postMul_io_rawOut_isNaN; // @[MulAddRecFN.scala:319:15]
wire _mulAddRecFNToRaw_postMul_io_rawOut_isInf; // @[MulAddRecFN.scala:319:15]
wire _mulAddRecFNToRaw_postMul_io_rawOut_isZero; // @[MulAddRecFN.scala:319:15]
wire _mulAddRecFNToRaw_postMul_io_rawOut_sign; // @[MulAddRecFN.scala:319:15]
wire [9:0] _mulAddRecFNToRaw_postMul_io_rawOut_sExp; // @[MulAddRecFN.scala:319:15]
wire [26:0] _mulAddRecFNToRaw_postMul_io_rawOut_sig; // @[MulAddRecFN.scala:319:15]
wire [23:0] _mulAddRecFNToRaw_preMul_io_mulAddA; // @[MulAddRecFN.scala:317:15]
wire [47:0] _mulAddRecFNToRaw_preMul_io_mulAddC; // @[MulAddRecFN.scala:317:15]
wire _mulAddRecFNToRaw_preMul_io_toPostMul_isSigNaNAny; // @[MulAddRecFN.scala:317:15]
wire _mulAddRecFNToRaw_preMul_io_toPostMul_isNaNAOrB; // @[MulAddRecFN.scala:317:15]
wire _mulAddRecFNToRaw_preMul_io_toPostMul_isInfA; // @[MulAddRecFN.scala:317:15]
wire _mulAddRecFNToRaw_preMul_io_toPostMul_isZeroA; // @[MulAddRecFN.scala:317:15]
wire _mulAddRecFNToRaw_preMul_io_toPostMul_signProd; // @[MulAddRecFN.scala:317:15]
wire _mulAddRecFNToRaw_preMul_io_toPostMul_isNaNC; // @[MulAddRecFN.scala:317:15]
wire _mulAddRecFNToRaw_preMul_io_toPostMul_isInfC; // @[MulAddRecFN.scala:317:15]
wire _mulAddRecFNToRaw_preMul_io_toPostMul_isZeroC; // @[MulAddRecFN.scala:317:15]
wire [9:0] _mulAddRecFNToRaw_preMul_io_toPostMul_sExpSum; // @[MulAddRecFN.scala:317:15]
wire _mulAddRecFNToRaw_preMul_io_toPostMul_doSubMags; // @[MulAddRecFN.scala:317:15]
wire _mulAddRecFNToRaw_preMul_io_toPostMul_CIsDominant; // @[MulAddRecFN.scala:317:15]
wire [4:0] _mulAddRecFNToRaw_preMul_io_toPostMul_CDom_CAlignDist; // @[MulAddRecFN.scala:317:15]
wire [25:0] _mulAddRecFNToRaw_preMul_io_toPostMul_highAlignedSigC; // @[MulAddRecFN.scala:317:15]
wire _mulAddRecFNToRaw_preMul_io_toPostMul_bit0AlignedSigC; // @[MulAddRecFN.scala:317:15]
wire [32:0] io_a_0 = io_a; // @[MulAddRecFN.scala:300:7]
wire [32:0] io_c_0 = io_c; // @[MulAddRecFN.scala:300:7]
wire io_detectTininess = 1'h1; // @[MulAddRecFN.scala:300:7, :303:16, :339:15]
wire [2:0] io_roundingMode = 3'h0; // @[MulAddRecFN.scala:300:7, :303:16, :319:15, :339:15]
wire [32:0] io_b = 33'h80000000; // @[MulAddRecFN.scala:300:7, :303:16, :317:15]
wire [1:0] io_op = 2'h0; // @[MulAddRecFN.scala:300:7, :303:16, :317:15]
wire [32:0] io_out_0; // @[MulAddRecFN.scala:300:7]
wire [4:0] io_exceptionFlags; // @[MulAddRecFN.scala:300:7]
wire [47:0] _mulAddResult_T = {1'h0, _mulAddRecFNToRaw_preMul_io_mulAddA, 23'h0}; // @[MulAddRecFN.scala:317:15, :327:45]
wire [48:0] mulAddResult = {1'h0, _mulAddResult_T} + {1'h0, _mulAddRecFNToRaw_preMul_io_mulAddC}; // @[MulAddRecFN.scala:317:15, :327:45, :328:50]
MulAddRecFNToRaw_preMul_e8_s24_61 mulAddRecFNToRaw_preMul ( // @[MulAddRecFN.scala:317:15]
.io_a (io_a_0), // @[MulAddRecFN.scala:300:7]
.io_c (io_c_0), // @[MulAddRecFN.scala:300:7]
.io_mulAddA (_mulAddRecFNToRaw_preMul_io_mulAddA),
.io_mulAddC (_mulAddRecFNToRaw_preMul_io_mulAddC),
.io_toPostMul_isSigNaNAny (_mulAddRecFNToRaw_preMul_io_toPostMul_isSigNaNAny),
.io_toPostMul_isNaNAOrB (_mulAddRecFNToRaw_preMul_io_toPostMul_isNaNAOrB),
.io_toPostMul_isInfA (_mulAddRecFNToRaw_preMul_io_toPostMul_isInfA),
.io_toPostMul_isZeroA (_mulAddRecFNToRaw_preMul_io_toPostMul_isZeroA),
.io_toPostMul_signProd (_mulAddRecFNToRaw_preMul_io_toPostMul_signProd),
.io_toPostMul_isNaNC (_mulAddRecFNToRaw_preMul_io_toPostMul_isNaNC),
.io_toPostMul_isInfC (_mulAddRecFNToRaw_preMul_io_toPostMul_isInfC),
.io_toPostMul_isZeroC (_mulAddRecFNToRaw_preMul_io_toPostMul_isZeroC),
.io_toPostMul_sExpSum (_mulAddRecFNToRaw_preMul_io_toPostMul_sExpSum),
.io_toPostMul_doSubMags (_mulAddRecFNToRaw_preMul_io_toPostMul_doSubMags),
.io_toPostMul_CIsDominant (_mulAddRecFNToRaw_preMul_io_toPostMul_CIsDominant),
.io_toPostMul_CDom_CAlignDist (_mulAddRecFNToRaw_preMul_io_toPostMul_CDom_CAlignDist),
.io_toPostMul_highAlignedSigC (_mulAddRecFNToRaw_preMul_io_toPostMul_highAlignedSigC),
.io_toPostMul_bit0AlignedSigC (_mulAddRecFNToRaw_preMul_io_toPostMul_bit0AlignedSigC)
); // @[MulAddRecFN.scala:317:15]
MulAddRecFNToRaw_postMul_e8_s24_61 mulAddRecFNToRaw_postMul ( // @[MulAddRecFN.scala:319:15]
.io_fromPreMul_isSigNaNAny (_mulAddRecFNToRaw_preMul_io_toPostMul_isSigNaNAny), // @[MulAddRecFN.scala:317:15]
.io_fromPreMul_isNaNAOrB (_mulAddRecFNToRaw_preMul_io_toPostMul_isNaNAOrB), // @[MulAddRecFN.scala:317:15]
.io_fromPreMul_isInfA (_mulAddRecFNToRaw_preMul_io_toPostMul_isInfA), // @[MulAddRecFN.scala:317:15]
.io_fromPreMul_isZeroA (_mulAddRecFNToRaw_preMul_io_toPostMul_isZeroA), // @[MulAddRecFN.scala:317:15]
.io_fromPreMul_signProd (_mulAddRecFNToRaw_preMul_io_toPostMul_signProd), // @[MulAddRecFN.scala:317:15]
.io_fromPreMul_isNaNC (_mulAddRecFNToRaw_preMul_io_toPostMul_isNaNC), // @[MulAddRecFN.scala:317:15]
.io_fromPreMul_isInfC (_mulAddRecFNToRaw_preMul_io_toPostMul_isInfC), // @[MulAddRecFN.scala:317:15]
.io_fromPreMul_isZeroC (_mulAddRecFNToRaw_preMul_io_toPostMul_isZeroC), // @[MulAddRecFN.scala:317:15]
.io_fromPreMul_sExpSum (_mulAddRecFNToRaw_preMul_io_toPostMul_sExpSum), // @[MulAddRecFN.scala:317:15]
.io_fromPreMul_doSubMags (_mulAddRecFNToRaw_preMul_io_toPostMul_doSubMags), // @[MulAddRecFN.scala:317:15]
.io_fromPreMul_CIsDominant (_mulAddRecFNToRaw_preMul_io_toPostMul_CIsDominant), // @[MulAddRecFN.scala:317:15]
.io_fromPreMul_CDom_CAlignDist (_mulAddRecFNToRaw_preMul_io_toPostMul_CDom_CAlignDist), // @[MulAddRecFN.scala:317:15]
.io_fromPreMul_highAlignedSigC (_mulAddRecFNToRaw_preMul_io_toPostMul_highAlignedSigC), // @[MulAddRecFN.scala:317:15]
.io_fromPreMul_bit0AlignedSigC (_mulAddRecFNToRaw_preMul_io_toPostMul_bit0AlignedSigC), // @[MulAddRecFN.scala:317:15]
.io_mulAddResult (mulAddResult), // @[MulAddRecFN.scala:328:50]
.io_invalidExc (_mulAddRecFNToRaw_postMul_io_invalidExc),
.io_rawOut_isNaN (_mulAddRecFNToRaw_postMul_io_rawOut_isNaN),
.io_rawOut_isInf (_mulAddRecFNToRaw_postMul_io_rawOut_isInf),
.io_rawOut_isZero (_mulAddRecFNToRaw_postMul_io_rawOut_isZero),
.io_rawOut_sign (_mulAddRecFNToRaw_postMul_io_rawOut_sign),
.io_rawOut_sExp (_mulAddRecFNToRaw_postMul_io_rawOut_sExp),
.io_rawOut_sig (_mulAddRecFNToRaw_postMul_io_rawOut_sig)
); // @[MulAddRecFN.scala:319:15]
RoundRawFNToRecFN_e8_s24_85 roundRawFNToRecFN ( // @[MulAddRecFN.scala:339:15]
.io_invalidExc (_mulAddRecFNToRaw_postMul_io_invalidExc), // @[MulAddRecFN.scala:319:15]
.io_in_isNaN (_mulAddRecFNToRaw_postMul_io_rawOut_isNaN), // @[MulAddRecFN.scala:319:15]
.io_in_isInf (_mulAddRecFNToRaw_postMul_io_rawOut_isInf), // @[MulAddRecFN.scala:319:15]
.io_in_isZero (_mulAddRecFNToRaw_postMul_io_rawOut_isZero), // @[MulAddRecFN.scala:319:15]
.io_in_sign (_mulAddRecFNToRaw_postMul_io_rawOut_sign), // @[MulAddRecFN.scala:319:15]
.io_in_sExp (_mulAddRecFNToRaw_postMul_io_rawOut_sExp), // @[MulAddRecFN.scala:319:15]
.io_in_sig (_mulAddRecFNToRaw_postMul_io_rawOut_sig), // @[MulAddRecFN.scala:319:15]
.io_out (io_out_0),
.io_exceptionFlags (io_exceptionFlags)
); // @[MulAddRecFN.scala:339:15]
assign io_out = io_out_0; // @[MulAddRecFN.scala:300:7]
endmodule |
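In the specialized instance above the b operand is tied to 33'h80000000, which corresponds to +1.0 in the recoded single-precision format, so this particular FMA reduces to computing a * 1.0 + c, i.e. a floating-point add, and the constant op/roundingMode/detectTininess inputs let most of the control logic fold away. A short sketch of driving the generic generator from the Chisel source above; every wrapper-side name here is illustrative rather than taken from any real design:

import chisel3._
import hardfloat._

// Sketch only: a single-precision fused multiply-add built from the generator above.
// RecFMA and its port names are illustrative; MulAddRecFN and its io come from the source.
class RecFMA extends RawModule {
  val io = IO(new Bundle {
    val a, b, c = Input(UInt(33.W))     // recoded single-precision operands
    val out     = Output(UInt(33.W))
    val flags   = Output(UInt(5.W))
  })
  val fma = Module(new MulAddRecFN(expWidth = 8, sigWidth = 24))
  fma.io.op             := 0.U          // (a * b) + c with no sign manipulation
  fma.io.a              := io.a
  fma.io.b              := io.b         // tie this to "h080000000".U to get the pure add seen above
  fma.io.c              := io.c
  fma.io.roundingMode   := 0.U          // hardfloat's round_near_even encoding
  fma.io.detectTininess := 1.U          // detect tininess after rounding
  io.out   := fma.io.out
  io.flags := fma.io.exceptionFlags
}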
Generate the Verilog code corresponding to the following Chisel files.
File RecFNToRecFN.scala:
/*============================================================================
This Chisel source file is part of a pre-release version of the HardFloat IEEE
Floating-Point Arithmetic Package, by John R. Hauser (with some contributions
from Yunsup Lee and Andrew Waterman, mainly concerning testing).
Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the
University of California. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions, and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions, and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the University nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
=============================================================================*/
package hardfloat
import chisel3._
import consts._
class
RecFNToRecFN(
inExpWidth: Int, inSigWidth: Int, outExpWidth: Int, outSigWidth: Int)
extends chisel3.RawModule
{
val io = IO(new Bundle {
val in = Input(Bits((inExpWidth + inSigWidth + 1).W))
val roundingMode = Input(UInt(3.W))
val detectTininess = Input(UInt(1.W))
val out = Output(Bits((outExpWidth + outSigWidth + 1).W))
val exceptionFlags = Output(Bits(5.W))
})
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val rawIn = rawFloatFromRecFN(inExpWidth, inSigWidth, io.in);
if ((inExpWidth == outExpWidth) && (inSigWidth <= outSigWidth)) {
//--------------------------------------------------------------------
//--------------------------------------------------------------------
io.out := io.in<<(outSigWidth - inSigWidth)
io.exceptionFlags := isSigNaNRawFloat(rawIn) ## 0.U(4.W)
} else {
//--------------------------------------------------------------------
//--------------------------------------------------------------------
val roundAnyRawFNToRecFN =
Module(
new RoundAnyRawFNToRecFN(
inExpWidth,
inSigWidth,
outExpWidth,
outSigWidth,
flRoundOpt_sigMSBitAlwaysZero
))
roundAnyRawFNToRecFN.io.invalidExc := isSigNaNRawFloat(rawIn)
roundAnyRawFNToRecFN.io.infiniteExc := false.B
roundAnyRawFNToRecFN.io.in := rawIn
roundAnyRawFNToRecFN.io.roundingMode := io.roundingMode
roundAnyRawFNToRecFN.io.detectTininess := io.detectTininess
io.out := roundAnyRawFNToRecFN.io.out
io.exceptionFlags := roundAnyRawFNToRecFN.io.exceptionFlags
}
}
File rawFloatFromRecFN.scala:
/*============================================================================
This Chisel source file is part of a pre-release version of the HardFloat IEEE
Floating-Point Arithmetic Package, by John R. Hauser (with some contributions
from Yunsup Lee and Andrew Waterman, mainly concerning testing).
Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the
University of California. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions, and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions, and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the University nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
=============================================================================*/
package hardfloat
import chisel3._
import chisel3.util._
/*----------------------------------------------------------------------------
| In the result, no more than one of 'isNaN', 'isInf', and 'isZero' will be
| set.
*----------------------------------------------------------------------------*/
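/*----------------------------------------------------------------------------
| Added worked example (not part of the original source): for a 33-bit recoded
| single-precision input in = 0x080000000, exp = in(31, 23) = 0x100, so
| exp(8, 6) = 3'b100 (nonzero => not a zero) and exp(8, 7) = 2'b10 (not 3 =>
| neither NaN nor infinity); the result is a normal value with sign = 0,
| sExp = 256 and sig = 0 ## 1 ## 23'b0, i.e. a significand of exactly 1.0.
*----------------------------------------------------------------------------*/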
object rawFloatFromRecFN
{
def apply(expWidth: Int, sigWidth: Int, in: Bits): RawFloat =
{
val exp = in(expWidth + sigWidth - 1, sigWidth - 1)
val isZero = exp(expWidth, expWidth - 2) === 0.U
val isSpecial = exp(expWidth, expWidth - 1) === 3.U
val out = Wire(new RawFloat(expWidth, sigWidth))
out.isNaN := isSpecial && exp(expWidth - 2)
out.isInf := isSpecial && ! exp(expWidth - 2)
out.isZero := isZero
out.sign := in(expWidth + sigWidth)
out.sExp := exp.zext
out.sig := 0.U(1.W) ## ! isZero ## in(sigWidth - 2, 0)
out
}
}
| module RecFNToRecFN_61(); // @[RecFNToRecFN.scala:44:5]
wire [8:0] rawIn_exp = 9'h2B; // @[rawFloatFromRecFN.scala:51:21]
wire [9:0] rawIn_sExp = 10'h2B; // @[rawFloatFromRecFN.scala:55:23, :60:27]
wire [9:0] _rawIn_out_sExp_T = 10'h2B; // @[rawFloatFromRecFN.scala:55:23, :60:27]
wire [1:0] _rawIn_isSpecial_T = 2'h0; // @[rawFloatFromRecFN.scala:53:28, :61:32]
wire [1:0] _rawIn_out_sig_T_1 = 2'h0; // @[rawFloatFromRecFN.scala:53:28, :61:32]
wire [22:0] _rawIn_out_sig_T_2 = 23'h0; // @[rawFloatFromRecFN.scala:61:49]
wire [24:0] rawIn_sig = 25'h0; // @[rawFloatFromRecFN.scala:55:23, :61:44]
wire [24:0] _rawIn_out_sig_T_3 = 25'h0; // @[rawFloatFromRecFN.scala:55:23, :61:44]
wire rawIn_isSpecial = 1'h0; // @[rawFloatFromRecFN.scala:53:53, :55:23, :56:{33,41}, :57:{33,41}, :61:35]
wire rawIn_isNaN = 1'h0; // @[rawFloatFromRecFN.scala:53:53, :55:23, :56:{33,41}, :57:{33,41}, :61:35]
wire rawIn_isInf = 1'h0; // @[rawFloatFromRecFN.scala:53:53, :55:23, :56:{33,41}, :57:{33,41}, :61:35]
wire _rawIn_out_isNaN_T = 1'h0; // @[rawFloatFromRecFN.scala:53:53, :55:23, :56:{33,41}, :57:{33,41}, :61:35]
wire _rawIn_out_isNaN_T_1 = 1'h0; // @[rawFloatFromRecFN.scala:53:53, :55:23, :56:{33,41}, :57:{33,41}, :61:35]
wire _rawIn_out_isInf_T = 1'h0; // @[rawFloatFromRecFN.scala:53:53, :55:23, :56:{33,41}, :57:{33,41}, :61:35]
wire _rawIn_out_isInf_T_2 = 1'h0; // @[rawFloatFromRecFN.scala:53:53, :55:23, :56:{33,41}, :57:{33,41}, :61:35]
wire _rawIn_out_sig_T = 1'h0; // @[rawFloatFromRecFN.scala:53:53, :55:23, :56:{33,41}, :57:{33,41}, :61:35]
wire _io_exceptionFlags_T = 1'h0; // @[rawFloatFromRecFN.scala:53:53, :55:23, :56:{33,41}, :57:{33,41}, :61:35]
wire _io_exceptionFlags_T_2 = 1'h0; // @[rawFloatFromRecFN.scala:53:53, :55:23, :56:{33,41}, :57:{33,41}, :61:35]
wire [4:0] io_exceptionFlags = 5'h0; // @[RecFNToRecFN.scala:44:5, :48:16, :65:54]
wire [4:0] _io_exceptionFlags_T_3 = 5'h0; // @[RecFNToRecFN.scala:44:5, :48:16, :65:54]
wire io_detectTininess = 1'h1; // @[rawFloatFromRecFN.scala:52:53, :55:23, :57:36, :59:25]
wire rawIn_isZero = 1'h1; // @[rawFloatFromRecFN.scala:52:53, :55:23, :57:36, :59:25]
wire rawIn_isZero_0 = 1'h1; // @[rawFloatFromRecFN.scala:52:53, :55:23, :57:36, :59:25]
wire rawIn_sign = 1'h1; // @[rawFloatFromRecFN.scala:52:53, :55:23, :57:36, :59:25]
wire _rawIn_out_isInf_T_1 = 1'h1; // @[rawFloatFromRecFN.scala:52:53, :55:23, :57:36, :59:25]
wire _rawIn_out_sign_T = 1'h1; // @[rawFloatFromRecFN.scala:52:53, :55:23, :57:36, :59:25]
wire _io_exceptionFlags_T_1 = 1'h1; // @[rawFloatFromRecFN.scala:52:53, :55:23, :57:36, :59:25]
wire [2:0] io_roundingMode = 3'h0; // @[rawFloatFromRecFN.scala:52:28]
wire [2:0] _rawIn_isZero_T = 3'h0; // @[rawFloatFromRecFN.scala:52:28]
wire [32:0] io_in = 33'h115800000; // @[RecFNToRecFN.scala:44:5, :48:16, :64:35]
wire [32:0] io_out = 33'h115800000; // @[RecFNToRecFN.scala:44:5, :48:16, :64:35]
wire [32:0] _io_out_T = 33'h115800000; // @[RecFNToRecFN.scala:44:5, :48:16, :64:35]
endmodule |
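Because the instance above converts between identical formats (expWidth 8 and sigWidth 24 on both sides), the Chisel takes the first branch of RecFNToRecFN and the output is simply the input shifted left by outSigWidth - inSigWidth = 0; with the input also tied to a constant, the whole module folds to wires. A sketch of the non-trivial widening case, with the wrapper name and its ports being illustrative:

import chisel3._
import hardfloat._

// Sketch only: recoded single-precision to recoded double-precision conversion
// using the RecFNToRecFN generator above. WidenS2D and its ports are illustrative.
class WidenS2D extends RawModule {
  val io = IO(new Bundle {
    val in  = Input(UInt(33.W))    // recoded single: 8 + 24 + 1 bits
    val out = Output(UInt(65.W))   // recoded double: 11 + 53 + 1 bits
  })
  val conv = Module(new RecFNToRecFN(8, 24, 11, 53))
  conv.io.in             := io.in
  conv.io.roundingMode   := 0.U    // widening never needs to round, but the port still exists
  conv.io.detectTininess := 1.U
  io.out := conv.io.out
}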
Generate the Verilog code corresponding to the following Chisel files.
File package.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip
import chisel3._
import chisel3.util._
import scala.math.min
import scala.collection.{immutable, mutable}
package object util {
implicit class UnzippableOption[S, T](val x: Option[(S, T)]) {
def unzip = (x.map(_._1), x.map(_._2))
}
implicit class UIntIsOneOf(private val x: UInt) extends AnyVal {
def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR
def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq)
}
implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal {
/** Like Vec.apply(idx), but tolerates indices of mismatched width */
def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0))
}
implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal {
def apply(idx: UInt): T = {
if (x.size <= 1) {
x.head
} else if (!isPow2(x.size)) {
// For non-power-of-2 seqs, reflect elements to simplify decoder
(x ++ x.takeRight(x.size & -x.size)).toSeq(idx)
} else {
// Ignore MSBs of idx
val truncIdx =
if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx
else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0)
x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) }
}
}
def extract(idx: UInt): T = VecInit(x).extract(idx)
def asUInt: UInt = Cat(x.map(_.asUInt).reverse)
def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n)
def rotate(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n)
def rotateRight(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
}
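  // Examples for the Seq rotate helpers above (added commentary, not in the original file):
  //   Seq(a, b, c, d).rotate(1) == Seq(b, c, d, a) and
  //   Seq(a, b, c, d).rotateRight(1) == Seq(d, a, b, c).
  // The UInt-amount variants require a power-of-two sized Seq and build a
  // log2(size)-stage mux network, one stage per bit of the rotate amount n.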
// allow bitwise ops on Seq[Bool] just like UInt
implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal {
def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b }
def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b }
def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b }
def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x
def >> (n: Int): Seq[Bool] = x drop n
def unary_~ : Seq[Bool] = x.map(!_)
def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_)
def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_)
def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_)
private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B)
}
implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal {
def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable))
def getElements: Seq[Element] = x match {
case e: Element => Seq(e)
case a: Aggregate => a.getElements.flatMap(_.getElements)
}
}
/** Any Data subtype that has a Bool member named valid. */
type DataCanBeValid = Data { val valid: Bool }
implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal {
def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable)
}
implicit class StringToAugmentedString(private val x: String) extends AnyVal {
    /** converts from camel case to underscores, also removing all spaces */
def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") {
case (acc, c) if c.isUpper => acc + "_" + c.toLower
case (acc, c) if c == ' ' => acc
case (acc, c) => acc + c
}
/** converts spaces or underscores to hyphens, also lowering case */
def kebab: String = x.toLowerCase map {
case ' ' => '-'
case '_' => '-'
case c => c
}
def named(name: Option[String]): String = {
x + name.map("_named_" + _ ).getOrElse("_with_no_name")
}
def named(name: String): String = named(Some(name))
}
implicit def uintToBitPat(x: UInt): BitPat = BitPat(x)
implicit def wcToUInt(c: WideCounter): UInt = c.value
implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal {
def sextTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x)
}
def padTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(0.U((n - x.getWidth).W), x)
}
// shifts left by n if n >= 0, or right by -n if n < 0
def << (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << n(w-1, 0)
Mux(n(w), shifted >> (1 << w), shifted)
}
// shifts right by n if n >= 0, or left by -n if n < 0
def >> (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << (1 << w) >> n(w-1, 0)
Mux(n(w), shifted, shifted >> (1 << w))
}
// Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts
def extract(hi: Int, lo: Int): UInt = {
require(hi >= lo-1)
if (hi == lo-1) 0.U
else x(hi, lo)
}
// Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts
def extractOption(hi: Int, lo: Int): Option[UInt] = {
require(hi >= lo-1)
if (hi == lo-1) None
else Some(x(hi, lo))
}
// like x & ~y, but first truncate or zero-extend y to x's width
def andNot(y: UInt): UInt = x & ~(y | (x & 0.U))
def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n)
def rotateRight(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r))
}
}
def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n))
def rotateLeft(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r))
}
}
// compute (this + y) % n, given (this < n) and (y < n)
def addWrap(y: UInt, n: Int): UInt = {
val z = x +& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0)
}
// compute (this - y) % n, given (this < n) and (y < n)
def subWrap(y: UInt, n: Int): UInt = {
val z = x -& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0)
}
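    // Worked example (added commentary): with n = 6, x = 5.U and y = 3.U,
    //   x.addWrap(y, 6) computes z = 5 + 3 = 8, and since 8 >= 6 it returns 8 - 6 = 2;
    //   x.subWrap(y, 6) computes z = 5 - 3 = 2 with no borrow, so it returns 2.
    // For power-of-two n both reduce to simply truncating z to log2(n) bits.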
def grouped(width: Int): Seq[UInt] =
(0 until x.getWidth by width).map(base => x(base + width - 1, base))
def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds
def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x)
// Like >=, but prevents x-prop for ('x >= 0)
def >== (y: UInt): Bool = x >= y || y === 0.U
}
implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal {
def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y)
def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y)
}
implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal {
def toInt: Int = if (x) 1 else 0
// this one's snagged from scalaz
def option[T](z: => T): Option[T] = if (x) Some(z) else None
}
implicit class IntToAugmentedInt(private val x: Int) extends AnyVal {
// exact log2
def log2: Int = {
require(isPow2(x))
log2Ceil(x)
}
}
def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x)
def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x))
def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0)
def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1)
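  // Worked example (added commentary): with width = 4 and x = 2.U,
  //   UIntToOH1(2.U, 4) = 4'b0011, i.e. x ones in the low bits (a "one-hot minus one"
  //   thermometer code), and OH1ToOH(4'b0011) = 5'b00111 & 5'b11100 = 5'b00100,
  //   recovering the ordinary one-hot value 1 << 2; OH1ToUInt maps it back to 2.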
def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None
// Fill 1s from low bits to high bits
def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth)
def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0))
helper(1, x)(width-1, 0)
}
  // Fill 1s from high bits to low bits
def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth)
def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x >> s))
helper(1, x)(width-1, 0)
}
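  // Worked example: for "b00100".U(5.W), leftOR gives "b11100".U (every bit at or above
  // the lowest set bit) and rightOR gives "b00111".U (every bit at or below the highest
  // set bit); the cap argument bounds how far each set bit must propagate, which saves
  // shift stages when that distance is known to be small.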
def OptimizationBarrier[T <: Data](in: T): T = {
val barrier = Module(new Module {
val io = IO(new Bundle {
val x = Input(chiselTypeOf(in))
val y = Output(chiselTypeOf(in))
})
io.y := io.x
override def desiredName = s"OptimizationBarrier_${in.typeName}"
})
barrier.io.x := in
barrier.io.y
}
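  // A minimal usage sketch (the value name is illustrative): wrapping a bundle as in
  //   val entry = OptimizationBarrier(pte)
  // keeps `pte` behind its own module boundary so optimization passes do not fold it
  // away across that boundary, which is why dedicated OptimizationBarrier_* modules
  // (such as the PTE one in the emitted Verilog below) appear in the generated output.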
/** Similar to Seq.groupBy except this returns a Seq instead of a Map
* Useful for deterministic code generation
*/
def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = {
val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]]
for (x <- xs) {
val key = f(x)
val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A])
l += x
}
map.view.map({ case (k, vs) => k -> vs.toList }).toList
}
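  // Worked example: groupByIntoSeq(Seq(1, 2, 3, 4, 5))(_ % 2) returns
  // Seq(1 -> List(1, 3, 5), 0 -> List(2, 4)); keys are ordered by first appearance, so
  // repeated elaborations generate hardware in the same order, which a plain Map-based
  // groupBy would not guarantee.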
def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match {
case 1 => List.fill(n)(in.head)
case x if x == n => in
case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in")
}
  // HeterogeneousBag moved to standalone diplomacy
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts)
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag
}
| module OptimizationBarrier_PTE_7( // @[package.scala:267:30]
input clock, // @[package.scala:267:30]
input reset, // @[package.scala:267:30]
input [9:0] io_x_reserved_for_future, // @[package.scala:268:18]
input [43:0] io_x_ppn, // @[package.scala:268:18]
input [1:0] io_x_reserved_for_software, // @[package.scala:268:18]
input io_x_d, // @[package.scala:268:18]
input io_x_a, // @[package.scala:268:18]
input io_x_g, // @[package.scala:268:18]
input io_x_u, // @[package.scala:268:18]
input io_x_x, // @[package.scala:268:18]
input io_x_w, // @[package.scala:268:18]
input io_x_r, // @[package.scala:268:18]
input io_x_v, // @[package.scala:268:18]
output [9:0] io_y_reserved_for_future, // @[package.scala:268:18]
output [43:0] io_y_ppn, // @[package.scala:268:18]
output [1:0] io_y_reserved_for_software, // @[package.scala:268:18]
output io_y_d, // @[package.scala:268:18]
output io_y_a, // @[package.scala:268:18]
output io_y_g, // @[package.scala:268:18]
output io_y_u, // @[package.scala:268:18]
output io_y_x, // @[package.scala:268:18]
output io_y_w, // @[package.scala:268:18]
output io_y_r, // @[package.scala:268:18]
output io_y_v // @[package.scala:268:18]
);
wire [9:0] io_x_reserved_for_future_0 = io_x_reserved_for_future; // @[package.scala:267:30]
wire [43:0] io_x_ppn_0 = io_x_ppn; // @[package.scala:267:30]
wire [1:0] io_x_reserved_for_software_0 = io_x_reserved_for_software; // @[package.scala:267:30]
wire io_x_d_0 = io_x_d; // @[package.scala:267:30]
wire io_x_a_0 = io_x_a; // @[package.scala:267:30]
wire io_x_g_0 = io_x_g; // @[package.scala:267:30]
wire io_x_u_0 = io_x_u; // @[package.scala:267:30]
wire io_x_x_0 = io_x_x; // @[package.scala:267:30]
wire io_x_w_0 = io_x_w; // @[package.scala:267:30]
wire io_x_r_0 = io_x_r; // @[package.scala:267:30]
wire io_x_v_0 = io_x_v; // @[package.scala:267:30]
wire [9:0] io_y_reserved_for_future_0 = io_x_reserved_for_future_0; // @[package.scala:267:30]
wire [43:0] io_y_ppn_0 = io_x_ppn_0; // @[package.scala:267:30]
wire [1:0] io_y_reserved_for_software_0 = io_x_reserved_for_software_0; // @[package.scala:267:30]
wire io_y_d_0 = io_x_d_0; // @[package.scala:267:30]
wire io_y_a_0 = io_x_a_0; // @[package.scala:267:30]
wire io_y_g_0 = io_x_g_0; // @[package.scala:267:30]
wire io_y_u_0 = io_x_u_0; // @[package.scala:267:30]
wire io_y_x_0 = io_x_x_0; // @[package.scala:267:30]
wire io_y_w_0 = io_x_w_0; // @[package.scala:267:30]
wire io_y_r_0 = io_x_r_0; // @[package.scala:267:30]
wire io_y_v_0 = io_x_v_0; // @[package.scala:267:30]
assign io_y_reserved_for_future = io_y_reserved_for_future_0; // @[package.scala:267:30]
assign io_y_ppn = io_y_ppn_0; // @[package.scala:267:30]
assign io_y_reserved_for_software = io_y_reserved_for_software_0; // @[package.scala:267:30]
assign io_y_d = io_y_d_0; // @[package.scala:267:30]
assign io_y_a = io_y_a_0; // @[package.scala:267:30]
assign io_y_g = io_y_g_0; // @[package.scala:267:30]
assign io_y_u = io_y_u_0; // @[package.scala:267:30]
assign io_y_x = io_y_x_0; // @[package.scala:267:30]
assign io_y_w = io_y_w_0; // @[package.scala:267:30]
assign io_y_r = io_y_r_0; // @[package.scala:267:30]
assign io_y_v = io_y_v_0; // @[package.scala:267:30]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File Monitor.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceLine
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import freechips.rocketchip.diplomacy.EnableMonitors
import freechips.rocketchip.formal.{MonitorDirection, IfThen, Property, PropertyClass, TestplanTestType, TLMonitorStrictMode}
import freechips.rocketchip.util.PlusArg
case class TLMonitorArgs(edge: TLEdge)
abstract class TLMonitorBase(args: TLMonitorArgs) extends Module
{
val io = IO(new Bundle {
val in = Input(new TLBundle(args.edge.bundle))
})
def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit
legalize(io.in, args.edge, reset)
}
object TLMonitor {
def apply(enable: Boolean, node: TLNode)(implicit p: Parameters): TLNode = {
if (enable) {
EnableMonitors { implicit p => node := TLEphemeralNode()(ValName("monitor")) }
} else { node }
}
}
class TLMonitor(args: TLMonitorArgs, monitorDir: MonitorDirection = MonitorDirection.Monitor) extends TLMonitorBase(args)
{
require (args.edge.params(TLMonitorStrictMode) || (! args.edge.params(TestplanTestType).formal))
val cover_prop_class = PropertyClass.Default
//Like assert but can flip to being an assumption for formal verification
def monAssert(cond: Bool, message: String): Unit =
if (monitorDir == MonitorDirection.Monitor) {
assert(cond, message)
} else {
Property(monitorDir, cond, message, PropertyClass.Default)
}
def assume(cond: Bool, message: String): Unit =
if (monitorDir == MonitorDirection.Monitor) {
assert(cond, message)
} else {
Property(monitorDir.flip, cond, message, PropertyClass.Default)
}
def extra = {
args.edge.sourceInfo match {
case SourceLine(filename, line, col) => s" (connected at $filename:$line:$col)"
case _ => ""
}
}
def visible(address: UInt, source: UInt, edge: TLEdge) =
edge.client.clients.map { c =>
!c.sourceId.contains(source) ||
c.visibility.map(_.contains(address)).reduce(_ || _)
}.reduce(_ && _)
def legalizeFormatA(bundle: TLBundleA, edge: TLEdge): Unit = {
//switch this flag to turn on diplomacy in error messages
def diplomacyInfo = if (true) "" else "\nThe diplomacy information for the edge is as follows:\n" + edge.formatEdge + "\n"
monAssert (TLMessages.isA(bundle.opcode), "'A' channel has invalid opcode" + extra)
// Reuse these subexpressions to save some firrtl lines
val source_ok = edge.client.contains(bundle.source)
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val mask = edge.full_mask(bundle)
monAssert (visible(edge.address(bundle), bundle.source, edge), "'A' channel carries an address illegal for the specified bank visibility")
    //The monitor doesn't check for acquire T vs acquire B; it assumes that acquire B implies acquire T and only checks for acquire B
//TODO: check for acquireT?
when (bundle.opcode === TLMessages.AcquireBlock) {
monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock from a client which does not support Probe" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel AcquireBlock carries invalid source ID" + diplomacyInfo + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquireBlock smaller than a beat" + extra)
monAssert (is_aligned, "'A' channel AcquireBlock address not aligned to size" + extra)
monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquireBlock carries invalid grow param" + extra)
monAssert (~bundle.mask === 0.U, "'A' channel AcquireBlock contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel AcquireBlock is corrupt" + extra)
}
when (bundle.opcode === TLMessages.AcquirePerm) {
monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm from a client which does not support Probe" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel AcquirePerm carries invalid source ID" + diplomacyInfo + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquirePerm smaller than a beat" + extra)
monAssert (is_aligned, "'A' channel AcquirePerm address not aligned to size" + extra)
monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquirePerm carries invalid grow param" + extra)
monAssert (bundle.param =/= TLPermissions.NtoB, "'A' channel AcquirePerm requests NtoB" + extra)
monAssert (~bundle.mask === 0.U, "'A' channel AcquirePerm contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel AcquirePerm is corrupt" + extra)
}
when (bundle.opcode === TLMessages.Get) {
monAssert (edge.master.emitsGet(bundle.source, bundle.size), "'A' channel carries Get type which master claims it can't emit" + diplomacyInfo + extra)
monAssert (edge.slave.supportsGetSafe(edge.address(bundle), bundle.size, None), "'A' channel carries Get type which slave claims it can't support" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel Get carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Get address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel Get carries invalid param" + extra)
monAssert (bundle.mask === mask, "'A' channel Get contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel Get is corrupt" + extra)
}
when (bundle.opcode === TLMessages.PutFullData) {
monAssert (edge.master.emitsPutFull(bundle.source, bundle.size) && edge.slave.supportsPutFullSafe(edge.address(bundle), bundle.size), "'A' channel carries PutFull type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel PutFull carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel PutFull address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel PutFull carries invalid param" + extra)
monAssert (bundle.mask === mask, "'A' channel PutFull contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.PutPartialData) {
monAssert (edge.master.emitsPutPartial(bundle.source, bundle.size) && edge.slave.supportsPutPartialSafe(edge.address(bundle), bundle.size), "'A' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel PutPartial carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel PutPartial address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel PutPartial carries invalid param" + extra)
monAssert ((bundle.mask & ~mask) === 0.U, "'A' channel PutPartial contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.ArithmeticData) {
monAssert (edge.master.emitsArithmetic(bundle.source, bundle.size) && edge.slave.supportsArithmeticSafe(edge.address(bundle), bundle.size), "'A' channel carries Arithmetic type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Arithmetic carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Arithmetic address not aligned to size" + extra)
monAssert (TLAtomics.isArithmetic(bundle.param), "'A' channel Arithmetic carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Arithmetic contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.LogicalData) {
monAssert (edge.master.emitsLogical(bundle.source, bundle.size) && edge.slave.supportsLogicalSafe(edge.address(bundle), bundle.size), "'A' channel carries Logical type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Logical carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Logical address not aligned to size" + extra)
monAssert (TLAtomics.isLogical(bundle.param), "'A' channel Logical carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Logical contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.Hint) {
monAssert (edge.master.emitsHint(bundle.source, bundle.size) && edge.slave.supportsHintSafe(edge.address(bundle), bundle.size), "'A' channel carries Hint type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Hint carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Hint address not aligned to size" + extra)
monAssert (TLHints.isHints(bundle.param), "'A' channel Hint carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Hint contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel Hint is corrupt" + extra)
}
}
def legalizeFormatB(bundle: TLBundleB, edge: TLEdge): Unit = {
monAssert (TLMessages.isB(bundle.opcode), "'B' channel has invalid opcode" + extra)
monAssert (visible(edge.address(bundle), bundle.source, edge), "'B' channel carries an address illegal for the specified bank visibility")
// Reuse these subexpressions to save some firrtl lines
val address_ok = edge.manager.containsSafe(edge.address(bundle))
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val mask = edge.full_mask(bundle)
val legal_source = Mux1H(edge.client.find(bundle.source), edge.client.clients.map(c => c.sourceId.start.U)) === bundle.source
when (bundle.opcode === TLMessages.Probe) {
assume (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'B' channel carries Probe type which is unexpected using diplomatic parameters" + extra)
assume (address_ok, "'B' channel Probe carries unmanaged address" + extra)
assume (legal_source, "'B' channel Probe carries source that is not first source" + extra)
assume (is_aligned, "'B' channel Probe address not aligned to size" + extra)
assume (TLPermissions.isCap(bundle.param), "'B' channel Probe carries invalid cap param" + extra)
assume (bundle.mask === mask, "'B' channel Probe contains invalid mask" + extra)
assume (!bundle.corrupt, "'B' channel Probe is corrupt" + extra)
}
when (bundle.opcode === TLMessages.Get) {
monAssert (edge.master.supportsGet(edge.source(bundle), bundle.size) && edge.slave.emitsGetSafe(edge.address(bundle), bundle.size), "'B' channel carries Get type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel Get carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Get carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Get address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel Get carries invalid param" + extra)
monAssert (bundle.mask === mask, "'B' channel Get contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'B' channel Get is corrupt" + extra)
}
when (bundle.opcode === TLMessages.PutFullData) {
monAssert (edge.master.supportsPutFull(edge.source(bundle), bundle.size) && edge.slave.emitsPutFullSafe(edge.address(bundle), bundle.size), "'B' channel carries PutFull type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel PutFull carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel PutFull carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel PutFull address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel PutFull carries invalid param" + extra)
monAssert (bundle.mask === mask, "'B' channel PutFull contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.PutPartialData) {
monAssert (edge.master.supportsPutPartial(edge.source(bundle), bundle.size) && edge.slave.emitsPutPartialSafe(edge.address(bundle), bundle.size), "'B' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel PutPartial carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel PutPartial carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel PutPartial address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel PutPartial carries invalid param" + extra)
monAssert ((bundle.mask & ~mask) === 0.U, "'B' channel PutPartial contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.ArithmeticData) {
monAssert (edge.master.supportsArithmetic(edge.source(bundle), bundle.size) && edge.slave.emitsArithmeticSafe(edge.address(bundle), bundle.size), "'B' channel carries Arithmetic type unsupported by master" + extra)
monAssert (address_ok, "'B' channel Arithmetic carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Arithmetic carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Arithmetic address not aligned to size" + extra)
monAssert (TLAtomics.isArithmetic(bundle.param), "'B' channel Arithmetic carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'B' channel Arithmetic contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.LogicalData) {
monAssert (edge.master.supportsLogical(edge.source(bundle), bundle.size) && edge.slave.emitsLogicalSafe(edge.address(bundle), bundle.size), "'B' channel carries Logical type unsupported by client" + extra)
monAssert (address_ok, "'B' channel Logical carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Logical carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Logical address not aligned to size" + extra)
monAssert (TLAtomics.isLogical(bundle.param), "'B' channel Logical carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'B' channel Logical contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.Hint) {
monAssert (edge.master.supportsHint(edge.source(bundle), bundle.size) && edge.slave.emitsHintSafe(edge.address(bundle), bundle.size), "'B' channel carries Hint type unsupported by client" + extra)
monAssert (address_ok, "'B' channel Hint carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Hint carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Hint address not aligned to size" + extra)
monAssert (bundle.mask === mask, "'B' channel Hint contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'B' channel Hint is corrupt" + extra)
}
}
def legalizeFormatC(bundle: TLBundleC, edge: TLEdge): Unit = {
monAssert (TLMessages.isC(bundle.opcode), "'C' channel has invalid opcode" + extra)
val source_ok = edge.client.contains(bundle.source)
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val address_ok = edge.manager.containsSafe(edge.address(bundle))
monAssert (visible(edge.address(bundle), bundle.source, edge), "'C' channel carries an address illegal for the specified bank visibility")
when (bundle.opcode === TLMessages.ProbeAck) {
monAssert (address_ok, "'C' channel ProbeAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel ProbeAck carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAck smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ProbeAck address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAck carries invalid report param" + extra)
monAssert (!bundle.corrupt, "'C' channel ProbeAck is corrupt" + extra)
}
when (bundle.opcode === TLMessages.ProbeAckData) {
monAssert (address_ok, "'C' channel ProbeAckData carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel ProbeAckData carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAckData smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ProbeAckData address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAckData carries invalid report param" + extra)
}
when (bundle.opcode === TLMessages.Release) {
monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries Release type unsupported by manager" + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra)
monAssert (source_ok, "'C' channel Release carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel Release smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel Release address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel Release carries invalid report param" + extra)
monAssert (!bundle.corrupt, "'C' channel Release is corrupt" + extra)
}
when (bundle.opcode === TLMessages.ReleaseData) {
monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData type unsupported by manager" + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra)
monAssert (source_ok, "'C' channel ReleaseData carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ReleaseData smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ReleaseData address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ReleaseData carries invalid report param" + extra)
}
when (bundle.opcode === TLMessages.AccessAck) {
monAssert (address_ok, "'C' channel AccessAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel AccessAck carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel AccessAck address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel AccessAck carries invalid param" + extra)
monAssert (!bundle.corrupt, "'C' channel AccessAck is corrupt" + extra)
}
when (bundle.opcode === TLMessages.AccessAckData) {
monAssert (address_ok, "'C' channel AccessAckData carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel AccessAckData carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel AccessAckData address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel AccessAckData carries invalid param" + extra)
}
when (bundle.opcode === TLMessages.HintAck) {
monAssert (address_ok, "'C' channel HintAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel HintAck carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel HintAck address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel HintAck carries invalid param" + extra)
monAssert (!bundle.corrupt, "'C' channel HintAck is corrupt" + extra)
}
}
def legalizeFormatD(bundle: TLBundleD, edge: TLEdge): Unit = {
assume (TLMessages.isD(bundle.opcode), "'D' channel has invalid opcode" + extra)
val source_ok = edge.client.contains(bundle.source)
val sink_ok = bundle.sink < edge.manager.endSinkId.U
val deny_put_ok = edge.manager.mayDenyPut.B
val deny_get_ok = edge.manager.mayDenyGet.B
when (bundle.opcode === TLMessages.ReleaseAck) {
assume (source_ok, "'D' channel ReleaseAck carries invalid source ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel ReleaseAck smaller than a beat" + extra)
      assume (bundle.param === 0.U, "'D' channel ReleaseAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel ReleaseAck is corrupt" + extra)
assume (!bundle.denied, "'D' channel ReleaseAck is denied" + extra)
}
when (bundle.opcode === TLMessages.Grant) {
assume (source_ok, "'D' channel Grant carries invalid source ID" + extra)
assume (sink_ok, "'D' channel Grant carries invalid sink ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel Grant smaller than a beat" + extra)
assume (TLPermissions.isCap(bundle.param), "'D' channel Grant carries invalid cap param" + extra)
assume (bundle.param =/= TLPermissions.toN, "'D' channel Grant carries toN param" + extra)
assume (!bundle.corrupt, "'D' channel Grant is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel Grant is denied" + extra)
}
when (bundle.opcode === TLMessages.GrantData) {
assume (source_ok, "'D' channel GrantData carries invalid source ID" + extra)
assume (sink_ok, "'D' channel GrantData carries invalid sink ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel GrantData smaller than a beat" + extra)
assume (TLPermissions.isCap(bundle.param), "'D' channel GrantData carries invalid cap param" + extra)
assume (bundle.param =/= TLPermissions.toN, "'D' channel GrantData carries toN param" + extra)
assume (!bundle.denied || bundle.corrupt, "'D' channel GrantData is denied but not corrupt" + extra)
assume (deny_get_ok || !bundle.denied, "'D' channel GrantData is denied" + extra)
}
when (bundle.opcode === TLMessages.AccessAck) {
assume (source_ok, "'D' channel AccessAck carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel AccessAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel AccessAck is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel AccessAck is denied" + extra)
}
when (bundle.opcode === TLMessages.AccessAckData) {
assume (source_ok, "'D' channel AccessAckData carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel AccessAckData carries invalid param" + extra)
assume (!bundle.denied || bundle.corrupt, "'D' channel AccessAckData is denied but not corrupt" + extra)
assume (deny_get_ok || !bundle.denied, "'D' channel AccessAckData is denied" + extra)
}
when (bundle.opcode === TLMessages.HintAck) {
assume (source_ok, "'D' channel HintAck carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel HintAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel HintAck is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel HintAck is denied" + extra)
}
}
def legalizeFormatE(bundle: TLBundleE, edge: TLEdge): Unit = {
val sink_ok = bundle.sink < edge.manager.endSinkId.U
    monAssert (sink_ok, "'E' channel carries invalid sink ID" + extra)
}
def legalizeFormat(bundle: TLBundle, edge: TLEdge) = {
when (bundle.a.valid) { legalizeFormatA(bundle.a.bits, edge) }
when (bundle.d.valid) { legalizeFormatD(bundle.d.bits, edge) }
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
when (bundle.b.valid) { legalizeFormatB(bundle.b.bits, edge) }
when (bundle.c.valid) { legalizeFormatC(bundle.c.bits, edge) }
when (bundle.e.valid) { legalizeFormatE(bundle.e.bits, edge) }
} else {
monAssert (!bundle.b.valid, "'B' channel valid and not TL-C" + extra)
monAssert (!bundle.c.valid, "'C' channel valid and not TL-C" + extra)
monAssert (!bundle.e.valid, "'E' channel valid and not TL-C" + extra)
}
}
def legalizeMultibeatA(a: DecoupledIO[TLBundleA], edge: TLEdge): Unit = {
val a_first = edge.first(a.bits, a.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (a.valid && !a_first) {
monAssert (a.bits.opcode === opcode, "'A' channel opcode changed within multibeat operation" + extra)
monAssert (a.bits.param === param, "'A' channel param changed within multibeat operation" + extra)
monAssert (a.bits.size === size, "'A' channel size changed within multibeat operation" + extra)
monAssert (a.bits.source === source, "'A' channel source changed within multibeat operation" + extra)
      monAssert (a.bits.address === address, "'A' channel address changed within multibeat operation" + extra)
}
when (a.fire && a_first) {
opcode := a.bits.opcode
param := a.bits.param
size := a.bits.size
source := a.bits.source
address := a.bits.address
}
}
def legalizeMultibeatB(b: DecoupledIO[TLBundleB], edge: TLEdge): Unit = {
val b_first = edge.first(b.bits, b.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (b.valid && !b_first) {
monAssert (b.bits.opcode === opcode, "'B' channel opcode changed within multibeat operation" + extra)
monAssert (b.bits.param === param, "'B' channel param changed within multibeat operation" + extra)
monAssert (b.bits.size === size, "'B' channel size changed within multibeat operation" + extra)
monAssert (b.bits.source === source, "'B' channel source changed within multibeat operation" + extra)
      monAssert (b.bits.address === address, "'B' channel address changed within multibeat operation" + extra)
}
when (b.fire && b_first) {
opcode := b.bits.opcode
param := b.bits.param
size := b.bits.size
source := b.bits.source
address := b.bits.address
}
}
def legalizeADSourceFormal(bundle: TLBundle, edge: TLEdge): Unit = {
// Symbolic variable
val sym_source = Wire(UInt(edge.client.endSourceId.W))
// TODO: Connect sym_source to a fixed value for simulation and to a
// free wire in formal
sym_source := 0.U
// Type casting Int to UInt
val maxSourceId = Wire(UInt(edge.client.endSourceId.W))
maxSourceId := edge.client.endSourceId.U
    // Delayed version of sym_source
val sym_source_d = Reg(UInt(edge.client.endSourceId.W))
sym_source_d := sym_source
// These will be constraints for FV setup
Property(
MonitorDirection.Monitor,
(sym_source === sym_source_d),
"sym_source should remain stable",
PropertyClass.Default)
Property(
MonitorDirection.Monitor,
(sym_source <= maxSourceId),
"sym_source should take legal value",
PropertyClass.Default)
val my_resp_pend = RegInit(false.B)
val my_opcode = Reg(UInt())
val my_size = Reg(UInt())
val a_first = bundle.a.valid && edge.first(bundle.a.bits, bundle.a.fire)
val d_first = bundle.d.valid && edge.first(bundle.d.bits, bundle.d.fire)
val my_a_first_beat = a_first && (bundle.a.bits.source === sym_source)
val my_d_first_beat = d_first && (bundle.d.bits.source === sym_source)
val my_clr_resp_pend = (bundle.d.fire && my_d_first_beat)
val my_set_resp_pend = (bundle.a.fire && my_a_first_beat && !my_clr_resp_pend)
when (my_set_resp_pend) {
my_resp_pend := true.B
} .elsewhen (my_clr_resp_pend) {
my_resp_pend := false.B
}
when (my_a_first_beat) {
my_opcode := bundle.a.bits.opcode
my_size := bundle.a.bits.size
}
val my_resp_size = Mux(my_a_first_beat, bundle.a.bits.size, my_size)
val my_resp_opcode = Mux(my_a_first_beat, bundle.a.bits.opcode, my_opcode)
val my_resp_opcode_legal = Wire(Bool())
when ((my_resp_opcode === TLMessages.Get) || (my_resp_opcode === TLMessages.ArithmeticData) ||
(my_resp_opcode === TLMessages.LogicalData)) {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAckData)
} .elsewhen ((my_resp_opcode === TLMessages.PutFullData) || (my_resp_opcode === TLMessages.PutPartialData)) {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAck)
} .otherwise {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.HintAck)
}
monAssert (IfThen(my_resp_pend, !my_a_first_beat),
"Request message should not be sent with a source ID, for which a response message" +
"is already pending (not received until current cycle) for a prior request message" +
"with the same source ID" + extra)
assume (IfThen(my_clr_resp_pend, (my_set_resp_pend || my_resp_pend)),
"Response message should be accepted with a source ID only if a request message with the" +
"same source ID has been accepted or is being accepted in the current cycle" + extra)
assume (IfThen(my_d_first_beat, (my_a_first_beat || my_resp_pend)),
"Response message should be sent with a source ID only if a request message with the" +
"same source ID has been accepted or is being sent in the current cycle" + extra)
assume (IfThen(my_d_first_beat, (bundle.d.bits.size === my_resp_size)),
"If d_valid is 1, then d_size should be same as a_size of the corresponding request" +
"message" + extra)
assume (IfThen(my_d_first_beat, my_resp_opcode_legal),
"If d_valid is 1, then d_opcode should correspond with a_opcode of the corresponding" +
"request message" + extra)
}
def legalizeMultibeatC(c: DecoupledIO[TLBundleC], edge: TLEdge): Unit = {
val c_first = edge.first(c.bits, c.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (c.valid && !c_first) {
monAssert (c.bits.opcode === opcode, "'C' channel opcode changed within multibeat operation" + extra)
monAssert (c.bits.param === param, "'C' channel param changed within multibeat operation" + extra)
monAssert (c.bits.size === size, "'C' channel size changed within multibeat operation" + extra)
monAssert (c.bits.source === source, "'C' channel source changed within multibeat operation" + extra)
      monAssert (c.bits.address === address, "'C' channel address changed within multibeat operation" + extra)
}
when (c.fire && c_first) {
opcode := c.bits.opcode
param := c.bits.param
size := c.bits.size
source := c.bits.source
address := c.bits.address
}
}
def legalizeMultibeatD(d: DecoupledIO[TLBundleD], edge: TLEdge): Unit = {
val d_first = edge.first(d.bits, d.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val sink = Reg(UInt())
val denied = Reg(Bool())
when (d.valid && !d_first) {
assume (d.bits.opcode === opcode, "'D' channel opcode changed within multibeat operation" + extra)
assume (d.bits.param === param, "'D' channel param changed within multibeat operation" + extra)
assume (d.bits.size === size, "'D' channel size changed within multibeat operation" + extra)
assume (d.bits.source === source, "'D' channel source changed within multibeat operation" + extra)
      assume (d.bits.sink === sink, "'D' channel sink changed within multibeat operation" + extra)
      assume (d.bits.denied === denied, "'D' channel denied changed within multibeat operation" + extra)
}
when (d.fire && d_first) {
opcode := d.bits.opcode
param := d.bits.param
size := d.bits.size
source := d.bits.source
sink := d.bits.sink
denied := d.bits.denied
}
}
def legalizeMultibeat(bundle: TLBundle, edge: TLEdge): Unit = {
legalizeMultibeatA(bundle.a, edge)
legalizeMultibeatD(bundle.d, edge)
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
legalizeMultibeatB(bundle.b, edge)
legalizeMultibeatC(bundle.c, edge)
}
}
//This is left in for almond which doesn't adhere to the tilelink protocol
@deprecated("Use legalizeADSource instead if possible","")
def legalizeADSourceOld(bundle: TLBundle, edge: TLEdge): Unit = {
val inflight = RegInit(0.U(edge.client.endSourceId.W))
val a_first = edge.first(bundle.a.bits, bundle.a.fire)
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
val a_set = WireInit(0.U(edge.client.endSourceId.W))
when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) {
a_set := UIntToOH(bundle.a.bits.source)
assert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra)
}
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
assume((a_set | inflight)(bundle.d.bits.source), "'D' channel acknowledged for nothing inflight" + extra)
}
if (edge.manager.minLatency > 0) {
assume(a_set =/= d_clr || !a_set.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra)
}
inflight := (inflight | a_set) & ~d_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
assert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeADSource(bundle: TLBundle, edge: TLEdge): Unit = {
val a_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset)
    val a_opcode_bus_size = 3 + 1 //opcode size is 3, but add one so that 0 is not mapped to anything
val log_a_opcode_bus_size = log2Ceil(a_opcode_bus_size)
val log_a_size_bus_size = log2Ceil(a_size_bus_size)
def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits
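    // Illustrative trace of the encoding described above: if a Get (opcode 4) is accepted
    // from source 3, the 4-bit field at bit offset 3 * 4 of inflight_opcodes is set to
    // (4 << 1) | 1 = "b1001"; a_opcode_lookup later masks out that field and shifts right
    // by one to recover 4, while an all-zero field means nothing is outstanding for that
    // source.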
val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) // size up to avoid width error
inflight.suggestName("inflight")
val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
inflight_opcodes.suggestName("inflight_opcodes")
val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
inflight_sizes.suggestName("inflight_sizes")
val a_first = edge.first(bundle.a.bits, bundle.a.fire)
a_first.suggestName("a_first")
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
d_first.suggestName("d_first")
val a_set = WireInit(0.U(edge.client.endSourceId.W))
val a_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
a_set.suggestName("a_set")
a_set_wo_ready.suggestName("a_set_wo_ready")
val a_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
a_opcodes_set.suggestName("a_opcodes_set")
val a_sizes_set = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
a_sizes_set.suggestName("a_sizes_set")
val a_opcode_lookup = WireInit(0.U((a_opcode_bus_size - 1).W))
a_opcode_lookup.suggestName("a_opcode_lookup")
a_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_a_opcode_bus_size.U) & size_to_numfullbits(1.U << log_a_opcode_bus_size.U)) >> 1.U
val a_size_lookup = WireInit(0.U((1 << log_a_size_bus_size).W))
a_size_lookup.suggestName("a_size_lookup")
a_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_a_size_bus_size.U) & size_to_numfullbits(1.U << log_a_size_bus_size.U)) >> 1.U
val responseMap = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.Grant, TLMessages.Grant))
val responseMapSecondOption = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.GrantData, TLMessages.Grant))
val a_opcodes_set_interm = WireInit(0.U(a_opcode_bus_size.W))
a_opcodes_set_interm.suggestName("a_opcodes_set_interm")
val a_sizes_set_interm = WireInit(0.U(a_size_bus_size.W))
a_sizes_set_interm.suggestName("a_sizes_set_interm")
when (bundle.a.valid && a_first && edge.isRequest(bundle.a.bits)) {
a_set_wo_ready := UIntToOH(bundle.a.bits.source)
}
when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) {
a_set := UIntToOH(bundle.a.bits.source)
a_opcodes_set_interm := (bundle.a.bits.opcode << 1.U) | 1.U
a_sizes_set_interm := (bundle.a.bits.size << 1.U) | 1.U
a_opcodes_set := (a_opcodes_set_interm) << (bundle.a.bits.source << log_a_opcode_bus_size.U)
a_sizes_set := (a_sizes_set_interm) << (bundle.a.bits.source << log_a_size_bus_size.U)
monAssert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra)
}
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
d_clr.suggestName("d_clr")
d_clr_wo_ready.suggestName("d_clr_wo_ready")
val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
d_opcodes_clr.suggestName("d_opcodes_clr")
val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
d_sizes_clr.suggestName("d_sizes_clr")
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr_wo_ready := UIntToOH(bundle.d.bits.source)
}
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
d_opcodes_clr := size_to_numfullbits(1.U << log_a_opcode_bus_size.U) << (bundle.d.bits.source << log_a_opcode_bus_size.U)
d_sizes_clr := size_to_numfullbits(1.U << log_a_size_bus_size.U) << (bundle.d.bits.source << log_a_size_bus_size.U)
}
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
val same_cycle_resp = bundle.a.valid && a_first && edge.isRequest(bundle.a.bits) && (bundle.a.bits.source === bundle.d.bits.source)
assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra)
when (same_cycle_resp) {
assume((bundle.d.bits.opcode === responseMap(bundle.a.bits.opcode)) ||
(bundle.d.bits.opcode === responseMapSecondOption(bundle.a.bits.opcode)), "'D' channel contains improper opcode response" + extra)
assume((bundle.a.bits.size === bundle.d.bits.size), "'D' channel contains improper response size" + extra)
} .otherwise {
assume((bundle.d.bits.opcode === responseMap(a_opcode_lookup)) ||
(bundle.d.bits.opcode === responseMapSecondOption(a_opcode_lookup)), "'D' channel contains improper opcode response" + extra)
assume((bundle.d.bits.size === a_size_lookup), "'D' channel contains improper response size" + extra)
}
}
when(bundle.d.valid && d_first && a_first && bundle.a.valid && (bundle.a.bits.source === bundle.d.bits.source) && !d_release_ack) {
assume((!bundle.d.ready) || bundle.a.ready, "ready check")
}
if (edge.manager.minLatency > 0) {
assume(a_set_wo_ready =/= d_clr_wo_ready || !a_set_wo_ready.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra)
}
inflight := (inflight | a_set) & ~d_clr
inflight_opcodes := (inflight_opcodes | a_opcodes_set) & ~d_opcodes_clr
inflight_sizes := (inflight_sizes | a_sizes_set) & ~d_sizes_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeCDSource(bundle: TLBundle, edge: TLEdge): Unit = {
val c_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset)
    val c_opcode_bus_size = 3 + 1 //opcode size is 3, but add one so that 0 is not mapped to anything
val log_c_opcode_bus_size = log2Ceil(c_opcode_bus_size)
val log_c_size_bus_size = log2Ceil(c_size_bus_size)
def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits
val inflight = RegInit(0.U((2 max edge.client.endSourceId).W))
val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
inflight.suggestName("inflight")
inflight_opcodes.suggestName("inflight_opcodes")
inflight_sizes.suggestName("inflight_sizes")
val c_first = edge.first(bundle.c.bits, bundle.c.fire)
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
c_first.suggestName("c_first")
d_first.suggestName("d_first")
val c_set = WireInit(0.U(edge.client.endSourceId.W))
val c_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
val c_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val c_sizes_set = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
c_set.suggestName("c_set")
c_set_wo_ready.suggestName("c_set_wo_ready")
c_opcodes_set.suggestName("c_opcodes_set")
c_sizes_set.suggestName("c_sizes_set")
val c_opcode_lookup = WireInit(0.U((1 << log_c_opcode_bus_size).W))
val c_size_lookup = WireInit(0.U((1 << log_c_size_bus_size).W))
c_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_c_opcode_bus_size.U) & size_to_numfullbits(1.U << log_c_opcode_bus_size.U)) >> 1.U
c_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_c_size_bus_size.U) & size_to_numfullbits(1.U << log_c_size_bus_size.U)) >> 1.U
c_opcode_lookup.suggestName("c_opcode_lookup")
c_size_lookup.suggestName("c_size_lookup")
val c_opcodes_set_interm = WireInit(0.U(c_opcode_bus_size.W))
val c_sizes_set_interm = WireInit(0.U(c_size_bus_size.W))
c_opcodes_set_interm.suggestName("c_opcodes_set_interm")
c_sizes_set_interm.suggestName("c_sizes_set_interm")
when (bundle.c.valid && c_first && edge.isRequest(bundle.c.bits)) {
c_set_wo_ready := UIntToOH(bundle.c.bits.source)
}
when (bundle.c.fire && c_first && edge.isRequest(bundle.c.bits)) {
c_set := UIntToOH(bundle.c.bits.source)
c_opcodes_set_interm := (bundle.c.bits.opcode << 1.U) | 1.U
c_sizes_set_interm := (bundle.c.bits.size << 1.U) | 1.U
c_opcodes_set := (c_opcodes_set_interm) << (bundle.c.bits.source << log_c_opcode_bus_size.U)
c_sizes_set := (c_sizes_set_interm) << (bundle.c.bits.source << log_c_size_bus_size.U)
monAssert(!inflight(bundle.c.bits.source), "'C' channel re-used a source ID" + extra)
}
val c_probe_ack = bundle.c.bits.opcode === TLMessages.ProbeAck || bundle.c.bits.opcode === TLMessages.ProbeAckData
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
d_clr.suggestName("d_clr")
d_clr_wo_ready.suggestName("d_clr_wo_ready")
d_opcodes_clr.suggestName("d_opcodes_clr")
d_sizes_clr.suggestName("d_sizes_clr")
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
d_clr_wo_ready := UIntToOH(bundle.d.bits.source)
}
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
d_opcodes_clr := size_to_numfullbits(1.U << log_c_opcode_bus_size.U) << (bundle.d.bits.source << log_c_opcode_bus_size.U)
d_sizes_clr := size_to_numfullbits(1.U << log_c_size_bus_size.U) << (bundle.d.bits.source << log_c_size_bus_size.U)
}
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
val same_cycle_resp = bundle.c.valid && c_first && edge.isRequest(bundle.c.bits) && (bundle.c.bits.source === bundle.d.bits.source)
assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra)
when (same_cycle_resp) {
assume((bundle.d.bits.size === bundle.c.bits.size), "'D' channel contains improper response size" + extra)
} .otherwise {
assume((bundle.d.bits.size === c_size_lookup), "'D' channel contains improper response size" + extra)
}
}
when(bundle.d.valid && d_first && c_first && bundle.c.valid && (bundle.c.bits.source === bundle.d.bits.source) && d_release_ack && !c_probe_ack) {
assume((!bundle.d.ready) || bundle.c.ready, "ready check")
}
if (edge.manager.minLatency > 0) {
when (c_set_wo_ready.orR) {
assume(c_set_wo_ready =/= d_clr_wo_ready, s"'C' and 'D' concurrent, despite minlatency > 0" + extra)
}
}
inflight := (inflight | c_set) & ~d_clr
inflight_opcodes := (inflight_opcodes | c_opcodes_set) & ~d_opcodes_clr
inflight_sizes := (inflight_sizes | c_sizes_set) & ~d_sizes_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.c.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeDESink(bundle: TLBundle, edge: TLEdge): Unit = {
val inflight = RegInit(0.U(edge.manager.endSinkId.W))
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
val e_first = true.B
val d_set = WireInit(0.U(edge.manager.endSinkId.W))
when (bundle.d.fire && d_first && edge.isRequest(bundle.d.bits)) {
d_set := UIntToOH(bundle.d.bits.sink)
assume(!inflight(bundle.d.bits.sink), "'D' channel re-used a sink ID" + extra)
}
val e_clr = WireInit(0.U(edge.manager.endSinkId.W))
when (bundle.e.fire && e_first && edge.isResponse(bundle.e.bits)) {
e_clr := UIntToOH(bundle.e.bits.sink)
monAssert((d_set | inflight)(bundle.e.bits.sink), "'E' channel acknowledged for nothing inflight" + extra)
}
// edge.client.minLatency applies to BC, not DE
inflight := (inflight | d_set) & ~e_clr
}
def legalizeUnique(bundle: TLBundle, edge: TLEdge): Unit = {
val sourceBits = log2Ceil(edge.client.endSourceId)
val tooBig = 14 // >16kB worth of flight information gets to be too much
if (sourceBits > tooBig) {
println(s"WARNING: TLMonitor instantiated on a bus with source bits (${sourceBits}) > ${tooBig}; A=>D transaction flight will not be checked")
} else {
if (args.edge.params(TestplanTestType).simulation) {
if (args.edge.params(TLMonitorStrictMode)) {
legalizeADSource(bundle, edge)
legalizeCDSource(bundle, edge)
} else {
legalizeADSourceOld(bundle, edge)
}
}
if (args.edge.params(TestplanTestType).formal) {
legalizeADSourceFormal(bundle, edge)
}
}
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
// legalizeBCSourceAddress(bundle, edge) // too much state needed to synthesize...
val sinkBits = log2Ceil(edge.manager.endSinkId)
if (sinkBits > tooBig) {
println(s"WARNING: TLMonitor instantiated on a bus with sink bits (${sinkBits}) > ${tooBig}; D=>E transaction flight will not be checked")
} else {
legalizeDESink(bundle, edge)
}
}
}
def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit = {
legalizeFormat (bundle, edge)
legalizeMultibeat (bundle, edge)
legalizeUnique (bundle, edge)
}
}
File Misc.scala:
// See LICENSE.Berkeley for license details.
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util._
import chisel3.util.random.LFSR
import org.chipsalliance.cde.config.Parameters
import scala.math._
class ParameterizedBundle(implicit p: Parameters) extends Bundle
trait Clocked extends Bundle {
val clock = Clock()
val reset = Bool()
}
object DecoupledHelper {
def apply(rvs: Bool*) = new DecoupledHelper(rvs)
}
class DecoupledHelper(val rvs: Seq[Bool]) {
def fire(exclude: Bool, includes: Bool*) = {
require(rvs.contains(exclude), "Excluded Bool not present in DecoupledHelper! Note that DecoupledHelper uses referential equality for exclusion! If you don't want to exclude anything, use fire()!")
(rvs.filter(_ ne exclude) ++ includes).reduce(_ && _)
}
def fire() = {
rvs.reduce(_ && _)
}
}
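// A minimal usage sketch (all names are illustrative): when a transfer may only happen
// once several ready/valid-style conditions agree, build one helper from all of them and
// derive each signal from "everyone else is ready":
//   val helper = DecoupledHelper(in.valid, out.ready, tokenAvailable)
//   in.ready  := helper.fire(in.valid)   // exclude our own valid
//   out.valid := helper.fire(out.ready)  // exclude our own ready
//   when (helper.fire()) { /* the transfer happens this cycle */ }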
object MuxT {
def apply[T <: Data, U <: Data](cond: Bool, con: (T, U), alt: (T, U)): (T, U) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2))
def apply[T <: Data, U <: Data, W <: Data](cond: Bool, con: (T, U, W), alt: (T, U, W)): (T, U, W) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3))
def apply[T <: Data, U <: Data, W <: Data, X <: Data](cond: Bool, con: (T, U, W, X), alt: (T, U, W, X)): (T, U, W, X) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3), Mux(cond, con._4, alt._4))
}
/** Creates a cascade of n MuxTs to search for a key value. */
object MuxTLookup {
def apply[S <: UInt, T <: Data, U <: Data](key: S, default: (T, U), mapping: Seq[(S, (T, U))]): (T, U) = {
var res = default
for ((k, v) <- mapping.reverse)
res = MuxT(k === key, v, res)
res
}
def apply[S <: UInt, T <: Data, U <: Data, W <: Data](key: S, default: (T, U, W), mapping: Seq[(S, (T, U, W))]): (T, U, W) = {
var res = default
for ((k, v) <- mapping.reverse)
res = MuxT(k === key, v, res)
res
}
}
object ValidMux {
def apply[T <: Data](v1: ValidIO[T], v2: ValidIO[T]*): ValidIO[T] = {
apply(v1 +: v2.toSeq)
}
def apply[T <: Data](valids: Seq[ValidIO[T]]): ValidIO[T] = {
val out = Wire(Valid(valids.head.bits.cloneType))
out.valid := valids.map(_.valid).reduce(_ || _)
out.bits := MuxCase(valids.head.bits,
valids.map(v => (v.valid -> v.bits)))
out
}
}
object Str
{
def apply(s: String): UInt = {
var i = BigInt(0)
require(s.forall(validChar _))
for (c <- s)
i = (i << 8) | c
i.U((s.length*8).W)
}
def apply(x: Char): UInt = {
require(validChar(x))
x.U(8.W)
}
def apply(x: UInt): UInt = apply(x, 10)
def apply(x: UInt, radix: Int): UInt = {
val rad = radix.U
val w = x.getWidth
require(w > 0)
var q = x
var s = digit(q % rad)
for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
q = q / rad
s = Cat(Mux((radix == 10).B && q === 0.U, Str(' '), digit(q % rad)), s)
}
s
}
def apply(x: SInt): UInt = apply(x, 10)
def apply(x: SInt, radix: Int): UInt = {
val neg = x < 0.S
val abs = x.abs.asUInt
if (radix != 10) {
Cat(Mux(neg, Str('-'), Str(' ')), Str(abs, radix))
} else {
val rad = radix.U
val w = abs.getWidth
require(w > 0)
var q = abs
var s = digit(q % rad)
var needSign = neg
for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
q = q / rad
val placeSpace = q === 0.U
val space = Mux(needSign, Str('-'), Str(' '))
needSign = needSign && !placeSpace
s = Cat(Mux(placeSpace, space, digit(q % rad)), s)
}
Cat(Mux(needSign, Str('-'), Str(' ')), s)
}
}
private def digit(d: UInt): UInt = Mux(d < 10.U, Str('0')+d, Str(('a'-10).toChar)+d)(7,0)
private def validChar(x: Char) = x == (x & 0xFF)
}
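// Worked example: Str("ok") elaborates to 0x6f6b.U(16.W) (one byte per character), and
// Str(counter) produces the ASCII decimal digits of `counter`, padded on the left with
// spaces, which is convenient for fixed-width printf-style logging in simulation.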
object Split
{
def apply(x: UInt, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n0), x.extract(n0-1,0))
}
def apply(x: UInt, n1: Int, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0))
}
def apply(x: UInt, n2: Int, n1: Int, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n2), x.extract(n2-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0))
}
}
object Random
{
def apply(mod: Int, random: UInt): UInt = {
if (isPow2(mod)) random.extract(log2Ceil(mod)-1,0)
else PriorityEncoder(partition(apply(1 << log2Up(mod*8), random), mod))
}
def apply(mod: Int): UInt = apply(mod, randomizer)
def oneHot(mod: Int, random: UInt): UInt = {
if (isPow2(mod)) UIntToOH(random(log2Up(mod)-1,0))
else PriorityEncoderOH(partition(apply(1 << log2Up(mod*8), random), mod)).asUInt
}
def oneHot(mod: Int): UInt = oneHot(mod, randomizer)
private def randomizer = LFSR(16)
private def partition(value: UInt, slices: Int) =
Seq.tabulate(slices)(i => value < (((i + 1) << value.getWidth) / slices).U)
}
object Majority {
def apply(in: Set[Bool]): Bool = {
val n = (in.size >> 1) + 1
val clauses = in.subsets(n).map(_.reduce(_ && _))
clauses.reduce(_ || _)
}
def apply(in: Seq[Bool]): Bool = apply(in.toSet)
def apply(in: UInt): Bool = apply(in.asBools.toSet)
}
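// Worked example: Majority("b101".U) evaluates to true (two of the three bits are set);
// for inputs a, b, c the generated circuit is (a && b) || (a && c) || (b && c), i.e. the
// OR of the AND over every (n/2 + 1)-element subset.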
object PopCountAtLeast {
private def two(x: UInt): (Bool, Bool) = x.getWidth match {
case 1 => (x.asBool, false.B)
case n =>
val half = x.getWidth / 2
val (leftOne, leftTwo) = two(x(half - 1, 0))
val (rightOne, rightTwo) = two(x(x.getWidth - 1, half))
(leftOne || rightOne, leftTwo || rightTwo || (leftOne && rightOne))
}
def apply(x: UInt, n: Int): Bool = n match {
case 0 => true.B
case 1 => x.orR
case 2 => two(x)._2
case 3 => PopCount(x) >= n.U
}
}
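// A minimal usage sketch (names are illustrative): a cache can flag a multi-way hit with
//   val multiHit = PopCountAtLeast(wayHits.asUInt, 2)
// which the recursive two() helper builds from a small OR/AND tree per half, rather than
// a full adder-based PopCount followed by a comparison.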
// This gets used everywhere, so make the smallest circuit possible ...
// Given an address and size, create a mask of beatBytes size
// eg: (0x3, 0, 4) => 0001, (0x3, 1, 4) => 0011, (0x3, 2, 4) => 1111
// groupBy applies an interleaved OR reduction; e.g. groupBy=2 takes 0010 => 01
object MaskGen {
def apply(addr_lo: UInt, lgSize: UInt, beatBytes: Int, groupBy: Int = 1): UInt = {
require (groupBy >= 1 && beatBytes >= groupBy)
require (isPow2(beatBytes) && isPow2(groupBy))
val lgBytes = log2Ceil(beatBytes)
val sizeOH = UIntToOH(lgSize | 0.U(log2Up(beatBytes).W), log2Up(beatBytes)) | (groupBy*2 - 1).U
def helper(i: Int): Seq[(Bool, Bool)] = {
if (i == 0) {
Seq((lgSize >= lgBytes.asUInt, true.B))
} else {
val sub = helper(i-1)
val size = sizeOH(lgBytes - i)
val bit = addr_lo(lgBytes - i)
val nbit = !bit
Seq.tabulate (1 << i) { j =>
val (sub_acc, sub_eq) = sub(j/2)
val eq = sub_eq && (if (j % 2 == 1) bit else nbit)
val acc = sub_acc || (size && eq)
(acc, eq)
}
}
}
if (groupBy == beatBytes) 1.U else
Cat(helper(lgBytes-log2Ceil(groupBy)).map(_._1).reverse)
}
}
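// A minimal usage sketch (hypothetical module, assuming chisel3._ is in scope): build a
// byte-lane mask for a 4-byte beat, matching the examples above, e.g. (0x3, 2, 4) => 1111.
class MaskGenExample extends Module {
  val io = IO(new Bundle {
    val addr   = Input(UInt(2.W))  // byte offset of the access within the beat
    val lgSize = Input(UInt(2.W))  // log2 of the access size in bytes
    val mask   = Output(UInt(4.W)) // one bit per byte lane
  })
  io.mask := MaskGen(io.addr, io.lgSize, beatBytes = 4)
}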
File PlusArg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.experimental._
import chisel3.util.HasBlackBoxResource
@deprecated("This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05")
case class PlusArgInfo(default: BigInt, docstring: String)
/** Case class for PlusArg information
*
* @tparam A scala type of the PlusArg value
* @param default optional default value
* @param docstring text to include in the help
* @param doctype description of the Verilog type of the PlusArg value (e.g. STRING, INT)
*/
private case class PlusArgContainer[A](default: Option[A], docstring: String, doctype: String)
/** Typeclass for converting a type to a doctype string
* @tparam A some type
*/
trait Doctypeable[A] {
/** Return the doctype string for some option */
def toDoctype(a: Option[A]): String
}
/** Object containing implementations of the Doctypeable typeclass */
object Doctypes {
/** Converts an Int => "INT" */
implicit val intToDoctype = new Doctypeable[Int] { def toDoctype(a: Option[Int]) = "INT" }
/** Converts a BigInt => "INT" */
implicit val bigIntToDoctype = new Doctypeable[BigInt] { def toDoctype(a: Option[BigInt]) = "INT" }
/** Converts a String => "STRING" */
implicit val stringToDoctype = new Doctypeable[String] { def toDoctype(a: Option[String]) = "STRING" }
}
class plusarg_reader(val format: String, val default: BigInt, val docstring: String, val width: Int) extends BlackBox(Map(
"FORMAT" -> StringParam(format),
"DEFAULT" -> IntParam(default),
"WIDTH" -> IntParam(width)
)) with HasBlackBoxResource {
val io = IO(new Bundle {
val out = Output(UInt(width.W))
})
addResource("/vsrc/plusarg_reader.v")
}
/* This wrapper class has no outputs, making it clear it is a simulation-only construct */
class PlusArgTimeout(val format: String, val default: BigInt, val docstring: String, val width: Int) extends Module {
val io = IO(new Bundle {
val count = Input(UInt(width.W))
})
val max = Module(new plusarg_reader(format, default, docstring, width)).io.out
when (max > 0.U) {
assert (io.count < max, s"Timeout exceeded: $docstring")
}
}
import Doctypes._
object PlusArg
{
/** PlusArg("foo") will return 42.U if the simulation is run with +foo=42
* Do not use this as an initial register value. The value is set in an
* initial block and thus accessing it from another initial is racy.
* Add a docstring to document the arg, which can be dumped in an elaboration
* pass.
*/
def apply(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32): UInt = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new plusarg_reader(name + "=%d", default, docstring, width)).io.out
}
/** PlusArg.timeout(name, default, docstring)(count) will use chisel.assert
* to kill the simulation when count exceeds the specified integer argument.
* Default 0 will never assert.
*/
def timeout(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32)(count: UInt): Unit = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new PlusArgTimeout(name + "=%d", default, docstring, width)).io.count := count
}
}
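// A minimal usage sketch (hypothetical names): read a +verbose_after=<n> plusarg to gate
// simulation prints, and let +max_cycles=<n> kill a runaway simulation.
class PlusArgUsageExample extends Module {
  val cycles = RegInit(0.U(32.W))
  cycles := cycles + 1.U
  val verboseAfter = PlusArg("verbose_after", default = 0, docstring = "cycle at which to enable prints")
  when (cycles >= verboseAfter) {
    printf("cycle %d\n", cycles)
  }
  // default = 0 disables the timeout assertion unless the plusarg is supplied
  PlusArg.timeout("max_cycles", default = 0, docstring = "kill the simulation after this many cycles")(cycles)
}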
object PlusArgArtefacts {
private var artefacts: Map[String, PlusArgContainer[_]] = Map.empty
/* Add a new PlusArg */
@deprecated(
"Use `Some(BigInt)` to specify a `default` value. This will be removed in Rocket Chip 2020.08",
"Rocket Chip 2020.05"
)
def append(name: String, default: BigInt, docstring: String): Unit = append(name, Some(default), docstring)
/** Add a new PlusArg
*
* @tparam A scala type of the PlusArg value
* @param name name for the PlusArg
* @param default optional default value
* @param docstring text to include in the help
*/
def append[A : Doctypeable](name: String, default: Option[A], docstring: String): Unit =
artefacts = artefacts ++
Map(name -> PlusArgContainer(default, docstring, implicitly[Doctypeable[A]].toDoctype(default)))
/* From plus args, generate help text */
private def serializeHelp_cHeader(tab: String = ""): String = artefacts
.map{ case(arg, info) =>
s"""|$tab+$arg=${info.doctype}\\n\\
|$tab${" "*20}${info.docstring}\\n\\
|""".stripMargin ++ info.default.map{ case default =>
s"$tab${" "*22}(default=${default})\\n\\\n"}.getOrElse("")
}.toSeq.mkString("\\n\\\n") ++ "\""
/* From plus args, generate a char array of their names */
private def serializeArray_cHeader(tab: String = ""): String = {
val prettyTab = tab + " " * 44 // Length of 'static const ...'
s"${tab}static const char * verilog_plusargs [] = {\\\n" ++
artefacts
.map{ case(arg, _) => s"""$prettyTab"$arg",\\\n""" }
.mkString("")++
s"${prettyTab}0};"
}
/* Generate C code to be included in emulator.cc that helps with
* argument parsing based on available Verilog PlusArgs */
def serialize_cHeader(): String =
s"""|#define PLUSARG_USAGE_OPTIONS \"EMULATOR VERILOG PLUSARGS\\n\\
|${serializeHelp_cHeader(" "*7)}
|${serializeArray_cHeader()}
|""".stripMargin
}
File package.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip
import chisel3._
import chisel3.util._
import scala.math.min
import scala.collection.{immutable, mutable}
package object util {
implicit class UnzippableOption[S, T](val x: Option[(S, T)]) {
def unzip = (x.map(_._1), x.map(_._2))
}
implicit class UIntIsOneOf(private val x: UInt) extends AnyVal {
def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR
def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq)
}
implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal {
/** Like Vec.apply(idx), but tolerates indices of mismatched width */
def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0))
}
implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal {
def apply(idx: UInt): T = {
if (x.size <= 1) {
x.head
} else if (!isPow2(x.size)) {
// For non-power-of-2 seqs, reflect elements to simplify decoder
(x ++ x.takeRight(x.size & -x.size)).toSeq(idx)
} else {
// Ignore MSBs of idx
val truncIdx =
if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx
else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0)
x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) }
}
}
def extract(idx: UInt): T = VecInit(x).extract(idx)
def asUInt: UInt = Cat(x.map(_.asUInt).reverse)
def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n)
def rotate(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n)
def rotateRight(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
}
// allow bitwise ops on Seq[Bool] just like UInt
implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal {
def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b }
def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b }
def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b }
def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x
def >> (n: Int): Seq[Bool] = x drop n
def unary_~ : Seq[Bool] = x.map(!_)
def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_)
def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_)
def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_)
private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B)
}
implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal {
def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable))
def getElements: Seq[Element] = x match {
case e: Element => Seq(e)
case a: Aggregate => a.getElements.flatMap(_.getElements)
}
}
/** Any Data subtype that has a Bool member named valid. */
type DataCanBeValid = Data { val valid: Bool }
implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal {
def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable)
}
implicit class StringToAugmentedString(private val x: String) extends AnyVal {
/** converts from camel case to underscores, also removing all spaces */
def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") {
case (acc, c) if c.isUpper => acc + "_" + c.toLower
case (acc, c) if c == ' ' => acc
case (acc, c) => acc + c
}
/** converts spaces or underscores to hyphens, also lowering case */
def kebab: String = x.toLowerCase map {
case ' ' => '-'
case '_' => '-'
case c => c
}
def named(name: Option[String]): String = {
x + name.map("_named_" + _ ).getOrElse("_with_no_name")
}
def named(name: String): String = named(Some(name))
}
implicit def uintToBitPat(x: UInt): BitPat = BitPat(x)
implicit def wcToUInt(c: WideCounter): UInt = c.value
implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal {
def sextTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x)
}
def padTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(0.U((n - x.getWidth).W), x)
}
// shifts left by n if n >= 0, or right by -n if n < 0
def << (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << n(w-1, 0)
Mux(n(w), shifted >> (1 << w), shifted)
}
// shifts right by n if n >= 0, or left by -n if n < 0
def >> (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << (1 << w) >> n(w-1, 0)
Mux(n(w), shifted, shifted >> (1 << w))
}
// Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts
def extract(hi: Int, lo: Int): UInt = {
require(hi >= lo-1)
if (hi == lo-1) 0.U
else x(hi, lo)
}
// Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts
def extractOption(hi: Int, lo: Int): Option[UInt] = {
require(hi >= lo-1)
if (hi == lo-1) None
else Some(x(hi, lo))
}
// like x & ~y, but first truncate or zero-extend y to x's width
def andNot(y: UInt): UInt = x & ~(y | (x & 0.U))
def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n)
def rotateRight(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r))
}
}
def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n))
def rotateLeft(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r))
}
}
// compute (this + y) % n, given (this < n) and (y < n)
def addWrap(y: UInt, n: Int): UInt = {
val z = x +& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0)
}
// compute (this - y) % n, given (this < n) and (y < n)
def subWrap(y: UInt, n: Int): UInt = {
val z = x -& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0)
}
def grouped(width: Int): Seq[UInt] =
(0 until x.getWidth by width).map(base => x(base + width - 1, base))
def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds
def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x)
// Like >=, but prevents x-prop for ('x >= 0)
def >== (y: UInt): Bool = x >= y || y === 0.U
}
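// A minimal usage sketch (hypothetical module) of the UInt augmentations above: wrap-around
// pointer arithmetic for a 12-entry structure and a dynamic rotate built from the helpers.
class UIntAugmentationExample extends Module {
  val io = IO(new Bundle {
    val bump = Input(Bool())
    val amt  = Input(UInt(3.W))
    val ptr  = Output(UInt(4.W))
    val rot  = Output(UInt(8.W))
  })
  val ptr = RegInit(0.U(4.W))
  when (io.bump) { ptr := ptr.addWrap(1.U, 12) } // (ptr + 1) % 12 without a divider
  io.ptr := ptr
  io.rot := "h81".U(8.W).rotateLeft(io.amt)
}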
implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal {
def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y)
def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y)
}
implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal {
def toInt: Int = if (x) 1 else 0
// this one's snagged from scalaz
def option[T](z: => T): Option[T] = if (x) Some(z) else None
}
implicit class IntToAugmentedInt(private val x: Int) extends AnyVal {
// exact log2
def log2: Int = {
require(isPow2(x))
log2Ceil(x)
}
}
def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x)
def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x))
def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0)
def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1)
def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None
// Fill 1s from low bits to high bits
def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth)
def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0))
helper(1, x)(width-1, 0)
}
// Fill 1s from high bits to low bits
def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth)
def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x >> s))
helper(1, x)(width-1, 0)
}
def OptimizationBarrier[T <: Data](in: T): T = {
val barrier = Module(new Module {
val io = IO(new Bundle {
val x = Input(chiselTypeOf(in))
val y = Output(chiselTypeOf(in))
})
io.y := io.x
override def desiredName = s"OptimizationBarrier_${in.typeName}"
})
barrier.io.x := in
barrier.io.y
}
/** Similar to Seq.groupBy except this returns a Seq instead of a Map
* Useful for deterministic code generation
*/
def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = {
val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]]
for (x <- xs) {
val key = f(x)
val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A])
l += x
}
map.view.map({ case (k, vs) => k -> vs.toList }).toList
}
def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match {
case 1 => List.fill(n)(in.head)
case x if x == n => in
case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in")
}
// HeterogeneousBag moved to standalone diplomacy
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts)
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag
}
File Bundles.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import freechips.rocketchip.util._
import scala.collection.immutable.ListMap
import chisel3.util.Decoupled
import chisel3.util.DecoupledIO
import chisel3.reflect.DataMirror
abstract class TLBundleBase(val params: TLBundleParameters) extends Bundle
// common combos in lazy policy:
// Put + Acquire
// Release + AccessAck
object TLMessages
{
// A B C D E
def PutFullData = 0.U // . . => AccessAck
def PutPartialData = 1.U // . . => AccessAck
def ArithmeticData = 2.U // . . => AccessAckData
def LogicalData = 3.U // . . => AccessAckData
def Get = 4.U // . . => AccessAckData
def Hint = 5.U // . . => HintAck
def AcquireBlock = 6.U // . => Grant[Data]
def AcquirePerm = 7.U // . => Grant[Data]
def Probe = 6.U // . => ProbeAck[Data]
def AccessAck = 0.U // . .
def AccessAckData = 1.U // . .
def HintAck = 2.U // . .
def ProbeAck = 4.U // .
def ProbeAckData = 5.U // .
def Release = 6.U // . => ReleaseAck
def ReleaseData = 7.U // . => ReleaseAck
def Grant = 4.U // . => GrantAck
def GrantData = 5.U // . => GrantAck
def ReleaseAck = 6.U // .
def GrantAck = 0.U // .
def isA(x: UInt) = x <= AcquirePerm
def isB(x: UInt) = x <= Probe
def isC(x: UInt) = x <= ReleaseData
def isD(x: UInt) = x <= ReleaseAck
def adResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, Grant, Grant)
def bcResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, ProbeAck, ProbeAck)
def a = Seq( ("PutFullData",TLPermissions.PermMsgReserved),
("PutPartialData",TLPermissions.PermMsgReserved),
("ArithmeticData",TLAtomics.ArithMsg),
("LogicalData",TLAtomics.LogicMsg),
("Get",TLPermissions.PermMsgReserved),
("Hint",TLHints.HintsMsg),
("AcquireBlock",TLPermissions.PermMsgGrow),
("AcquirePerm",TLPermissions.PermMsgGrow))
def b = Seq( ("PutFullData",TLPermissions.PermMsgReserved),
("PutPartialData",TLPermissions.PermMsgReserved),
("ArithmeticData",TLAtomics.ArithMsg),
("LogicalData",TLAtomics.LogicMsg),
("Get",TLPermissions.PermMsgReserved),
("Hint",TLHints.HintsMsg),
("Probe",TLPermissions.PermMsgCap))
def c = Seq( ("AccessAck",TLPermissions.PermMsgReserved),
("AccessAckData",TLPermissions.PermMsgReserved),
("HintAck",TLPermissions.PermMsgReserved),
("Invalid Opcode",TLPermissions.PermMsgReserved),
("ProbeAck",TLPermissions.PermMsgReport),
("ProbeAckData",TLPermissions.PermMsgReport),
("Release",TLPermissions.PermMsgReport),
("ReleaseData",TLPermissions.PermMsgReport))
def d = Seq( ("AccessAck",TLPermissions.PermMsgReserved),
("AccessAckData",TLPermissions.PermMsgReserved),
("HintAck",TLPermissions.PermMsgReserved),
("Invalid Opcode",TLPermissions.PermMsgReserved),
("Grant",TLPermissions.PermMsgCap),
("GrantData",TLPermissions.PermMsgCap),
("ReleaseAck",TLPermissions.PermMsgReserved))
}
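// A minimal usage sketch (hypothetical module): the adResponse table above maps an A-channel
// opcode to the D-channel opcode expected in response, e.g. Get => AccessAckData.
class ExpectedResponseExample extends Module {
  val io = IO(new Bundle {
    val aOpcode = Input(UInt(3.W))
    val dOpcode = Output(UInt(3.W))
  })
  io.dOpcode := TLMessages.adResponse(io.aOpcode)
}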
/**
* The three primary TileLink permissions are:
* (T)runk: the agent is (or is on inwards path to) the global point of serialization.
* (B)ranch: the agent is on an outwards path from the Trunk and may hold a read-only copy.
* (N)one: the agent holds no permissions on the block.
* These permissions are permuted by transfer operations in various ways.
* Operations can cap permissions, request for them to be grown or shrunk,
* or for a report on their current status.
*/
object TLPermissions
{
val aWidth = 2
val bdWidth = 2
val cWidth = 3
// Cap types (Grant = new permissions, Probe = permissions <= target)
def toT = 0.U(bdWidth.W)
def toB = 1.U(bdWidth.W)
def toN = 2.U(bdWidth.W)
def isCap(x: UInt) = x <= toN
// Grow types (Acquire = permissions >= target)
def NtoB = 0.U(aWidth.W)
def NtoT = 1.U(aWidth.W)
def BtoT = 2.U(aWidth.W)
def isGrow(x: UInt) = x <= BtoT
// Shrink types (ProbeAck, Release)
def TtoB = 0.U(cWidth.W)
def TtoN = 1.U(cWidth.W)
def BtoN = 2.U(cWidth.W)
def isShrink(x: UInt) = x <= BtoN
// Report types (ProbeAck, Release)
def TtoT = 3.U(cWidth.W)
def BtoB = 4.U(cWidth.W)
def NtoN = 5.U(cWidth.W)
def isReport(x: UInt) = x <= NtoN
def PermMsgGrow:Seq[String] = Seq("Grow NtoB", "Grow NtoT", "Grow BtoT")
def PermMsgCap:Seq[String] = Seq("Cap toT", "Cap toB", "Cap toN")
def PermMsgReport:Seq[String] = Seq("Shrink TtoB", "Shrink TtoN", "Shrink BtoN", "Report TtoT", "Report BtoB", "Report NtoN")
def PermMsgReserved:Seq[String] = Seq("Reserved")
}
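// A minimal usage sketch (hypothetical module): the predicates above classify each channel's
// param field, e.g. Grant carries a cap (toT confers write permission), while ProbeAck
// carries a shrink or report parameter.
class PermissionParamExample extends Module {
  val io = IO(new Bundle {
    val grantParam    = Input(UInt(TLPermissions.bdWidth.W))
    val probeAckParam = Input(UInt(TLPermissions.cWidth.W))
    val grantsWrite   = Output(Bool())
    val keptCopy      = Output(Bool())
  })
  io.grantsWrite := TLPermissions.isCap(io.grantParam) && io.grantParam === TLPermissions.toT
  // TtoB (shrink) and BtoB (report) both leave a read-only Branch copy behind
  io.keptCopy := io.probeAckParam === TLPermissions.TtoB || io.probeAckParam === TLPermissions.BtoB
}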
object TLAtomics
{
val width = 3
// Arithmetic types
def MIN = 0.U(width.W)
def MAX = 1.U(width.W)
def MINU = 2.U(width.W)
def MAXU = 3.U(width.W)
def ADD = 4.U(width.W)
def isArithmetic(x: UInt) = x <= ADD
// Logical types
def XOR = 0.U(width.W)
def OR = 1.U(width.W)
def AND = 2.U(width.W)
def SWAP = 3.U(width.W)
def isLogical(x: UInt) = x <= SWAP
def ArithMsg:Seq[String] = Seq("MIN", "MAX", "MINU", "MAXU", "ADD")
def LogicMsg:Seq[String] = Seq("XOR", "OR", "AND", "SWAP")
}
object TLHints
{
val width = 1
def PREFETCH_READ = 0.U(width.W)
def PREFETCH_WRITE = 1.U(width.W)
def isHints(x: UInt) = x <= PREFETCH_WRITE
def HintsMsg:Seq[String] = Seq("PrefetchRead", "PrefetchWrite")
}
sealed trait TLChannel extends TLBundleBase {
val channelName: String
}
sealed trait TLDataChannel extends TLChannel
sealed trait TLAddrChannel extends TLDataChannel
final class TLBundleA(params: TLBundleParameters)
extends TLBundleBase(params) with TLAddrChannel
{
override def typeName = s"TLBundleA_${params.shortName}"
val channelName = "'A' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(List(TLAtomics.width, TLPermissions.aWidth, TLHints.width).max.W) // amo_opcode || grow perms || hint
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // from
val address = UInt(params.addressBits.W) // to
val user = BundleMap(params.requestFields)
val echo = BundleMap(params.echoFields)
// variable fields during multibeat:
val mask = UInt((params.dataBits/8).W)
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleB(params: TLBundleParameters)
extends TLBundleBase(params) with TLAddrChannel
{
override def typeName = s"TLBundleB_${params.shortName}"
val channelName = "'B' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(TLPermissions.bdWidth.W) // cap perms
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // to
val address = UInt(params.addressBits.W) // from
// variable fields during multibeat:
val mask = UInt((params.dataBits/8).W)
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleC(params: TLBundleParameters)
extends TLBundleBase(params) with TLAddrChannel
{
override def typeName = s"TLBundleC_${params.shortName}"
val channelName = "'C' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(TLPermissions.cWidth.W) // shrink or report perms
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // from
val address = UInt(params.addressBits.W) // to
val user = BundleMap(params.requestFields)
val echo = BundleMap(params.echoFields)
// variable fields during multibeat:
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleD(params: TLBundleParameters)
extends TLBundleBase(params) with TLDataChannel
{
override def typeName = s"TLBundleD_${params.shortName}"
val channelName = "'D' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(TLPermissions.bdWidth.W) // cap perms
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // to
val sink = UInt(params.sinkBits.W) // from
val denied = Bool() // implies corrupt iff *Data
val user = BundleMap(params.responseFields)
val echo = BundleMap(params.echoFields)
// variable fields during multibeat:
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleE(params: TLBundleParameters)
extends TLBundleBase(params) with TLChannel
{
override def typeName = s"TLBundleE_${params.shortName}"
val channelName = "'E' channel"
val sink = UInt(params.sinkBits.W) // to
}
class TLBundle(val params: TLBundleParameters) extends Record
{
// Emulate a Bundle with elements abcde or ad depending on params.hasBCE
private val optA = Some (Decoupled(new TLBundleA(params)))
private val optB = params.hasBCE.option(Flipped(Decoupled(new TLBundleB(params))))
private val optC = params.hasBCE.option(Decoupled(new TLBundleC(params)))
private val optD = Some (Flipped(Decoupled(new TLBundleD(params))))
private val optE = params.hasBCE.option(Decoupled(new TLBundleE(params)))
def a: DecoupledIO[TLBundleA] = optA.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleA(params)))))
def b: DecoupledIO[TLBundleB] = optB.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleB(params)))))
def c: DecoupledIO[TLBundleC] = optC.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleC(params)))))
def d: DecoupledIO[TLBundleD] = optD.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleD(params)))))
def e: DecoupledIO[TLBundleE] = optE.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleE(params)))))
val elements =
if (params.hasBCE) ListMap("e" -> e, "d" -> d, "c" -> c, "b" -> b, "a" -> a)
else ListMap("d" -> d, "a" -> a)
def tieoff(): Unit = {
DataMirror.specifiedDirectionOf(a.ready) match {
case SpecifiedDirection.Input =>
a.ready := false.B
c.ready := false.B
e.ready := false.B
b.valid := false.B
d.valid := false.B
case SpecifiedDirection.Output =>
a.valid := false.B
c.valid := false.B
e.valid := false.B
b.ready := false.B
d.ready := false.B
case _ =>
}
}
}
object TLBundle
{
def apply(params: TLBundleParameters) = new TLBundle(params)
}
class TLAsyncBundleBase(val params: TLAsyncBundleParameters) extends Bundle
class TLAsyncBundle(params: TLAsyncBundleParameters) extends TLAsyncBundleBase(params)
{
val a = new AsyncBundle(new TLBundleA(params.base), params.async)
val b = Flipped(new AsyncBundle(new TLBundleB(params.base), params.async))
val c = new AsyncBundle(new TLBundleC(params.base), params.async)
val d = Flipped(new AsyncBundle(new TLBundleD(params.base), params.async))
val e = new AsyncBundle(new TLBundleE(params.base), params.async)
}
class TLRationalBundle(params: TLBundleParameters) extends TLBundleBase(params)
{
val a = RationalIO(new TLBundleA(params))
val b = Flipped(RationalIO(new TLBundleB(params)))
val c = RationalIO(new TLBundleC(params))
val d = Flipped(RationalIO(new TLBundleD(params)))
val e = RationalIO(new TLBundleE(params))
}
class TLCreditedBundle(params: TLBundleParameters) extends TLBundleBase(params)
{
val a = CreditedIO(new TLBundleA(params))
val b = Flipped(CreditedIO(new TLBundleB(params)))
val c = CreditedIO(new TLBundleC(params))
val d = Flipped(CreditedIO(new TLBundleD(params)))
val e = CreditedIO(new TLBundleE(params))
}
File Parameters.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.diplomacy
import chisel3._
import chisel3.util.{DecoupledIO, Queue, ReadyValidIO, isPow2, log2Ceil, log2Floor}
import freechips.rocketchip.util.ShiftQueue
/** Options for describing the attributes of memory regions */
object RegionType {
// Define the 'more relaxed than' ordering
val cases = Seq(CACHED, TRACKED, UNCACHED, IDEMPOTENT, VOLATILE, PUT_EFFECTS, GET_EFFECTS)
sealed trait T extends Ordered[T] {
def compare(that: T): Int = cases.indexOf(that) compare cases.indexOf(this)
}
case object CACHED extends T // an intermediate agent may have cached a copy of the region for you
case object TRACKED extends T // the region may have been cached by another master, but coherence is being provided
case object UNCACHED extends T // the region has not been cached yet, but should be cached when possible
case object IDEMPOTENT extends T // gets return most recently put content, but content should not be cached
case object VOLATILE extends T // content may change without a put, but puts and gets have no side effects
case object PUT_EFFECTS extends T // puts produce side effects and so must not be combined/delayed
case object GET_EFFECTS extends T // gets produce side effects and so must not be issued speculatively
}
// A half-open range [start, end); it is empty iff start == end
case class IdRange(start: Int, end: Int) extends Ordered[IdRange]
{
require (start >= 0, s"Ids cannot be negative, but got: $start.")
require (start <= end, s"Id range end ($end) cannot precede start ($start).")
def compare(x: IdRange) = {
val primary = (this.start - x.start).signum
val secondary = (x.end - this.end).signum
if (primary != 0) primary else secondary
}
def overlaps(x: IdRange) = start < x.end && x.start < end
def contains(x: IdRange) = start <= x.start && x.end <= end
def contains(x: Int) = start <= x && x < end
def contains(x: UInt) =
if (size == 0) {
false.B
} else if (size == 1) { // simple comparison
x === start.U
} else {
// find index of largest different bit
val largestDeltaBit = log2Floor(start ^ (end-1))
val smallestCommonBit = largestDeltaBit + 1 // may not exist in x
val uncommonMask = (1 << smallestCommonBit) - 1
val uncommonBits = (x | 0.U(smallestCommonBit.W))(largestDeltaBit, 0)
// the prefix must match exactly (note: may shift ALL bits away)
(x >> smallestCommonBit) === (start >> smallestCommonBit).U &&
// firrtl constant prop range analysis can eliminate these two:
(start & uncommonMask).U <= uncommonBits &&
uncommonBits <= ((end-1) & uncommonMask).U
}
def shift(x: Int) = IdRange(start+x, end+x)
def size = end - start
def isEmpty = end == start
def range = start until end
}
object IdRange
{
def overlaps(s: Seq[IdRange]) = if (s.isEmpty) None else {
val ranges = s.sorted
(ranges.tail zip ranges.init) find { case (a, b) => a overlaps b }
}
}
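// A minimal illustration (plain Scala, hypothetical values): IdRange is half-open, so
// adjacent ranges do not overlap and containment is ordinary interval inclusion.
private object IdRangeExample {
  require( IdRange(0, 4).contains(3))
  require(!IdRange(0, 4).contains(4)) // end is exclusive
  require(!(IdRange(0, 4) overlaps IdRange(4, 8)))
  require( IdRange(0, 8) contains IdRange(4, 8))
}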
// A potentially empty inclusive range of 2-powers [min, max] (in bytes)
case class TransferSizes(min: Int, max: Int)
{
def this(x: Int) = this(x, x)
require (min <= max, s"Min transfer $min > max transfer $max")
require (min >= 0 && max >= 0, s"TransferSizes must be non-negative, got: ($min, $max)")
require (max == 0 || isPow2(max), s"TransferSizes must be a power of 2, got: $max")
require (min == 0 || isPow2(min), s"TransferSizes must be a power of 2, got: $min")
require (max == 0 || min != 0, s"TransferSize 0 is forbidden unless (0,0), got: ($min, $max)")
def none = min == 0
def contains(x: Int) = isPow2(x) && min <= x && x <= max
def containsLg(x: Int) = contains(1 << x)
def containsLg(x: UInt) =
if (none) false.B
else if (min == max) { log2Ceil(min).U === x }
else { log2Ceil(min).U <= x && x <= log2Ceil(max).U }
def contains(x: TransferSizes) = x.none || (min <= x.min && x.max <= max)
def intersect(x: TransferSizes) =
if (x.max < min || max < x.min) TransferSizes.none
else TransferSizes(scala.math.max(min, x.min), scala.math.min(max, x.max))
// Not a union, because the result may contain sizes contained by neither term
// NOT TO BE CONFUSED WITH COVERPOINTS
def mincover(x: TransferSizes) = {
if (none) {
x
} else if (x.none) {
this
} else {
TransferSizes(scala.math.min(min, x.min), scala.math.max(max, x.max))
}
}
override def toString() = "TransferSizes[%d, %d]".format(min, max)
}
object TransferSizes {
def apply(x: Int) = new TransferSizes(x)
val none = new TransferSizes(0)
def mincover(seq: Seq[TransferSizes]) = seq.foldLeft(none)(_ mincover _)
def intersect(seq: Seq[TransferSizes]) = seq.reduce(_ intersect _)
implicit def asBool(x: TransferSizes) = !x.none
}
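// A minimal illustration (plain Scala, hypothetical values) of how port negotiation combines
// TransferSizes: intersect keeps only sizes both sides support, mincover spans both.
private object TransferSizesExample {
  require(TransferSizes(4, 64).intersect(TransferSizes(1, 16)) == TransferSizes(4, 16))
  require(TransferSizes(4, 64).mincover(TransferSizes(1, 16)) == TransferSizes(1, 64))
  require(TransferSizes(4, 64).containsLg(5)) // 32-byte transfers fall inside [4, 64]
}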
// AddressSets specify the address space managed by the manager
// Base is the base address, and mask are the bits consumed by the manager
// e.g: base=0x200, mask=0xff describes a device managing 0x200-0x2ff
// e.g: base=0x1000, mask=0xf0f describes a device managing 0x1000-0x100f, 0x1100-0x110f, ...
case class AddressSet(base: BigInt, mask: BigInt) extends Ordered[AddressSet]
{
// Forbid misaligned base address (and empty sets)
require ((base & mask) == 0, s"Mis-aligned AddressSets are forbidden, got: ${this.toString}")
require (base >= 0, s"AddressSet negative base is ambiguous: $base") // TL2 address widths are not fixed => negative is ambiguous
// We do allow negative mask (=> ignore all high bits)
def contains(x: BigInt) = ((x ^ base) & ~mask) == 0
def contains(x: UInt) = ((x ^ base.U).zext & (~mask).S) === 0.S
// turn x into an address contained in this set
def legalize(x: UInt): UInt = base.U | (mask.U & x)
// overlap iff bitwise: both care (~mask0 & ~mask1) => both equal (base0=base1)
def overlaps(x: AddressSet) = (~(mask | x.mask) & (base ^ x.base)) == 0
// contains iff bitwise: x.mask => mask && contains(x.base)
def contains(x: AddressSet) = ((x.mask | (base ^ x.base)) & ~mask) == 0
// The number of bytes to which the manager must be aligned
def alignment = ((mask + 1) & ~mask)
// Is this a contiguous memory range
def contiguous = alignment == mask+1
def finite = mask >= 0
def max = { require (finite, "Max cannot be calculated on infinite mask"); base | mask }
// Widen the match function to ignore all bits in imask
def widen(imask: BigInt) = AddressSet(base & ~imask, mask | imask)
// Return an AddressSet that only contains the addresses both sets contain
def intersect(x: AddressSet): Option[AddressSet] = {
if (!overlaps(x)) {
None
} else {
val r_mask = mask & x.mask
val r_base = base | x.base
Some(AddressSet(r_base, r_mask))
}
}
def subtract(x: AddressSet): Seq[AddressSet] = {
intersect(x) match {
case None => Seq(this)
case Some(remove) => AddressSet.enumerateBits(mask & ~remove.mask).map { bit =>
val nmask = (mask & (bit-1)) | remove.mask
val nbase = (remove.base ^ bit) & ~nmask
AddressSet(nbase, nmask)
}
}
}
// AddressSets have one natural Ordering (the containment order, if contiguous)
def compare(x: AddressSet) = {
val primary = (this.base - x.base).signum // smallest address first
val secondary = (x.mask - this.mask).signum // largest mask first
if (primary != 0) primary else secondary
}
// We always want to see things in hex
override def toString() = {
if (mask >= 0) {
"AddressSet(0x%x, 0x%x)".format(base, mask)
} else {
"AddressSet(0x%x, ~0x%x)".format(base, ~mask)
}
}
def toRanges = {
require (finite, "Ranges cannot be calculated on infinite mask")
val size = alignment
val fragments = mask & ~(size-1)
val bits = bitIndexes(fragments)
(BigInt(0) until (BigInt(1) << bits.size)).map { i =>
val off = bitIndexes(i).foldLeft(base) { case (a, b) => a.setBit(bits(b)) }
AddressRange(off, size)
}
}
}
object AddressSet
{
val everything = AddressSet(0, -1)
def misaligned(base: BigInt, size: BigInt, tail: Seq[AddressSet] = Seq()): Seq[AddressSet] = {
if (size == 0) tail.reverse else {
val maxBaseAlignment = base & (-base) // 0 for infinite (LSB)
val maxSizeAlignment = BigInt(1) << log2Floor(size) // MSB of size
val step =
if (maxBaseAlignment == 0 || maxBaseAlignment > maxSizeAlignment)
maxSizeAlignment else maxBaseAlignment
misaligned(base+step, size-step, AddressSet(base, step-1) +: tail)
}
}
def unify(seq: Seq[AddressSet], bit: BigInt): Seq[AddressSet] = {
// Pair terms up by ignoring 'bit'
seq.distinct.groupBy(x => x.copy(base = x.base & ~bit)).map { case (key, seq) =>
if (seq.size == 1) {
seq.head // singleton -> unaffected
} else {
key.copy(mask = key.mask | bit) // pair - widen mask by bit
}
}.toList
}
def unify(seq: Seq[AddressSet]): Seq[AddressSet] = {
val bits = seq.map(_.base).foldLeft(BigInt(0))(_ | _)
AddressSet.enumerateBits(bits).foldLeft(seq) { case (acc, bit) => unify(acc, bit) }.sorted
}
def enumerateMask(mask: BigInt): Seq[BigInt] = {
def helper(id: BigInt, tail: Seq[BigInt]): Seq[BigInt] =
if (id == mask) (id +: tail).reverse else helper(((~mask | id) + 1) & mask, id +: tail)
helper(0, Nil)
}
def enumerateBits(mask: BigInt): Seq[BigInt] = {
def helper(x: BigInt): Seq[BigInt] = {
if (x == 0) {
Nil
} else {
val bit = x & (-x)
bit +: helper(x & ~bit)
}
}
helper(mask)
}
}
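// A minimal illustration (plain Scala, hypothetical values) of the base/mask encoding
// described above: AddressSet(0x1000, 0xf0f) covers 0x1000-0x100f, 0x1100-0x110f, ...
private object AddressSetExample {
  val strided = AddressSet(0x1000, 0xf0f)
  require( strided.contains(BigInt(0x1003)))
  require( strided.contains(BigInt(0x1103)))
  require(!strided.contains(BigInt(0x1013)))
  // subtracting a hole yields the remaining, still mask-aligned, pieces
  val holes = AddressSet(0x0, 0xff).subtract(AddressSet(0x40, 0x3f))
  require(holes.toSet == Set(AddressSet(0x0, 0x3f), AddressSet(0x80, 0x7f)))
}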
case class BufferParams(depth: Int, flow: Boolean, pipe: Boolean)
{
require (depth >= 0, "Buffer depth must be >= 0")
def isDefined = depth > 0
def latency = if (isDefined && !flow) 1 else 0
def apply[T <: Data](x: DecoupledIO[T]) =
if (isDefined) Queue(x, depth, flow=flow, pipe=pipe)
else x
def irrevocable[T <: Data](x: ReadyValidIO[T]) =
if (isDefined) Queue.irrevocable(x, depth, flow=flow, pipe=pipe)
else x
def sq[T <: Data](x: DecoupledIO[T]) =
if (!isDefined) x else {
val sq = Module(new ShiftQueue(x.bits, depth, flow=flow, pipe=pipe))
sq.io.enq <> x
sq.io.deq
}
override def toString() = "BufferParams:%d%s%s".format(depth, if (flow) "F" else "", if (pipe) "P" else "")
}
object BufferParams
{
implicit def apply(depth: Int): BufferParams = BufferParams(depth, false, false)
val default = BufferParams(2)
val none = BufferParams(0)
val flow = BufferParams(1, true, false)
val pipe = BufferParams(1, false, true)
}
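// A minimal usage sketch (hypothetical module): a BufferParams value is applied like a
// function to wrap a Decoupled stream in a Queue of the requested depth.
class BufferParamsExample extends Module {
  val io = IO(new Bundle {
    val in  = Flipped(DecoupledIO(UInt(8.W)))
    val out = DecoupledIO(UInt(8.W))
  })
  io.out <> BufferParams.default(io.in) // two-entry queue, no flow/pipe bypass
}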
case class TriStateValue(value: Boolean, set: Boolean)
{
def update(orig: Boolean) = if (set) value else orig
}
object TriStateValue
{
implicit def apply(value: Boolean): TriStateValue = TriStateValue(value, true)
def unset = TriStateValue(false, false)
}
trait DirectedBuffers[T] {
def copyIn(x: BufferParams): T
def copyOut(x: BufferParams): T
def copyInOut(x: BufferParams): T
}
trait IdMapEntry {
def name: String
def from: IdRange
def to: IdRange
def isCache: Boolean
def requestFifo: Boolean
def maxTransactionsInFlight: Option[Int]
def pretty(fmt: String) =
if (from ne to) { // distinct from/to ranges need a 7-argument format string; a subclass that reuses the same reference for both gets the 5-argument form below
fmt.format(to.start, to.end, from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "")
} else {
fmt.format(from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "")
}
}
abstract class IdMap[T <: IdMapEntry] {
protected val fmt: String
val mapping: Seq[T]
def pretty: String = mapping.map(_.pretty(fmt)).mkString(",\n")
}
File Edges.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.util._
class TLEdge(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdgeParameters(client, manager, params, sourceInfo)
{
def isAligned(address: UInt, lgSize: UInt): Bool = {
if (maxLgSize == 0) true.B else {
val mask = UIntToOH1(lgSize, maxLgSize)
(address & mask) === 0.U
}
}
def mask(address: UInt, lgSize: UInt): UInt =
MaskGen(address, lgSize, manager.beatBytes)
def staticHasData(bundle: TLChannel): Option[Boolean] = {
bundle match {
case _:TLBundleA => {
// Do there exist A messages with Data?
val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial
// Do there exist A messages without Data?
val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint
// Statically optimize the case where hasData is a constant
if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None
}
case _:TLBundleB => {
// Do there exist B messages with Data?
val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial
// Do there exist B messages without Data?
val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint
// Statically optimize the case where hasData is a constant
if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None
}
case _:TLBundleC => {
// Do there exist C messages with Data?
val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe
// Do there exist C messages without Data?
val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe
if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None
}
case _:TLBundleD => {
// Do there exist D messages with Data?
val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB
// Do there exist D messages without Data?
val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT
if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None
}
case _:TLBundleE => Some(false)
}
}
def isRequest(x: TLChannel): Bool = {
x match {
case a: TLBundleA => true.B
case b: TLBundleB => true.B
case c: TLBundleC => c.opcode(2) && c.opcode(1)
// opcode === TLMessages.Release ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(2) && !d.opcode(1)
// opcode === TLMessages.Grant ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
}
def isResponse(x: TLChannel): Bool = {
x match {
case a: TLBundleA => false.B
case b: TLBundleB => false.B
case c: TLBundleC => !c.opcode(2) || !c.opcode(1)
// opcode =/= TLMessages.Release &&
// opcode =/= TLMessages.ReleaseData
case d: TLBundleD => true.B // Grant isResponse + isRequest
case e: TLBundleE => true.B
}
}
def hasData(x: TLChannel): Bool = {
val opdata = x match {
case a: TLBundleA => !a.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case b: TLBundleB => !b.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case c: TLBundleC => c.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.ProbeAckData ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
staticHasData(x).map(_.B).getOrElse(opdata)
}
def opcode(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.opcode
case b: TLBundleB => b.opcode
case c: TLBundleC => c.opcode
case d: TLBundleD => d.opcode
}
}
def param(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.param
case b: TLBundleB => b.param
case c: TLBundleC => c.param
case d: TLBundleD => d.param
}
}
def size(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.size
case b: TLBundleB => b.size
case c: TLBundleC => c.size
case d: TLBundleD => d.size
}
}
def data(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.data
case b: TLBundleB => b.data
case c: TLBundleC => c.data
case d: TLBundleD => d.data
}
}
def corrupt(x: TLDataChannel): Bool = {
x match {
case a: TLBundleA => a.corrupt
case b: TLBundleB => b.corrupt
case c: TLBundleC => c.corrupt
case d: TLBundleD => d.corrupt
}
}
def mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.mask
case b: TLBundleB => b.mask
case c: TLBundleC => mask(c.address, c.size)
}
}
def full_mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => mask(a.address, a.size)
case b: TLBundleB => mask(b.address, b.size)
case c: TLBundleC => mask(c.address, c.size)
}
}
def address(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.address
case b: TLBundleB => b.address
case c: TLBundleC => c.address
}
}
def source(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.source
case b: TLBundleB => b.source
case c: TLBundleC => c.source
case d: TLBundleD => d.source
}
}
def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes)
def addr_lo(x: UInt): UInt =
if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0)
def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x))
def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x))
def numBeats(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 1.U
case bundle: TLDataChannel => {
val hasData = this.hasData(bundle)
val size = this.size(bundle)
val cutoff = log2Ceil(manager.beatBytes)
val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U
val decode = UIntToOH(size, maxLgSize+1) >> cutoff
Mux(hasData, decode | small.asUInt, 1.U)
}
}
}
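// Worked example: with beatBytes = 8, a 64-byte data message (size = 6) occupies 8 beats
// (numBeats above), and numBeats1 below reports 7; messages without a data payload always
// count as a single beat (numBeats1 = 0).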
def numBeats1(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 0.U
case bundle: TLDataChannel => {
if (maxLgSize == 0) {
0.U
} else {
val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes)
Mux(hasData(bundle), decode, 0.U)
}
}
}
}
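// Tracks a per-channel beat counter: 'first' is high on the first beat of a burst, 'last'
// on the final beat, 'done' when the final beat actually fires, and 'count' is the
// zero-based index of the current beat within the burst.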
def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val beats1 = numBeats1(bits)
val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W))
val counter1 = counter - 1.U
val first = counter === 0.U
val last = counter === 1.U || beats1 === 0.U
val done = last && fire
val count = (beats1 & ~counter1)
when (fire) {
counter := Mux(first, beats1, counter1)
}
(first, last, done, count)
}
def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1
def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire)
def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid)
def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2
def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire)
def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid)
def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3
def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire)
def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid)
def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3)
}
def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire)
def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid)
def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4)
}
def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire)
def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid)
def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes))
}
def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire)
def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid)
// Does the request need T permissions to be executed?
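// (Puts and atomics always do, a Get never does, a prefetch-write Hint does, and an
// Acquire needs T only when it asks to grow to Trunk, i.e. NtoT or BtoT.)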
def needT(a: TLBundleA): Bool = {
val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLPermissions.NtoB -> false.B,
TLPermissions.NtoT -> true.B,
TLPermissions.BtoT -> true.B))
MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array(
TLMessages.PutFullData -> true.B,
TLMessages.PutPartialData -> true.B,
TLMessages.ArithmeticData -> true.B,
TLMessages.LogicalData -> true.B,
TLMessages.Get -> false.B,
TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLHints.PREFETCH_READ -> false.B,
TLHints.PREFETCH_WRITE -> true.B)),
TLMessages.AcquireBlock -> acq_needT,
TLMessages.AcquirePerm -> acq_needT))
}
// This is a very expensive circuit; use only if you really mean it!
def inFlight(x: TLBundle): (UInt, UInt) = {
val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W))
val bce = manager.anySupportAcquireB && client.anySupportProbe
val (a_first, a_last, _) = firstlast(x.a)
val (b_first, b_last, _) = firstlast(x.b)
val (c_first, c_last, _) = firstlast(x.c)
val (d_first, d_last, _) = firstlast(x.d)
val (e_first, e_last, _) = firstlast(x.e)
val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits))
val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits))
val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits))
val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits))
val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits))
val a_inc = x.a.fire && a_first && a_request
val b_inc = x.b.fire && b_first && b_request
val c_inc = x.c.fire && c_first && c_request
val d_inc = x.d.fire && d_first && d_request
val e_inc = x.e.fire && e_first && e_request
val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil))
val a_dec = x.a.fire && a_last && a_response
val b_dec = x.b.fire && b_last && b_response
val c_dec = x.c.fire && c_last && c_response
val d_dec = x.d.fire && d_last && d_response
val e_dec = x.e.fire && e_last && e_response
val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil))
val next_flight = flight + PopCount(inc) - PopCount(dec)
flight := next_flight
(flight, next_flight)
}
def prettySourceMapping(context: String): String = {
s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n"
}
}
class TLEdgeOut(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
// Transfers
def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquireBlock
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquirePerm
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.Release
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ReleaseData
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) =
Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B)
def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAck
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions, data)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAckData
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B)
def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink)
def GrantAck(toSink: UInt): TLBundleE = {
val e = Wire(new TLBundleE(bundle))
e.sink := toSink
e
}
// Accesses
def Get(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsGetFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Get
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutFullFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutFullData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, mask, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutPartialFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutPartialData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = {
require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsArithmeticFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.ArithmeticData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsLogicalFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.LogicalData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = {
require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsHintFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Hint
a.param := param
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data)
def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAckData
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size)
def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.HintAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
}
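// Illustrative sketch (not part of this file; edgeOut, out, reqAddr, wdata, wmask, wantWrite are
// hypothetical names): a client drives channel A with the (legality, bits) pair returned by the
// constructors above, e.g. a masked Put built by TLEdgeOut.Put:
//   val (legal, aBits) = edgeOut.Put(fromSource = 0.U, toAddress = reqAddr,
//                                    lgSize = 3.U, data = wdata, mask = wmask)
//   out.a.valid := wantWrite && legal
//   out.a.bits  := aBits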
class TLEdgeIn(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = {
val todo = x.filter(!_.isEmpty)
val heads = todo.map(_.head)
val tails = todo.map(_.tail)
if (todo.isEmpty) Nil else { heads +: myTranspose(tails) }
}
// Transfers
def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = {
require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}")
val legal = client.supportsProbe(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Probe
b.param := capPermissions
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.Grant
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.GrantData
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B)
def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.ReleaseAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
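  // Illustrative sketch (manager-side wiring, not part of this file; edgeIn, in, isRelease are
  // hypothetical names): a voluntary Release arriving on channel C is acknowledged on D by
  // echoing its source and size with the constructor above:
  //   in.d.valid := in.c.valid && isRelease        // hypothetical handshake
  //   in.d.bits  := edgeIn.ReleaseAck(in.c.bits)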
// Accesses
def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = {
require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsGet(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Get
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsPutFull(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutFullData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, mask, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsPutPartial(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutPartialData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsArithmetic(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.ArithmeticData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsLogical(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.LogicalData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = {
require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsHint(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Hint
b.param := param
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
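  // Responses (D-channel acknowledgements for A-channel accesses)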
def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size)
def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied)
def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data)
def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAckData
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B)
def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied)
def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B)
def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.HintAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
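  // Illustrative sketch (manager-side wiring, not part of this file; edgeIn, in, readData are
  // hypothetical names): a Get or Put seen on channel A is answered with the matching D beat
  // built by the constructors above:
  //   in.d.bits := Mux(in.a.bits.opcode === TLMessages.Get,
  //                    edgeIn.AccessAck(in.a.bits, readData),  // AccessAckData with read payload
  //                    edgeIn.AccessAck(in.a.bits))            // plain AccessAck for a Put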
}
module TLMonitor_60( // @[Monitor.scala:36:7]
input clock, // @[Monitor.scala:36:7]
input reset, // @[Monitor.scala:36:7]
input io_in_a_ready, // @[Monitor.scala:20:14]
input io_in_a_valid, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_opcode, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_param, // @[Monitor.scala:20:14]
input [1:0] io_in_a_bits_size, // @[Monitor.scala:20:14]
input [10:0] io_in_a_bits_source, // @[Monitor.scala:20:14]
input [20:0] io_in_a_bits_address, // @[Monitor.scala:20:14]
input [7:0] io_in_a_bits_mask, // @[Monitor.scala:20:14]
input [63:0] io_in_a_bits_data, // @[Monitor.scala:20:14]
input io_in_a_bits_corrupt, // @[Monitor.scala:20:14]
input io_in_d_ready, // @[Monitor.scala:20:14]
input io_in_d_valid, // @[Monitor.scala:20:14]
input [2:0] io_in_d_bits_opcode, // @[Monitor.scala:20:14]
input [1:0] io_in_d_bits_size, // @[Monitor.scala:20:14]
input [10:0] io_in_d_bits_source // @[Monitor.scala:20:14]
);
wire [31:0] _plusarg_reader_1_out; // @[PlusArg.scala:80:11]
wire [31:0] _plusarg_reader_out; // @[PlusArg.scala:80:11]
wire io_in_a_ready_0 = io_in_a_ready; // @[Monitor.scala:36:7]
wire io_in_a_valid_0 = io_in_a_valid; // @[Monitor.scala:36:7]
wire [2:0] io_in_a_bits_opcode_0 = io_in_a_bits_opcode; // @[Monitor.scala:36:7]
wire [2:0] io_in_a_bits_param_0 = io_in_a_bits_param; // @[Monitor.scala:36:7]
wire [1:0] io_in_a_bits_size_0 = io_in_a_bits_size; // @[Monitor.scala:36:7]
wire [10:0] io_in_a_bits_source_0 = io_in_a_bits_source; // @[Monitor.scala:36:7]
wire [20:0] io_in_a_bits_address_0 = io_in_a_bits_address; // @[Monitor.scala:36:7]
wire [7:0] io_in_a_bits_mask_0 = io_in_a_bits_mask; // @[Monitor.scala:36:7]
wire [63:0] io_in_a_bits_data_0 = io_in_a_bits_data; // @[Monitor.scala:36:7]
wire io_in_a_bits_corrupt_0 = io_in_a_bits_corrupt; // @[Monitor.scala:36:7]
wire io_in_d_ready_0 = io_in_d_ready; // @[Monitor.scala:36:7]
wire io_in_d_valid_0 = io_in_d_valid; // @[Monitor.scala:36:7]
wire [2:0] io_in_d_bits_opcode_0 = io_in_d_bits_opcode; // @[Monitor.scala:36:7]
wire [1:0] io_in_d_bits_size_0 = io_in_d_bits_size; // @[Monitor.scala:36:7]
wire [10:0] io_in_d_bits_source_0 = io_in_d_bits_source; // @[Monitor.scala:36:7]
wire [63:0] io_in_d_bits_data = 64'h0; // @[Monitor.scala:36:7]
wire [63:0] _c_first_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_first_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_first_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_first_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_set_wo_ready_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_set_wo_ready_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_opcodes_set_interm_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_opcodes_set_interm_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_sizes_set_interm_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_sizes_set_interm_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_opcodes_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_opcodes_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_sizes_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_sizes_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_probe_ack_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_probe_ack_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_probe_ack_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_probe_ack_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _same_cycle_resp_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _same_cycle_resp_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _same_cycle_resp_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _same_cycle_resp_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _same_cycle_resp_WIRE_4_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _same_cycle_resp_WIRE_5_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire io_in_d_bits_sink = 1'h0; // @[Monitor.scala:36:7]
wire io_in_d_bits_denied = 1'h0; // @[Monitor.scala:36:7]
wire io_in_d_bits_corrupt = 1'h0; // @[Monitor.scala:36:7]
wire _source_ok_T = 1'h0; // @[Parameters.scala:54:10]
wire _source_ok_T_6 = 1'h0; // @[Parameters.scala:54:10]
wire sink_ok = 1'h0; // @[Monitor.scala:309:31]
wire a_first_beats1_decode = 1'h0; // @[Edges.scala:220:59]
wire a_first_beats1 = 1'h0; // @[Edges.scala:221:14]
wire a_first_count = 1'h0; // @[Edges.scala:234:25]
wire d_first_beats1_decode = 1'h0; // @[Edges.scala:220:59]
wire d_first_beats1 = 1'h0; // @[Edges.scala:221:14]
wire d_first_count = 1'h0; // @[Edges.scala:234:25]
wire a_first_beats1_decode_1 = 1'h0; // @[Edges.scala:220:59]
wire a_first_beats1_1 = 1'h0; // @[Edges.scala:221:14]
wire a_first_count_1 = 1'h0; // @[Edges.scala:234:25]
wire d_first_beats1_decode_1 = 1'h0; // @[Edges.scala:220:59]
wire d_first_beats1_1 = 1'h0; // @[Edges.scala:221:14]
wire d_first_count_1 = 1'h0; // @[Edges.scala:234:25]
wire _c_first_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_T = 1'h0; // @[Decoupled.scala:51:35]
wire c_first_beats1_decode = 1'h0; // @[Edges.scala:220:59]
wire c_first_beats1_opdata = 1'h0; // @[Edges.scala:102:36]
wire c_first_beats1 = 1'h0; // @[Edges.scala:221:14]
wire _c_first_last_T = 1'h0; // @[Edges.scala:232:25]
wire c_first_done = 1'h0; // @[Edges.scala:233:22]
wire _c_first_count_T = 1'h0; // @[Edges.scala:234:27]
wire c_first_count = 1'h0; // @[Edges.scala:234:25]
wire _c_first_counter_T = 1'h0; // @[Edges.scala:236:21]
wire d_first_beats1_decode_2 = 1'h0; // @[Edges.scala:220:59]
wire d_first_beats1_2 = 1'h0; // @[Edges.scala:221:14]
wire d_first_count_2 = 1'h0; // @[Edges.scala:234:25]
wire _c_set_wo_ready_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_wo_ready_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_wo_ready_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_wo_ready_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_wo_ready_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_wo_ready_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_interm_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_interm_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_interm_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_interm_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_interm_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_interm_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_interm_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_interm_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_interm_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_interm_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_interm_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_interm_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_T = 1'h0; // @[Monitor.scala:772:47]
wire _c_probe_ack_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_T_1 = 1'h0; // @[Monitor.scala:772:95]
wire c_probe_ack = 1'h0; // @[Monitor.scala:772:71]
wire _same_cycle_resp_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_T_3 = 1'h0; // @[Monitor.scala:795:44]
wire _same_cycle_resp_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_T_4 = 1'h0; // @[Edges.scala:68:36]
wire _same_cycle_resp_T_5 = 1'h0; // @[Edges.scala:68:51]
wire _same_cycle_resp_T_6 = 1'h0; // @[Edges.scala:68:40]
wire _same_cycle_resp_T_7 = 1'h0; // @[Monitor.scala:795:55]
wire _same_cycle_resp_WIRE_4_ready = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_4_valid = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_4_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_5_ready = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_5_valid = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_5_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire same_cycle_resp_1 = 1'h0; // @[Monitor.scala:795:88]
wire _source_ok_T_1 = 1'h1; // @[Parameters.scala:54:32]
wire _source_ok_T_2 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_3 = 1'h1; // @[Parameters.scala:54:67]
wire _source_ok_T_7 = 1'h1; // @[Parameters.scala:54:32]
wire _source_ok_T_8 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_9 = 1'h1; // @[Parameters.scala:54:67]
wire _a_first_last_T_1 = 1'h1; // @[Edges.scala:232:43]
wire a_first_last = 1'h1; // @[Edges.scala:232:33]
wire _d_first_last_T_1 = 1'h1; // @[Edges.scala:232:43]
wire d_first_last = 1'h1; // @[Edges.scala:232:33]
wire _a_first_last_T_3 = 1'h1; // @[Edges.scala:232:43]
wire a_first_last_1 = 1'h1; // @[Edges.scala:232:33]
wire _d_first_last_T_3 = 1'h1; // @[Edges.scala:232:43]
wire d_first_last_1 = 1'h1; // @[Edges.scala:232:33]
wire c_first_counter1 = 1'h1; // @[Edges.scala:230:28]
wire c_first = 1'h1; // @[Edges.scala:231:25]
wire _c_first_last_T_1 = 1'h1; // @[Edges.scala:232:43]
wire c_first_last = 1'h1; // @[Edges.scala:232:33]
wire _d_first_last_T_5 = 1'h1; // @[Edges.scala:232:43]
wire d_first_last_2 = 1'h1; // @[Edges.scala:232:33]
wire [1:0] _c_first_counter1_T = 2'h3; // @[Edges.scala:230:28]
wire [1:0] io_in_d_bits_param = 2'h0; // @[Monitor.scala:36:7]
wire [1:0] _c_first_WIRE_bits_size = 2'h0; // @[Bundles.scala:265:74]
wire [1:0] _c_first_WIRE_1_bits_size = 2'h0; // @[Bundles.scala:265:61]
wire [1:0] _c_first_WIRE_2_bits_size = 2'h0; // @[Bundles.scala:265:74]
wire [1:0] _c_first_WIRE_3_bits_size = 2'h0; // @[Bundles.scala:265:61]
wire [1:0] _c_set_wo_ready_WIRE_bits_size = 2'h0; // @[Bundles.scala:265:74]
wire [1:0] _c_set_wo_ready_WIRE_1_bits_size = 2'h0; // @[Bundles.scala:265:61]
wire [1:0] _c_set_WIRE_bits_size = 2'h0; // @[Bundles.scala:265:74]
wire [1:0] _c_set_WIRE_1_bits_size = 2'h0; // @[Bundles.scala:265:61]
wire [1:0] _c_opcodes_set_interm_WIRE_bits_size = 2'h0; // @[Bundles.scala:265:74]
wire [1:0] _c_opcodes_set_interm_WIRE_1_bits_size = 2'h0; // @[Bundles.scala:265:61]
wire [1:0] _c_sizes_set_interm_WIRE_bits_size = 2'h0; // @[Bundles.scala:265:74]
wire [1:0] _c_sizes_set_interm_WIRE_1_bits_size = 2'h0; // @[Bundles.scala:265:61]
wire [1:0] _c_opcodes_set_WIRE_bits_size = 2'h0; // @[Bundles.scala:265:74]
wire [1:0] _c_opcodes_set_WIRE_1_bits_size = 2'h0; // @[Bundles.scala:265:61]
wire [1:0] _c_sizes_set_WIRE_bits_size = 2'h0; // @[Bundles.scala:265:74]
wire [1:0] _c_sizes_set_WIRE_1_bits_size = 2'h0; // @[Bundles.scala:265:61]
wire [1:0] _c_probe_ack_WIRE_bits_size = 2'h0; // @[Bundles.scala:265:74]
wire [1:0] _c_probe_ack_WIRE_1_bits_size = 2'h0; // @[Bundles.scala:265:61]
wire [1:0] _c_probe_ack_WIRE_2_bits_size = 2'h0; // @[Bundles.scala:265:74]
wire [1:0] _c_probe_ack_WIRE_3_bits_size = 2'h0; // @[Bundles.scala:265:61]
wire [1:0] _same_cycle_resp_WIRE_bits_size = 2'h0; // @[Bundles.scala:265:74]
wire [1:0] _same_cycle_resp_WIRE_1_bits_size = 2'h0; // @[Bundles.scala:265:61]
wire [1:0] _same_cycle_resp_WIRE_2_bits_size = 2'h0; // @[Bundles.scala:265:74]
wire [1:0] _same_cycle_resp_WIRE_3_bits_size = 2'h0; // @[Bundles.scala:265:61]
wire [1:0] _same_cycle_resp_WIRE_4_bits_size = 2'h0; // @[Bundles.scala:265:74]
wire [1:0] _same_cycle_resp_WIRE_5_bits_size = 2'h0; // @[Bundles.scala:265:61]
wire [20:0] _c_first_WIRE_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _c_first_WIRE_1_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _c_first_WIRE_2_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _c_first_WIRE_3_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _c_set_wo_ready_WIRE_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _c_set_wo_ready_WIRE_1_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _c_set_WIRE_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _c_set_WIRE_1_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _c_opcodes_set_interm_WIRE_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _c_opcodes_set_interm_WIRE_1_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _c_sizes_set_interm_WIRE_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _c_sizes_set_interm_WIRE_1_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _c_opcodes_set_WIRE_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _c_opcodes_set_WIRE_1_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _c_sizes_set_WIRE_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _c_sizes_set_WIRE_1_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _c_probe_ack_WIRE_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _c_probe_ack_WIRE_1_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _c_probe_ack_WIRE_2_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _c_probe_ack_WIRE_3_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _same_cycle_resp_WIRE_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _same_cycle_resp_WIRE_1_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _same_cycle_resp_WIRE_2_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _same_cycle_resp_WIRE_3_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _same_cycle_resp_WIRE_4_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _same_cycle_resp_WIRE_5_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [10:0] _c_first_WIRE_bits_source = 11'h0; // @[Bundles.scala:265:74]
wire [10:0] _c_first_WIRE_1_bits_source = 11'h0; // @[Bundles.scala:265:61]
wire [10:0] _c_first_WIRE_2_bits_source = 11'h0; // @[Bundles.scala:265:74]
wire [10:0] _c_first_WIRE_3_bits_source = 11'h0; // @[Bundles.scala:265:61]
wire [10:0] _c_set_wo_ready_WIRE_bits_source = 11'h0; // @[Bundles.scala:265:74]
wire [10:0] _c_set_wo_ready_WIRE_1_bits_source = 11'h0; // @[Bundles.scala:265:61]
wire [10:0] _c_set_WIRE_bits_source = 11'h0; // @[Bundles.scala:265:74]
wire [10:0] _c_set_WIRE_1_bits_source = 11'h0; // @[Bundles.scala:265:61]
wire [10:0] _c_opcodes_set_interm_WIRE_bits_source = 11'h0; // @[Bundles.scala:265:74]
wire [10:0] _c_opcodes_set_interm_WIRE_1_bits_source = 11'h0; // @[Bundles.scala:265:61]
wire [10:0] _c_sizes_set_interm_WIRE_bits_source = 11'h0; // @[Bundles.scala:265:74]
wire [10:0] _c_sizes_set_interm_WIRE_1_bits_source = 11'h0; // @[Bundles.scala:265:61]
wire [10:0] _c_opcodes_set_WIRE_bits_source = 11'h0; // @[Bundles.scala:265:74]
wire [10:0] _c_opcodes_set_WIRE_1_bits_source = 11'h0; // @[Bundles.scala:265:61]
wire [10:0] _c_sizes_set_WIRE_bits_source = 11'h0; // @[Bundles.scala:265:74]
wire [10:0] _c_sizes_set_WIRE_1_bits_source = 11'h0; // @[Bundles.scala:265:61]
wire [10:0] _c_probe_ack_WIRE_bits_source = 11'h0; // @[Bundles.scala:265:74]
wire [10:0] _c_probe_ack_WIRE_1_bits_source = 11'h0; // @[Bundles.scala:265:61]
wire [10:0] _c_probe_ack_WIRE_2_bits_source = 11'h0; // @[Bundles.scala:265:74]
wire [10:0] _c_probe_ack_WIRE_3_bits_source = 11'h0; // @[Bundles.scala:265:61]
wire [10:0] _same_cycle_resp_WIRE_bits_source = 11'h0; // @[Bundles.scala:265:74]
wire [10:0] _same_cycle_resp_WIRE_1_bits_source = 11'h0; // @[Bundles.scala:265:61]
wire [10:0] _same_cycle_resp_WIRE_2_bits_source = 11'h0; // @[Bundles.scala:265:74]
wire [10:0] _same_cycle_resp_WIRE_3_bits_source = 11'h0; // @[Bundles.scala:265:61]
wire [10:0] _same_cycle_resp_WIRE_4_bits_source = 11'h0; // @[Bundles.scala:265:74]
wire [10:0] _same_cycle_resp_WIRE_5_bits_source = 11'h0; // @[Bundles.scala:265:61]
wire [2:0] responseMap_0 = 3'h0; // @[Monitor.scala:643:42]
wire [2:0] responseMap_1 = 3'h0; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_0 = 3'h0; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_1 = 3'h0; // @[Monitor.scala:644:42]
wire [2:0] _c_first_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_beats1_decode_T_2 = 3'h0; // @[package.scala:243:46]
wire [2:0] c_sizes_set_interm = 3'h0; // @[Monitor.scala:755:40]
wire [2:0] _c_set_wo_ready_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_wo_ready_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_wo_ready_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_wo_ready_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_interm_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_interm_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_interm_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_interm_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_interm_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_interm_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_interm_T = 3'h0; // @[Monitor.scala:766:51]
wire [2:0] _c_opcodes_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_4_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_4_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_5_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_5_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [15:0] _a_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _a_size_lookup_T_5 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _d_opcodes_clr_T_3 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _d_sizes_clr_T_3 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _c_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:724:57]
wire [15:0] _c_size_lookup_T_5 = 16'hF; // @[Monitor.scala:724:57]
wire [15:0] _d_opcodes_clr_T_9 = 16'hF; // @[Monitor.scala:724:57]
wire [15:0] _d_sizes_clr_T_9 = 16'hF; // @[Monitor.scala:724:57]
wire [16:0] _a_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _a_size_lookup_T_4 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _d_opcodes_clr_T_2 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _d_sizes_clr_T_2 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _c_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:724:57]
wire [16:0] _c_size_lookup_T_4 = 17'hF; // @[Monitor.scala:724:57]
wire [16:0] _d_opcodes_clr_T_8 = 17'hF; // @[Monitor.scala:724:57]
wire [16:0] _d_sizes_clr_T_8 = 17'hF; // @[Monitor.scala:724:57]
wire [15:0] _a_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _a_size_lookup_T_3 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _d_opcodes_clr_T_1 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _d_sizes_clr_T_1 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _c_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:724:51]
wire [15:0] _c_size_lookup_T_3 = 16'h10; // @[Monitor.scala:724:51]
wire [15:0] _d_opcodes_clr_T_7 = 16'h10; // @[Monitor.scala:724:51]
wire [15:0] _d_sizes_clr_T_7 = 16'h10; // @[Monitor.scala:724:51]
wire [16385:0] _c_sizes_set_T_1 = 16386'h0; // @[Monitor.scala:768:52]
wire [13:0] _c_opcodes_set_T = 14'h0; // @[Monitor.scala:767:79]
wire [13:0] _c_sizes_set_T = 14'h0; // @[Monitor.scala:768:77]
wire [16386:0] _c_opcodes_set_T_1 = 16387'h0; // @[Monitor.scala:767:54]
wire [2:0] responseMap_2 = 3'h1; // @[Monitor.scala:643:42]
wire [2:0] responseMap_3 = 3'h1; // @[Monitor.scala:643:42]
wire [2:0] responseMap_4 = 3'h1; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_2 = 3'h1; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_3 = 3'h1; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_4 = 3'h1; // @[Monitor.scala:644:42]
wire [2:0] _c_sizes_set_interm_T_1 = 3'h1; // @[Monitor.scala:766:59]
wire [3:0] _c_opcodes_set_interm_T_1 = 4'h1; // @[Monitor.scala:765:61]
wire [3:0] c_opcodes_set_interm = 4'h0; // @[Monitor.scala:754:40]
wire [3:0] _c_opcodes_set_interm_T = 4'h0; // @[Monitor.scala:765:53]
wire [2047:0] _c_set_wo_ready_T = 2048'h1; // @[OneHot.scala:58:35]
wire [2047:0] _c_set_T = 2048'h1; // @[OneHot.scala:58:35]
wire [4159:0] c_opcodes_set = 4160'h0; // @[Monitor.scala:740:34]
wire [4159:0] c_sizes_set = 4160'h0; // @[Monitor.scala:741:34]
wire [1039:0] c_set = 1040'h0; // @[Monitor.scala:738:34]
wire [1039:0] c_set_wo_ready = 1040'h0; // @[Monitor.scala:739:34]
wire [2:0] _c_first_beats1_decode_T_1 = 3'h7; // @[package.scala:243:76]
wire [5:0] _c_first_beats1_decode_T = 6'h7; // @[package.scala:243:71]
wire [2:0] responseMap_6 = 3'h4; // @[Monitor.scala:643:42]
wire [2:0] responseMap_7 = 3'h4; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_7 = 3'h4; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_6 = 3'h5; // @[Monitor.scala:644:42]
wire [2:0] responseMap_5 = 3'h2; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_5 = 3'h2; // @[Monitor.scala:644:42]
wire [3:0] _a_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:637:123]
wire [3:0] _a_size_lookup_T_2 = 4'h4; // @[Monitor.scala:641:117]
wire [3:0] _d_opcodes_clr_T = 4'h4; // @[Monitor.scala:680:48]
wire [3:0] _d_sizes_clr_T = 4'h4; // @[Monitor.scala:681:48]
wire [3:0] _c_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:749:123]
wire [3:0] _c_size_lookup_T_2 = 4'h4; // @[Monitor.scala:750:119]
wire [3:0] _d_opcodes_clr_T_6 = 4'h4; // @[Monitor.scala:790:48]
wire [3:0] _d_sizes_clr_T_6 = 4'h4; // @[Monitor.scala:791:48]
wire [10:0] _source_ok_uncommonBits_T = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [10:0] _uncommonBits_T = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [10:0] _uncommonBits_T_1 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [10:0] _uncommonBits_T_2 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [10:0] _uncommonBits_T_3 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [10:0] _uncommonBits_T_4 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [10:0] _uncommonBits_T_5 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [10:0] _uncommonBits_T_6 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [10:0] _uncommonBits_T_7 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [10:0] _uncommonBits_T_8 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [10:0] _source_ok_uncommonBits_T_1 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire [10:0] source_ok_uncommonBits = _source_ok_uncommonBits_T; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_4 = source_ok_uncommonBits < 11'h410; // @[Parameters.scala:52:56, :57:20]
wire _source_ok_T_5 = _source_ok_T_4; // @[Parameters.scala:56:48, :57:20]
wire _source_ok_WIRE_0 = _source_ok_T_5; // @[Parameters.scala:1138:31]
wire [5:0] _GEN = 6'h7 << io_in_a_bits_size_0; // @[package.scala:243:71]
wire [5:0] _is_aligned_mask_T; // @[package.scala:243:71]
assign _is_aligned_mask_T = _GEN; // @[package.scala:243:71]
wire [5:0] _a_first_beats1_decode_T; // @[package.scala:243:71]
assign _a_first_beats1_decode_T = _GEN; // @[package.scala:243:71]
wire [5:0] _a_first_beats1_decode_T_3; // @[package.scala:243:71]
assign _a_first_beats1_decode_T_3 = _GEN; // @[package.scala:243:71]
wire [2:0] _is_aligned_mask_T_1 = _is_aligned_mask_T[2:0]; // @[package.scala:243:{71,76}]
wire [2:0] is_aligned_mask = ~_is_aligned_mask_T_1; // @[package.scala:243:{46,76}]
wire [20:0] _is_aligned_T = {18'h0, io_in_a_bits_address_0[2:0] & is_aligned_mask}; // @[package.scala:243:46]
wire is_aligned = _is_aligned_T == 21'h0; // @[Edges.scala:21:{16,24}]
wire [2:0] _mask_sizeOH_T = {1'h0, io_in_a_bits_size_0}; // @[Misc.scala:202:34]
wire [1:0] mask_sizeOH_shiftAmount = _mask_sizeOH_T[1:0]; // @[OneHot.scala:64:49]
wire [3:0] _mask_sizeOH_T_1 = 4'h1 << mask_sizeOH_shiftAmount; // @[OneHot.scala:64:49, :65:12]
wire [2:0] _mask_sizeOH_T_2 = _mask_sizeOH_T_1[2:0]; // @[OneHot.scala:65:{12,27}]
wire [2:0] mask_sizeOH = {_mask_sizeOH_T_2[2:1], 1'h1}; // @[OneHot.scala:65:27]
wire mask_sub_sub_sub_0_1 = &io_in_a_bits_size_0; // @[Misc.scala:206:21]
wire mask_sub_sub_size = mask_sizeOH[2]; // @[Misc.scala:202:81, :209:26]
wire mask_sub_sub_bit = io_in_a_bits_address_0[2]; // @[Misc.scala:210:26]
wire mask_sub_sub_1_2 = mask_sub_sub_bit; // @[Misc.scala:210:26, :214:27]
wire mask_sub_sub_nbit = ~mask_sub_sub_bit; // @[Misc.scala:210:26, :211:20]
wire mask_sub_sub_0_2 = mask_sub_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_sub_sub_acc_T = mask_sub_sub_size & mask_sub_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_sub_0_1 = mask_sub_sub_sub_0_1 | _mask_sub_sub_acc_T; // @[Misc.scala:206:21, :215:{29,38}]
wire _mask_sub_sub_acc_T_1 = mask_sub_sub_size & mask_sub_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_sub_1_1 = mask_sub_sub_sub_0_1 | _mask_sub_sub_acc_T_1; // @[Misc.scala:206:21, :215:{29,38}]
wire mask_sub_size = mask_sizeOH[1]; // @[Misc.scala:202:81, :209:26]
wire mask_sub_bit = io_in_a_bits_address_0[1]; // @[Misc.scala:210:26]
wire mask_sub_nbit = ~mask_sub_bit; // @[Misc.scala:210:26, :211:20]
wire mask_sub_0_2 = mask_sub_sub_0_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_sub_acc_T = mask_sub_size & mask_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_0_1 = mask_sub_sub_0_1 | _mask_sub_acc_T; // @[Misc.scala:215:{29,38}]
wire mask_sub_1_2 = mask_sub_sub_0_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_sub_acc_T_1 = mask_sub_size & mask_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_1_1 = mask_sub_sub_0_1 | _mask_sub_acc_T_1; // @[Misc.scala:215:{29,38}]
wire mask_sub_2_2 = mask_sub_sub_1_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_sub_acc_T_2 = mask_sub_size & mask_sub_2_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_2_1 = mask_sub_sub_1_1 | _mask_sub_acc_T_2; // @[Misc.scala:215:{29,38}]
wire mask_sub_3_2 = mask_sub_sub_1_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_sub_acc_T_3 = mask_sub_size & mask_sub_3_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_3_1 = mask_sub_sub_1_1 | _mask_sub_acc_T_3; // @[Misc.scala:215:{29,38}]
wire mask_size = mask_sizeOH[0]; // @[Misc.scala:202:81, :209:26]
wire mask_bit = io_in_a_bits_address_0[0]; // @[Misc.scala:210:26]
wire mask_nbit = ~mask_bit; // @[Misc.scala:210:26, :211:20]
wire mask_eq = mask_sub_0_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T = mask_size & mask_eq; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc = mask_sub_0_1 | _mask_acc_T; // @[Misc.scala:215:{29,38}]
wire mask_eq_1 = mask_sub_0_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_1 = mask_size & mask_eq_1; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_1 = mask_sub_0_1 | _mask_acc_T_1; // @[Misc.scala:215:{29,38}]
wire mask_eq_2 = mask_sub_1_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T_2 = mask_size & mask_eq_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_2 = mask_sub_1_1 | _mask_acc_T_2; // @[Misc.scala:215:{29,38}]
wire mask_eq_3 = mask_sub_1_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_3 = mask_size & mask_eq_3; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_3 = mask_sub_1_1 | _mask_acc_T_3; // @[Misc.scala:215:{29,38}]
wire mask_eq_4 = mask_sub_2_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T_4 = mask_size & mask_eq_4; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_4 = mask_sub_2_1 | _mask_acc_T_4; // @[Misc.scala:215:{29,38}]
wire mask_eq_5 = mask_sub_2_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_5 = mask_size & mask_eq_5; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_5 = mask_sub_2_1 | _mask_acc_T_5; // @[Misc.scala:215:{29,38}]
wire mask_eq_6 = mask_sub_3_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T_6 = mask_size & mask_eq_6; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_6 = mask_sub_3_1 | _mask_acc_T_6; // @[Misc.scala:215:{29,38}]
wire mask_eq_7 = mask_sub_3_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_7 = mask_size & mask_eq_7; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_7 = mask_sub_3_1 | _mask_acc_T_7; // @[Misc.scala:215:{29,38}]
wire [1:0] mask_lo_lo = {mask_acc_1, mask_acc}; // @[Misc.scala:215:29, :222:10]
wire [1:0] mask_lo_hi = {mask_acc_3, mask_acc_2}; // @[Misc.scala:215:29, :222:10]
wire [3:0] mask_lo = {mask_lo_hi, mask_lo_lo}; // @[Misc.scala:222:10]
wire [1:0] mask_hi_lo = {mask_acc_5, mask_acc_4}; // @[Misc.scala:215:29, :222:10]
wire [1:0] mask_hi_hi = {mask_acc_7, mask_acc_6}; // @[Misc.scala:215:29, :222:10]
wire [3:0] mask_hi = {mask_hi_hi, mask_hi_lo}; // @[Misc.scala:222:10]
wire [7:0] mask = {mask_hi, mask_lo}; // @[Misc.scala:222:10]
wire [10:0] uncommonBits = _uncommonBits_T; // @[Parameters.scala:52:{29,56}]
wire [10:0] uncommonBits_1 = _uncommonBits_T_1; // @[Parameters.scala:52:{29,56}]
wire [10:0] uncommonBits_2 = _uncommonBits_T_2; // @[Parameters.scala:52:{29,56}]
wire [10:0] uncommonBits_3 = _uncommonBits_T_3; // @[Parameters.scala:52:{29,56}]
wire [10:0] uncommonBits_4 = _uncommonBits_T_4; // @[Parameters.scala:52:{29,56}]
wire [10:0] uncommonBits_5 = _uncommonBits_T_5; // @[Parameters.scala:52:{29,56}]
wire [10:0] uncommonBits_6 = _uncommonBits_T_6; // @[Parameters.scala:52:{29,56}]
wire [10:0] uncommonBits_7 = _uncommonBits_T_7; // @[Parameters.scala:52:{29,56}]
wire [10:0] uncommonBits_8 = _uncommonBits_T_8; // @[Parameters.scala:52:{29,56}]
wire [10:0] source_ok_uncommonBits_1 = _source_ok_uncommonBits_T_1; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_10 = source_ok_uncommonBits_1 < 11'h410; // @[Parameters.scala:52:56, :57:20]
wire _source_ok_T_11 = _source_ok_T_10; // @[Parameters.scala:56:48, :57:20]
wire _source_ok_WIRE_1_0 = _source_ok_T_11; // @[Parameters.scala:1138:31]
wire _T_665 = io_in_a_ready_0 & io_in_a_valid_0; // @[Decoupled.scala:51:35]
wire _a_first_T; // @[Decoupled.scala:51:35]
assign _a_first_T = _T_665; // @[Decoupled.scala:51:35]
wire _a_first_T_1; // @[Decoupled.scala:51:35]
assign _a_first_T_1 = _T_665; // @[Decoupled.scala:51:35]
wire a_first_done = _a_first_T; // @[Decoupled.scala:51:35]
wire [2:0] _a_first_beats1_decode_T_1 = _a_first_beats1_decode_T[2:0]; // @[package.scala:243:{71,76}]
wire [2:0] _a_first_beats1_decode_T_2 = ~_a_first_beats1_decode_T_1; // @[package.scala:243:{46,76}]
wire _a_first_beats1_opdata_T = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7]
wire _a_first_beats1_opdata_T_1 = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7]
wire a_first_beats1_opdata = ~_a_first_beats1_opdata_T; // @[Edges.scala:92:{28,37}]
reg a_first_counter; // @[Edges.scala:229:27]
wire _a_first_last_T = a_first_counter; // @[Edges.scala:229:27, :232:25]
wire [1:0] _a_first_counter1_T = {1'h0, a_first_counter} - 2'h1; // @[Edges.scala:229:27, :230:28]
wire a_first_counter1 = _a_first_counter1_T[0]; // @[Edges.scala:230:28]
wire a_first = ~a_first_counter; // @[Edges.scala:229:27, :231:25]
wire _a_first_count_T = ~a_first_counter1; // @[Edges.scala:230:28, :234:27]
wire _a_first_counter_T = ~a_first & a_first_counter1; // @[Edges.scala:230:28, :231:25, :236:21]
reg [2:0] opcode; // @[Monitor.scala:387:22]
reg [2:0] param; // @[Monitor.scala:388:22]
reg [1:0] size; // @[Monitor.scala:389:22]
reg [10:0] source; // @[Monitor.scala:390:22]
reg [20:0] address; // @[Monitor.scala:391:22]
wire _T_733 = io_in_d_ready_0 & io_in_d_valid_0; // @[Decoupled.scala:51:35]
wire _d_first_T; // @[Decoupled.scala:51:35]
assign _d_first_T = _T_733; // @[Decoupled.scala:51:35]
wire _d_first_T_1; // @[Decoupled.scala:51:35]
assign _d_first_T_1 = _T_733; // @[Decoupled.scala:51:35]
wire _d_first_T_2; // @[Decoupled.scala:51:35]
assign _d_first_T_2 = _T_733; // @[Decoupled.scala:51:35]
wire d_first_done = _d_first_T; // @[Decoupled.scala:51:35]
wire [5:0] _GEN_0 = 6'h7 << io_in_d_bits_size_0; // @[package.scala:243:71]
wire [5:0] _d_first_beats1_decode_T; // @[package.scala:243:71]
assign _d_first_beats1_decode_T = _GEN_0; // @[package.scala:243:71]
wire [5:0] _d_first_beats1_decode_T_3; // @[package.scala:243:71]
assign _d_first_beats1_decode_T_3 = _GEN_0; // @[package.scala:243:71]
wire [5:0] _d_first_beats1_decode_T_6; // @[package.scala:243:71]
assign _d_first_beats1_decode_T_6 = _GEN_0; // @[package.scala:243:71]
wire [2:0] _d_first_beats1_decode_T_1 = _d_first_beats1_decode_T[2:0]; // @[package.scala:243:{71,76}]
wire [2:0] _d_first_beats1_decode_T_2 = ~_d_first_beats1_decode_T_1; // @[package.scala:243:{46,76}]
wire d_first_beats1_opdata = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7]
wire d_first_beats1_opdata_1 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7]
wire d_first_beats1_opdata_2 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7]
reg d_first_counter; // @[Edges.scala:229:27]
wire _d_first_last_T = d_first_counter; // @[Edges.scala:229:27, :232:25]
wire [1:0] _d_first_counter1_T = {1'h0, d_first_counter} - 2'h1; // @[Edges.scala:229:27, :230:28]
wire d_first_counter1 = _d_first_counter1_T[0]; // @[Edges.scala:230:28]
wire d_first = ~d_first_counter; // @[Edges.scala:229:27, :231:25]
wire _d_first_count_T = ~d_first_counter1; // @[Edges.scala:230:28, :234:27]
wire _d_first_counter_T = ~d_first & d_first_counter1; // @[Edges.scala:230:28, :231:25, :236:21]
reg [2:0] opcode_1; // @[Monitor.scala:538:22]
reg [1:0] size_1; // @[Monitor.scala:540:22]
reg [10:0] source_1; // @[Monitor.scala:541:22]
reg [1039:0] inflight; // @[Monitor.scala:614:27]
reg [4159:0] inflight_opcodes; // @[Monitor.scala:616:35]
reg [4159:0] inflight_sizes; // @[Monitor.scala:618:33]
wire a_first_done_1 = _a_first_T_1; // @[Decoupled.scala:51:35]
wire [2:0] _a_first_beats1_decode_T_4 = _a_first_beats1_decode_T_3[2:0]; // @[package.scala:243:{71,76}]
wire [2:0] _a_first_beats1_decode_T_5 = ~_a_first_beats1_decode_T_4; // @[package.scala:243:{46,76}]
wire a_first_beats1_opdata_1 = ~_a_first_beats1_opdata_T_1; // @[Edges.scala:92:{28,37}]
reg a_first_counter_1; // @[Edges.scala:229:27]
wire _a_first_last_T_2 = a_first_counter_1; // @[Edges.scala:229:27, :232:25]
wire [1:0] _a_first_counter1_T_1 = {1'h0, a_first_counter_1} - 2'h1; // @[Edges.scala:229:27, :230:28]
wire a_first_counter1_1 = _a_first_counter1_T_1[0]; // @[Edges.scala:230:28]
wire a_first_1 = ~a_first_counter_1; // @[Edges.scala:229:27, :231:25]
wire _a_first_count_T_1 = ~a_first_counter1_1; // @[Edges.scala:230:28, :234:27]
wire _a_first_counter_T_1 = ~a_first_1 & a_first_counter1_1; // @[Edges.scala:230:28, :231:25, :236:21]
wire d_first_done_1 = _d_first_T_1; // @[Decoupled.scala:51:35]
wire [2:0] _d_first_beats1_decode_T_4 = _d_first_beats1_decode_T_3[2:0]; // @[package.scala:243:{71,76}]
wire [2:0] _d_first_beats1_decode_T_5 = ~_d_first_beats1_decode_T_4; // @[package.scala:243:{46,76}]
reg d_first_counter_1; // @[Edges.scala:229:27]
wire _d_first_last_T_2 = d_first_counter_1; // @[Edges.scala:229:27, :232:25]
wire [1:0] _d_first_counter1_T_1 = {1'h0, d_first_counter_1} - 2'h1; // @[Edges.scala:229:27, :230:28]
wire d_first_counter1_1 = _d_first_counter1_T_1[0]; // @[Edges.scala:230:28]
wire d_first_1 = ~d_first_counter_1; // @[Edges.scala:229:27, :231:25]
wire _d_first_count_T_1 = ~d_first_counter1_1; // @[Edges.scala:230:28, :234:27]
wire _d_first_counter_T_1 = ~d_first_1 & d_first_counter1_1; // @[Edges.scala:230:28, :231:25, :236:21]
wire [1039:0] a_set; // @[Monitor.scala:626:34]
wire [1039:0] a_set_wo_ready; // @[Monitor.scala:627:34]
wire [4159:0] a_opcodes_set; // @[Monitor.scala:630:33]
wire [4159:0] a_sizes_set; // @[Monitor.scala:632:31]
wire [2:0] a_opcode_lookup; // @[Monitor.scala:635:35]
wire [13:0] _GEN_1 = {1'h0, io_in_d_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :637:69]
wire [13:0] _a_opcode_lookup_T; // @[Monitor.scala:637:69]
assign _a_opcode_lookup_T = _GEN_1; // @[Monitor.scala:637:69]
wire [13:0] _a_size_lookup_T; // @[Monitor.scala:641:65]
assign _a_size_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :641:65]
wire [13:0] _d_opcodes_clr_T_4; // @[Monitor.scala:680:101]
assign _d_opcodes_clr_T_4 = _GEN_1; // @[Monitor.scala:637:69, :680:101]
wire [13:0] _d_sizes_clr_T_4; // @[Monitor.scala:681:99]
assign _d_sizes_clr_T_4 = _GEN_1; // @[Monitor.scala:637:69, :681:99]
wire [13:0] _c_opcode_lookup_T; // @[Monitor.scala:749:69]
assign _c_opcode_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :749:69]
wire [13:0] _c_size_lookup_T; // @[Monitor.scala:750:67]
assign _c_size_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :750:67]
wire [13:0] _d_opcodes_clr_T_10; // @[Monitor.scala:790:101]
assign _d_opcodes_clr_T_10 = _GEN_1; // @[Monitor.scala:637:69, :790:101]
wire [13:0] _d_sizes_clr_T_10; // @[Monitor.scala:791:99]
assign _d_sizes_clr_T_10 = _GEN_1; // @[Monitor.scala:637:69, :791:99]
wire [4159:0] _a_opcode_lookup_T_1 = inflight_opcodes >> _a_opcode_lookup_T; // @[Monitor.scala:616:35, :637:{44,69}]
wire [4159:0] _a_opcode_lookup_T_6 = {4156'h0, _a_opcode_lookup_T_1[3:0]}; // @[Monitor.scala:637:{44,97}]
wire [4159:0] _a_opcode_lookup_T_7 = {1'h0, _a_opcode_lookup_T_6[4159:1]}; // @[Monitor.scala:637:{97,152}]
assign a_opcode_lookup = _a_opcode_lookup_T_7[2:0]; // @[Monitor.scala:635:35, :637:{21,152}]
wire [3:0] a_size_lookup; // @[Monitor.scala:639:33]
wire [4159:0] _a_size_lookup_T_1 = inflight_sizes >> _a_size_lookup_T; // @[Monitor.scala:618:33, :641:{40,65}]
wire [4159:0] _a_size_lookup_T_6 = {4156'h0, _a_size_lookup_T_1[3:0]}; // @[Monitor.scala:641:{40,91}]
wire [4159:0] _a_size_lookup_T_7 = {1'h0, _a_size_lookup_T_6[4159:1]}; // @[Monitor.scala:641:{91,144}]
assign a_size_lookup = _a_size_lookup_T_7[3:0]; // @[Monitor.scala:639:33, :641:{19,144}]
wire [3:0] a_opcodes_set_interm; // @[Monitor.scala:646:40]
wire [2:0] a_sizes_set_interm; // @[Monitor.scala:648:38]
wire _same_cycle_resp_T = io_in_a_valid_0 & a_first_1; // @[Monitor.scala:36:7, :651:26, :684:44]
wire [2047:0] _GEN_2 = 2048'h1 << io_in_a_bits_source_0; // @[OneHot.scala:58:35]
wire [2047:0] _a_set_wo_ready_T; // @[OneHot.scala:58:35]
assign _a_set_wo_ready_T = _GEN_2; // @[OneHot.scala:58:35]
wire [2047:0] _a_set_T; // @[OneHot.scala:58:35]
assign _a_set_T = _GEN_2; // @[OneHot.scala:58:35]
assign a_set_wo_ready = _same_cycle_resp_T ? _a_set_wo_ready_T[1039:0] : 1040'h0; // @[OneHot.scala:58:35]
wire _T_598 = _T_665 & a_first_1; // @[Decoupled.scala:51:35]
assign a_set = _T_598 ? _a_set_T[1039:0] : 1040'h0; // @[OneHot.scala:58:35]
wire [3:0] _a_opcodes_set_interm_T = {io_in_a_bits_opcode_0, 1'h0}; // @[Monitor.scala:36:7, :657:53]
wire [3:0] _a_opcodes_set_interm_T_1 = {_a_opcodes_set_interm_T[3:1], 1'h1}; // @[Monitor.scala:657:{53,61}]
assign a_opcodes_set_interm = _T_598 ? _a_opcodes_set_interm_T_1 : 4'h0; // @[Monitor.scala:646:40, :655:{25,70}, :657:{28,61}]
wire [2:0] _a_sizes_set_interm_T = {io_in_a_bits_size_0, 1'h0}; // @[Monitor.scala:36:7, :658:51]
wire [2:0] _a_sizes_set_interm_T_1 = {_a_sizes_set_interm_T[2:1], 1'h1}; // @[Monitor.scala:658:{51,59}]
assign a_sizes_set_interm = _T_598 ? _a_sizes_set_interm_T_1 : 3'h0; // @[Monitor.scala:648:38, :655:{25,70}, :658:{28,59}]
wire [13:0] _GEN_3 = {1'h0, io_in_a_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :659:79]
wire [13:0] _a_opcodes_set_T; // @[Monitor.scala:659:79]
assign _a_opcodes_set_T = _GEN_3; // @[Monitor.scala:659:79]
wire [13:0] _a_sizes_set_T; // @[Monitor.scala:660:77]
assign _a_sizes_set_T = _GEN_3; // @[Monitor.scala:659:79, :660:77]
wire [16386:0] _a_opcodes_set_T_1 = {16383'h0, a_opcodes_set_interm} << _a_opcodes_set_T; // @[Monitor.scala:646:40, :659:{54,79}]
assign a_opcodes_set = _T_598 ? _a_opcodes_set_T_1[4159:0] : 4160'h0; // @[Monitor.scala:630:33, :655:{25,70}, :659:{28,54}]
wire [16385:0] _a_sizes_set_T_1 = {16383'h0, a_sizes_set_interm} << _a_sizes_set_T; // @[Monitor.scala:648:38, :659:54, :660:{52,77}]
assign a_sizes_set = _T_598 ? _a_sizes_set_T_1[4159:0] : 4160'h0; // @[Monitor.scala:632:31, :655:{25,70}, :660:{28,52}]
wire [1039:0] d_clr; // @[Monitor.scala:664:34]
wire [1039:0] d_clr_wo_ready; // @[Monitor.scala:665:34]
wire [4159:0] d_opcodes_clr; // @[Monitor.scala:668:33]
wire [4159:0] d_sizes_clr; // @[Monitor.scala:670:31]
wire _GEN_4 = io_in_d_bits_opcode_0 == 3'h6; // @[Monitor.scala:36:7, :673:46]
wire d_release_ack; // @[Monitor.scala:673:46]
assign d_release_ack = _GEN_4; // @[Monitor.scala:673:46]
wire d_release_ack_1; // @[Monitor.scala:783:46]
assign d_release_ack_1 = _GEN_4; // @[Monitor.scala:673:46, :783:46]
wire _T_644 = io_in_d_valid_0 & d_first_1; // @[Monitor.scala:36:7, :674:26]
wire [2047:0] _GEN_5 = 2048'h1 << io_in_d_bits_source_0; // @[OneHot.scala:58:35]
wire [2047:0] _d_clr_wo_ready_T; // @[OneHot.scala:58:35]
assign _d_clr_wo_ready_T = _GEN_5; // @[OneHot.scala:58:35]
wire [2047:0] _d_clr_T; // @[OneHot.scala:58:35]
assign _d_clr_T = _GEN_5; // @[OneHot.scala:58:35]
wire [2047:0] _d_clr_wo_ready_T_1; // @[OneHot.scala:58:35]
assign _d_clr_wo_ready_T_1 = _GEN_5; // @[OneHot.scala:58:35]
wire [2047:0] _d_clr_T_1; // @[OneHot.scala:58:35]
assign _d_clr_T_1 = _GEN_5; // @[OneHot.scala:58:35]
assign d_clr_wo_ready = _T_644 & ~d_release_ack ? _d_clr_wo_ready_T[1039:0] : 1040'h0; // @[OneHot.scala:58:35]
wire _T_613 = _T_733 & d_first_1 & ~d_release_ack; // @[Decoupled.scala:51:35]
assign d_clr = _T_613 ? _d_clr_T[1039:0] : 1040'h0; // @[OneHot.scala:58:35]
wire [16398:0] _d_opcodes_clr_T_5 = 16399'hF << _d_opcodes_clr_T_4; // @[Monitor.scala:680:{76,101}]
assign d_opcodes_clr = _T_613 ? _d_opcodes_clr_T_5[4159:0] : 4160'h0; // @[Monitor.scala:668:33, :678:{25,70,89}, :680:{21,76}]
wire [16398:0] _d_sizes_clr_T_5 = 16399'hF << _d_sizes_clr_T_4; // @[Monitor.scala:681:{74,99}]
assign d_sizes_clr = _T_613 ? _d_sizes_clr_T_5[4159:0] : 4160'h0; // @[Monitor.scala:670:31, :678:{25,70,89}, :681:{21,74}]
wire _same_cycle_resp_T_1 = _same_cycle_resp_T; // @[Monitor.scala:684:{44,55}]
wire _same_cycle_resp_T_2 = io_in_a_bits_source_0 == io_in_d_bits_source_0; // @[Monitor.scala:36:7, :684:113]
wire same_cycle_resp = _same_cycle_resp_T_1 & _same_cycle_resp_T_2; // @[Monitor.scala:684:{55,88,113}]
wire [1039:0] _inflight_T = inflight | a_set; // @[Monitor.scala:614:27, :626:34, :705:27]
wire [1039:0] _inflight_T_1 = ~d_clr; // @[Monitor.scala:664:34, :705:38]
wire [1039:0] _inflight_T_2 = _inflight_T & _inflight_T_1; // @[Monitor.scala:705:{27,36,38}]
wire [4159:0] _inflight_opcodes_T = inflight_opcodes | a_opcodes_set; // @[Monitor.scala:616:35, :630:33, :706:43]
wire [4159:0] _inflight_opcodes_T_1 = ~d_opcodes_clr; // @[Monitor.scala:668:33, :706:62]
wire [4159:0] _inflight_opcodes_T_2 = _inflight_opcodes_T & _inflight_opcodes_T_1; // @[Monitor.scala:706:{43,60,62}]
wire [4159:0] _inflight_sizes_T = inflight_sizes | a_sizes_set; // @[Monitor.scala:618:33, :632:31, :707:39]
wire [4159:0] _inflight_sizes_T_1 = ~d_sizes_clr; // @[Monitor.scala:670:31, :707:56]
wire [4159:0] _inflight_sizes_T_2 = _inflight_sizes_T & _inflight_sizes_T_1; // @[Monitor.scala:707:{39,54,56}]
reg [31:0] watchdog; // @[Monitor.scala:709:27]
wire [32:0] _watchdog_T = {1'h0, watchdog} + 33'h1; // @[Monitor.scala:709:27, :714:26]
wire [31:0] _watchdog_T_1 = _watchdog_T[31:0]; // @[Monitor.scala:714:26]
reg [1039:0] inflight_1; // @[Monitor.scala:726:35]
wire [1039:0] _inflight_T_3 = inflight_1; // @[Monitor.scala:726:35, :814:35]
reg [4159:0] inflight_opcodes_1; // @[Monitor.scala:727:35]
wire [4159:0] _inflight_opcodes_T_3 = inflight_opcodes_1; // @[Monitor.scala:727:35, :815:43]
reg [4159:0] inflight_sizes_1; // @[Monitor.scala:728:35]
wire [4159:0] _inflight_sizes_T_3 = inflight_sizes_1; // @[Monitor.scala:728:35, :816:41]
wire d_first_done_2 = _d_first_T_2; // @[Decoupled.scala:51:35]
wire [2:0] _d_first_beats1_decode_T_7 = _d_first_beats1_decode_T_6[2:0]; // @[package.scala:243:{71,76}]
wire [2:0] _d_first_beats1_decode_T_8 = ~_d_first_beats1_decode_T_7; // @[package.scala:243:{46,76}]
reg d_first_counter_2; // @[Edges.scala:229:27]
wire _d_first_last_T_4 = d_first_counter_2; // @[Edges.scala:229:27, :232:25]
wire [1:0] _d_first_counter1_T_2 = {1'h0, d_first_counter_2} - 2'h1; // @[Edges.scala:229:27, :230:28]
wire d_first_counter1_2 = _d_first_counter1_T_2[0]; // @[Edges.scala:230:28]
wire d_first_2 = ~d_first_counter_2; // @[Edges.scala:229:27, :231:25]
wire _d_first_count_T_2 = ~d_first_counter1_2; // @[Edges.scala:230:28, :234:27]
wire _d_first_counter_T_2 = ~d_first_2 & d_first_counter1_2; // @[Edges.scala:230:28, :231:25, :236:21]
wire [3:0] c_opcode_lookup; // @[Monitor.scala:747:35]
wire [3:0] c_size_lookup; // @[Monitor.scala:748:35]
wire [4159:0] _c_opcode_lookup_T_1 = inflight_opcodes_1 >> _c_opcode_lookup_T; // @[Monitor.scala:727:35, :749:{44,69}]
wire [4159:0] _c_opcode_lookup_T_6 = {4156'h0, _c_opcode_lookup_T_1[3:0]}; // @[Monitor.scala:749:{44,97}]
wire [4159:0] _c_opcode_lookup_T_7 = {1'h0, _c_opcode_lookup_T_6[4159:1]}; // @[Monitor.scala:749:{97,152}]
assign c_opcode_lookup = _c_opcode_lookup_T_7[3:0]; // @[Monitor.scala:747:35, :749:{21,152}]
wire [4159:0] _c_size_lookup_T_1 = inflight_sizes_1 >> _c_size_lookup_T; // @[Monitor.scala:728:35, :750:{42,67}]
wire [4159:0] _c_size_lookup_T_6 = {4156'h0, _c_size_lookup_T_1[3:0]}; // @[Monitor.scala:750:{42,93}]
wire [4159:0] _c_size_lookup_T_7 = {1'h0, _c_size_lookup_T_6[4159:1]}; // @[Monitor.scala:750:{93,146}]
assign c_size_lookup = _c_size_lookup_T_7[3:0]; // @[Monitor.scala:748:35, :750:{21,146}]
wire [1039:0] d_clr_1; // @[Monitor.scala:774:34]
wire [1039:0] d_clr_wo_ready_1; // @[Monitor.scala:775:34]
wire [4159:0] d_opcodes_clr_1; // @[Monitor.scala:776:34]
wire [4159:0] d_sizes_clr_1; // @[Monitor.scala:777:34]
wire _T_709 = io_in_d_valid_0 & d_first_2; // @[Monitor.scala:36:7, :784:26]
assign d_clr_wo_ready_1 = _T_709 & d_release_ack_1 ? _d_clr_wo_ready_T_1[1039:0] : 1040'h0; // @[OneHot.scala:58:35]
wire _T_691 = _T_733 & d_first_2 & d_release_ack_1; // @[Decoupled.scala:51:35]
assign d_clr_1 = _T_691 ? _d_clr_T_1[1039:0] : 1040'h0; // @[OneHot.scala:58:35]
wire [16398:0] _d_opcodes_clr_T_11 = 16399'hF << _d_opcodes_clr_T_10; // @[Monitor.scala:790:{76,101}]
assign d_opcodes_clr_1 = _T_691 ? _d_opcodes_clr_T_11[4159:0] : 4160'h0; // @[Monitor.scala:776:34, :788:{25,70,88}, :790:{21,76}]
wire [16398:0] _d_sizes_clr_T_11 = 16399'hF << _d_sizes_clr_T_10; // @[Monitor.scala:791:{74,99}]
assign d_sizes_clr_1 = _T_691 ? _d_sizes_clr_T_11[4159:0] : 4160'h0; // @[Monitor.scala:777:34, :788:{25,70,88}, :791:{21,74}]
wire _same_cycle_resp_T_8 = io_in_d_bits_source_0 == 11'h0; // @[Monitor.scala:36:7, :795:113]
wire [1039:0] _inflight_T_4 = ~d_clr_1; // @[Monitor.scala:774:34, :814:46]
wire [1039:0] _inflight_T_5 = _inflight_T_3 & _inflight_T_4; // @[Monitor.scala:814:{35,44,46}]
wire [4159:0] _inflight_opcodes_T_4 = ~d_opcodes_clr_1; // @[Monitor.scala:776:34, :815:62]
wire [4159:0] _inflight_opcodes_T_5 = _inflight_opcodes_T_3 & _inflight_opcodes_T_4; // @[Monitor.scala:815:{43,60,62}]
wire [4159:0] _inflight_sizes_T_4 = ~d_sizes_clr_1; // @[Monitor.scala:777:34, :816:58]
wire [4159:0] _inflight_sizes_T_5 = _inflight_sizes_T_3 & _inflight_sizes_T_4; // @[Monitor.scala:816:{41,56,58}]
reg [31:0] watchdog_1; // @[Monitor.scala:818:27] |
Generate the Verilog code corresponding to the following Chisel files.
File MulRecFN.scala:
/*============================================================================
This Chisel source file is part of a pre-release version of the HardFloat IEEE
Floating-Point Arithmetic Package, by John R. Hauser (ported from Verilog to
Chisel by Andrew Waterman).
Copyright 2019, 2020 The Regents of the University of California. All rights
reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions, and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions, and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the University nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
=============================================================================*/
package hardfloat
import chisel3._
import chisel3.util._
import consts._
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
class MulFullRawFN(expWidth: Int, sigWidth: Int) extends chisel3.RawModule
{
val io = IO(new Bundle {
val a = Input(new RawFloat(expWidth, sigWidth))
val b = Input(new RawFloat(expWidth, sigWidth))
val invalidExc = Output(Bool())
val rawOut = Output(new RawFloat(expWidth, sigWidth*2 - 1))
})
/*------------------------------------------------------------------------
*------------------------------------------------------------------------*/
val notSigNaN_invalidExc = (io.a.isInf && io.b.isZero) || (io.a.isZero && io.b.isInf)
val notNaN_isInfOut = io.a.isInf || io.b.isInf
val notNaN_isZeroOut = io.a.isZero || io.b.isZero
val notNaN_signOut = io.a.sign ^ io.b.sign
val common_sExpOut = io.a.sExp + io.b.sExp - (1<<expWidth).S
val common_sigOut = (io.a.sig * io.b.sig)(sigWidth*2 - 1, 0)
/*------------------------------------------------------------------------
*------------------------------------------------------------------------*/
io.invalidExc := isSigNaNRawFloat(io.a) || isSigNaNRawFloat(io.b) || notSigNaN_invalidExc
io.rawOut.isInf := notNaN_isInfOut
io.rawOut.isZero := notNaN_isZeroOut
io.rawOut.sExp := common_sExpOut
io.rawOut.isNaN := io.a.isNaN || io.b.isNaN
io.rawOut.sign := notNaN_signOut
io.rawOut.sig := common_sigOut
}
class MulRawFN(expWidth: Int, sigWidth: Int) extends chisel3.RawModule
{
val io = IO(new Bundle {
val a = Input(new RawFloat(expWidth, sigWidth))
val b = Input(new RawFloat(expWidth, sigWidth))
val invalidExc = Output(Bool())
val rawOut = Output(new RawFloat(expWidth, sigWidth + 2))
})
val mulFullRaw = Module(new MulFullRawFN(expWidth, sigWidth))
mulFullRaw.io.a := io.a
mulFullRaw.io.b := io.b
io.invalidExc := mulFullRaw.io.invalidExc
io.rawOut := mulFullRaw.io.rawOut
io.rawOut.sig := {
val sig = mulFullRaw.io.rawOut.sig
Cat(sig >> (sigWidth - 2), sig(sigWidth - 3, 0).orR)
}
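    // Clarifying note (added; not in the original source): mulFullRaw produces the full
    // 2*sigWidth-bit significand product. The block above keeps its top sigWidth + 2 bits
    // and ORs all discarded low bits into one sticky bit, giving the (sigWidth + 2)-wide
    // RawFloat significand the rounding unit expects. For single precision (sigWidth = 24)
    // this is Cat(sig >> 22, sig(21, 0).orR): 26 kept bits plus 1 sticky bit = 27 bits,
    // matching the [26:0] io_rawOut_sig seen in the generated Verilog below.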
}
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
class MulRecFN(expWidth: Int, sigWidth: Int) extends chisel3.RawModule
{
val io = IO(new Bundle {
val a = Input(UInt((expWidth + sigWidth + 1).W))
val b = Input(UInt((expWidth + sigWidth + 1).W))
val roundingMode = Input(UInt(3.W))
val detectTininess = Input(Bool())
val out = Output(UInt((expWidth + sigWidth + 1).W))
val exceptionFlags = Output(UInt(5.W))
})
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val mulRawFN = Module(new MulRawFN(expWidth, sigWidth))
mulRawFN.io.a := rawFloatFromRecFN(expWidth, sigWidth, io.a)
mulRawFN.io.b := rawFloatFromRecFN(expWidth, sigWidth, io.b)
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val roundRawFNToRecFN =
Module(new RoundRawFNToRecFN(expWidth, sigWidth, 0))
roundRawFNToRecFN.io.invalidExc := mulRawFN.io.invalidExc
roundRawFNToRecFN.io.infiniteExc := false.B
roundRawFNToRecFN.io.in := mulRawFN.io.rawOut
roundRawFNToRecFN.io.roundingMode := io.roundingMode
roundRawFNToRecFN.io.detectTininess := io.detectTininess
io.out := roundRawFNToRecFN.io.out
io.exceptionFlags := roundRawFNToRecFN.io.exceptionFlags
}
| module MulRawFN_32( // @[MulRecFN.scala:75:7]
input io_a_isNaN, // @[MulRecFN.scala:77:16]
input io_a_isInf, // @[MulRecFN.scala:77:16]
input io_a_isZero, // @[MulRecFN.scala:77:16]
input io_a_sign, // @[MulRecFN.scala:77:16]
input [9:0] io_a_sExp, // @[MulRecFN.scala:77:16]
input [24:0] io_a_sig, // @[MulRecFN.scala:77:16]
input io_b_isNaN, // @[MulRecFN.scala:77:16]
input io_b_isInf, // @[MulRecFN.scala:77:16]
input io_b_isZero, // @[MulRecFN.scala:77:16]
input io_b_sign, // @[MulRecFN.scala:77:16]
input [9:0] io_b_sExp, // @[MulRecFN.scala:77:16]
input [24:0] io_b_sig, // @[MulRecFN.scala:77:16]
output io_invalidExc, // @[MulRecFN.scala:77:16]
output io_rawOut_isNaN, // @[MulRecFN.scala:77:16]
output io_rawOut_isInf, // @[MulRecFN.scala:77:16]
output io_rawOut_isZero, // @[MulRecFN.scala:77:16]
output io_rawOut_sign, // @[MulRecFN.scala:77:16]
output [9:0] io_rawOut_sExp, // @[MulRecFN.scala:77:16]
output [26:0] io_rawOut_sig // @[MulRecFN.scala:77:16]
);
wire [47:0] _mulFullRaw_io_rawOut_sig; // @[MulRecFN.scala:84:28]
wire io_a_isNaN_0 = io_a_isNaN; // @[MulRecFN.scala:75:7]
wire io_a_isInf_0 = io_a_isInf; // @[MulRecFN.scala:75:7]
wire io_a_isZero_0 = io_a_isZero; // @[MulRecFN.scala:75:7]
wire io_a_sign_0 = io_a_sign; // @[MulRecFN.scala:75:7]
wire [9:0] io_a_sExp_0 = io_a_sExp; // @[MulRecFN.scala:75:7]
wire [24:0] io_a_sig_0 = io_a_sig; // @[MulRecFN.scala:75:7]
wire io_b_isNaN_0 = io_b_isNaN; // @[MulRecFN.scala:75:7]
wire io_b_isInf_0 = io_b_isInf; // @[MulRecFN.scala:75:7]
wire io_b_isZero_0 = io_b_isZero; // @[MulRecFN.scala:75:7]
wire io_b_sign_0 = io_b_sign; // @[MulRecFN.scala:75:7]
wire [9:0] io_b_sExp_0 = io_b_sExp; // @[MulRecFN.scala:75:7]
wire [24:0] io_b_sig_0 = io_b_sig; // @[MulRecFN.scala:75:7]
wire [26:0] _io_rawOut_sig_T_3; // @[MulRecFN.scala:93:10]
wire io_rawOut_isNaN_0; // @[MulRecFN.scala:75:7]
wire io_rawOut_isInf_0; // @[MulRecFN.scala:75:7]
wire io_rawOut_isZero_0; // @[MulRecFN.scala:75:7]
wire io_rawOut_sign_0; // @[MulRecFN.scala:75:7]
wire [9:0] io_rawOut_sExp_0; // @[MulRecFN.scala:75:7]
wire [26:0] io_rawOut_sig_0; // @[MulRecFN.scala:75:7]
wire io_invalidExc_0; // @[MulRecFN.scala:75:7]
wire [25:0] _io_rawOut_sig_T = _mulFullRaw_io_rawOut_sig[47:22]; // @[MulRecFN.scala:84:28, :93:15]
wire [21:0] _io_rawOut_sig_T_1 = _mulFullRaw_io_rawOut_sig[21:0]; // @[MulRecFN.scala:84:28, :93:37]
wire _io_rawOut_sig_T_2 = |_io_rawOut_sig_T_1; // @[MulRecFN.scala:93:{37,55}]
assign _io_rawOut_sig_T_3 = {_io_rawOut_sig_T, _io_rawOut_sig_T_2}; // @[MulRecFN.scala:93:{10,15,55}]
assign io_rawOut_sig_0 = _io_rawOut_sig_T_3; // @[MulRecFN.scala:75:7, :93:10]
MulFullRawFN_32 mulFullRaw ( // @[MulRecFN.scala:84:28]
.io_a_isNaN (io_a_isNaN_0), // @[MulRecFN.scala:75:7]
.io_a_isInf (io_a_isInf_0), // @[MulRecFN.scala:75:7]
.io_a_isZero (io_a_isZero_0), // @[MulRecFN.scala:75:7]
.io_a_sign (io_a_sign_0), // @[MulRecFN.scala:75:7]
.io_a_sExp (io_a_sExp_0), // @[MulRecFN.scala:75:7]
.io_a_sig (io_a_sig_0), // @[MulRecFN.scala:75:7]
.io_b_isNaN (io_b_isNaN_0), // @[MulRecFN.scala:75:7]
.io_b_isInf (io_b_isInf_0), // @[MulRecFN.scala:75:7]
.io_b_isZero (io_b_isZero_0), // @[MulRecFN.scala:75:7]
.io_b_sign (io_b_sign_0), // @[MulRecFN.scala:75:7]
.io_b_sExp (io_b_sExp_0), // @[MulRecFN.scala:75:7]
.io_b_sig (io_b_sig_0), // @[MulRecFN.scala:75:7]
.io_invalidExc (io_invalidExc_0),
.io_rawOut_isNaN (io_rawOut_isNaN_0),
.io_rawOut_isInf (io_rawOut_isInf_0),
.io_rawOut_isZero (io_rawOut_isZero_0),
.io_rawOut_sign (io_rawOut_sign_0),
.io_rawOut_sExp (io_rawOut_sExp_0),
.io_rawOut_sig (_mulFullRaw_io_rawOut_sig)
); // @[MulRecFN.scala:84:28]
assign io_invalidExc = io_invalidExc_0; // @[MulRecFN.scala:75:7]
assign io_rawOut_isNaN = io_rawOut_isNaN_0; // @[MulRecFN.scala:75:7]
assign io_rawOut_isInf = io_rawOut_isInf_0; // @[MulRecFN.scala:75:7]
assign io_rawOut_isZero = io_rawOut_isZero_0; // @[MulRecFN.scala:75:7]
assign io_rawOut_sign = io_rawOut_sign_0; // @[MulRecFN.scala:75:7]
assign io_rawOut_sExp = io_rawOut_sExp_0; // @[MulRecFN.scala:75:7]
assign io_rawOut_sig = io_rawOut_sig_0; // @[MulRecFN.scala:75:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File Multiplier.scala:
// See LICENSE.Berkeley for license details.
// See LICENSE.SiFive for license details.
package freechips.rocketchip.rocket
import chisel3._
import chisel3.util.{Cat, log2Up, log2Ceil, log2Floor, Log2, Decoupled, Enum, Fill, Valid, Pipe}
import freechips.rocketchip.util._
import ALU._
class MultiplierReq(dataBits: Int, tagBits: Int) extends Bundle {
val fn = Bits(SZ_ALU_FN.W)
val dw = Bits(SZ_DW.W)
val in1 = Bits(dataBits.W)
val in2 = Bits(dataBits.W)
val tag = UInt(tagBits.W)
}
class MultiplierResp(dataBits: Int, tagBits: Int) extends Bundle {
val data = Bits(dataBits.W)
val full_data = Bits((2*dataBits).W)
val tag = UInt(tagBits.W)
}
class MultiplierIO(val dataBits: Int, val tagBits: Int) extends Bundle {
val req = Flipped(Decoupled(new MultiplierReq(dataBits, tagBits)))
val kill = Input(Bool())
val resp = Decoupled(new MultiplierResp(dataBits, tagBits))
}
case class MulDivParams(
mulUnroll: Int = 1,
divUnroll: Int = 1,
mulEarlyOut: Boolean = false,
divEarlyOut: Boolean = false,
divEarlyOutGranularity: Int = 1
)
class MulDiv(cfg: MulDivParams, width: Int, nXpr: Int = 32) extends Module {
private def minDivLatency = (cfg.divUnroll > 0).option(if (cfg.divEarlyOut) 3 else 1 + w/cfg.divUnroll)
private def minMulLatency = (cfg.mulUnroll > 0).option(if (cfg.mulEarlyOut) 2 else w/cfg.mulUnroll)
def minLatency: Int = (minDivLatency ++ minMulLatency).min
val io = IO(new MultiplierIO(width, log2Up(nXpr)))
val w = io.req.bits.in1.getWidth
val mulw = if (cfg.mulUnroll == 0) w else (w + cfg.mulUnroll - 1) / cfg.mulUnroll * cfg.mulUnroll
val fastMulW = if (cfg.mulUnroll == 0) false else w/2 > cfg.mulUnroll && w % (2*cfg.mulUnroll) == 0
val s_ready :: s_neg_inputs :: s_mul :: s_div :: s_dummy :: s_neg_output :: s_done_mul :: s_done_div :: Nil = Enum(8)
val state = RegInit(s_ready)
val req = Reg(chiselTypeOf(io.req.bits))
val count = Reg(UInt(log2Ceil(
((cfg.divUnroll != 0).option(w/cfg.divUnroll + 1).toSeq ++
(cfg.mulUnroll != 0).option(mulw/cfg.mulUnroll)).reduce(_ max _)).W))
val neg_out = Reg(Bool())
val isHi = Reg(Bool())
val resHi = Reg(Bool())
val divisor = Reg(Bits((w+1).W)) // div only needs w bits
val remainder = Reg(Bits((2*mulw+2).W)) // div only needs 2*w+1 bits
val mulDecode = List(
FN_MUL -> List(Y, N, X, X),
FN_MULH -> List(Y, Y, Y, Y),
FN_MULHU -> List(Y, Y, N, N),
FN_MULHSU -> List(Y, Y, Y, N))
val divDecode = List(
FN_DIV -> List(N, N, Y, Y),
FN_REM -> List(N, Y, Y, Y),
FN_DIVU -> List(N, N, N, N),
FN_REMU -> List(N, Y, N, N))
val cmdMul :: cmdHi :: lhsSigned :: rhsSigned :: Nil =
DecodeLogic(io.req.bits.fn, List(X, X, X, X),
(if (cfg.divUnroll != 0) divDecode else Nil) ++ (if (cfg.mulUnroll != 0) mulDecode else Nil)).map(_.asBool)
require(w == 32 || w == 64)
def halfWidth(req: MultiplierReq) = (w > 32).B && req.dw === DW_32
def sext(x: Bits, halfW: Bool, signed: Bool) = {
val sign = signed && Mux(halfW, x(w/2-1), x(w-1))
val hi = Mux(halfW, Fill(w/2, sign), x(w-1,w/2))
(Cat(hi, x(w/2-1,0)), sign)
}
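  // Clarifying note (added; not in the original source): for a 32-bit operation on a
  // 64-bit datapath (dw === DW_32), sext replaces the upper half of the operand with a
  // fill of its bit 31 (when signed) or with zeros (when unsigned), and also returns that
  // effective sign bit. E.g. a signed 32-bit 0xFFFF_FFFF becomes 0xFFFF_FFFF_FFFF_FFFF
  // with sign = 1, so the iterative datapath can treat both widths uniformly.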
val (lhs_in, lhs_sign) = sext(io.req.bits.in1, halfWidth(io.req.bits), lhsSigned)
val (rhs_in, rhs_sign) = sext(io.req.bits.in2, halfWidth(io.req.bits), rhsSigned)
val subtractor = remainder(2*w,w) - divisor
val result = Mux(resHi, remainder(2*w, w+1), remainder(w-1, 0))
val negated_remainder = -result
if (cfg.divUnroll != 0) when (state === s_neg_inputs) {
when (remainder(w-1)) {
remainder := negated_remainder
}
when (divisor(w-1)) {
divisor := subtractor
}
state := s_div
}
if (cfg.divUnroll != 0) when (state === s_neg_output) {
remainder := negated_remainder
state := s_done_div
resHi := false.B
}
if (cfg.mulUnroll != 0) when (state === s_mul) {
val mulReg = Cat(remainder(2*mulw+1,w+1),remainder(w-1,0))
val mplierSign = remainder(w)
val mplier = mulReg(mulw-1,0)
val accum = mulReg(2*mulw,mulw).asSInt
val mpcand = divisor.asSInt
val prod = Cat(mplierSign, mplier(cfg.mulUnroll-1, 0)).asSInt * mpcand + accum
val nextMulReg = Cat(prod, mplier(mulw-1, cfg.mulUnroll))
val nextMplierSign = count === (mulw/cfg.mulUnroll-2).U && neg_out
val eOutMask = ((BigInt(-1) << mulw).S >> (count * cfg.mulUnroll.U)(log2Up(mulw)-1,0))(mulw-1,0)
val eOut = (cfg.mulEarlyOut).B && count =/= (mulw/cfg.mulUnroll-1).U && count =/= 0.U &&
!isHi && (mplier & ~eOutMask) === 0.U
val eOutRes = (mulReg >> (mulw.U - count * cfg.mulUnroll.U)(log2Up(mulw)-1,0))
val nextMulReg1 = Cat(nextMulReg(2*mulw,mulw), Mux(eOut, eOutRes, nextMulReg)(mulw-1,0))
remainder := Cat(nextMulReg1 >> w, nextMplierSign, nextMulReg1(w-1,0))
count := count + 1.U
when (eOut || count === (mulw/cfg.mulUnroll-1).U) {
state := s_done_mul
resHi := isHi
}
}
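  // Clarifying note (added; not in the original source): the multiply early-out above
  // fires when every multiplier bit that has not yet been consumed (mplier & ~eOutMask)
  // is zero, since the remaining iterations could only add zero partial products;
  // eOutRes then shifts the partially accumulated product down to where it would land
  // after the skipped iterations, and the state machine jumps straight to s_done_mul.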
if (cfg.divUnroll != 0) when (state === s_div) {
val unrolls = ((0 until cfg.divUnroll) scanLeft remainder) { case (rem, i) =>
// the special case for iteration 0 is to save HW, not for correctness
val difference = if (i == 0) subtractor else rem(2*w,w) - divisor(w-1,0)
val less = difference(w)
Cat(Mux(less, rem(2*w-1,w), difference(w-1,0)), rem(w-1,0), !less)
}.tail
remainder := unrolls.last
when (count === (w/cfg.divUnroll).U) {
state := Mux(neg_out, s_neg_output, s_done_div)
resHi := isHi
if (w % cfg.divUnroll < cfg.divUnroll - 1)
remainder := unrolls(w % cfg.divUnroll)
}
count := count + 1.U
val divby0 = count === 0.U && !subtractor(w)
if (cfg.divEarlyOut) {
val align = 1 << log2Floor(cfg.divUnroll max cfg.divEarlyOutGranularity)
val alignMask = ~((align-1).U(log2Ceil(w).W))
val divisorMSB = Log2(divisor(w-1,0), w) & alignMask
val dividendMSB = Log2(remainder(w-1,0), w) | ~alignMask
val eOutPos = ~(dividendMSB - divisorMSB)
val eOut = count === 0.U && !divby0 && eOutPos >= align.U
when (eOut) {
remainder := remainder(w-1,0) << eOutPos
count := eOutPos >> log2Floor(cfg.divUnroll)
}
}
when (divby0 && !isHi) { neg_out := false.B }
}
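  // Clarifying note (added; not in the original source): the divide early-out compares
  // the positions of the most significant set bits of the dividend and divisor on the
  // first iteration; eOutPos approximates how many leading quotient bits are guaranteed
  // to be zero, so the remainder is pre-shifted left by that amount and count is advanced
  // past the corresponding iterations.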
when (io.resp.fire || io.kill) {
state := s_ready
}
when (io.req.fire) {
state := Mux(cmdMul, s_mul, Mux(lhs_sign || rhs_sign, s_neg_inputs, s_div))
isHi := cmdHi
resHi := false.B
count := (if (fastMulW) Mux[UInt](cmdMul && halfWidth(io.req.bits), (w/cfg.mulUnroll/2).U, 0.U) else 0.U)
neg_out := Mux(cmdHi, lhs_sign, lhs_sign =/= rhs_sign)
divisor := Cat(rhs_sign, rhs_in)
remainder := lhs_in
req := io.req.bits
}
val outMul = (state & (s_done_mul ^ s_done_div)) === (s_done_mul & ~s_done_div)
val loOut = Mux(fastMulW.B && halfWidth(req) && outMul, result(w-1,w/2), result(w/2-1,0))
val hiOut = Mux(halfWidth(req), Fill(w/2, loOut(w/2-1)), result(w-1,w/2))
io.resp.bits.tag := req.tag
io.resp.bits.data := Cat(hiOut, loOut)
io.resp.bits.full_data := Cat(remainder(2*w, w+1), remainder(w-1, 0))
io.resp.valid := (state === s_done_mul || state === s_done_div)
io.req.ready := state === s_ready
}
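// Illustrative usage sketch (added; not part of the original source). A typical
// Rocket-style configuration instantiates an iterative unit that retires several
// multiplier bits per cycle and one divider bit per cycle, with early-out enabled;
// the exact parameter values here are only an example:
//   val muldiv = Module(new MulDiv(
//     MulDivParams(mulUnroll = 8, divUnroll = 1, mulEarlyOut = true, divEarlyOut = true),
//     width = 64))
//   muldiv.io.req.valid := ...      // drive fn/dw/in1/in2/tag from the issue stage
//   muldiv.io.kill     := false.B   // assert to squash an in-flight operation
//   // results come back on muldiv.io.resp as a Decoupled MultiplierResp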
class PipelinedMultiplier(width: Int, latency: Int, nXpr: Int = 32) extends Module with ShouldBeRetimed {
val io = IO(new Bundle {
val req = Flipped(Valid(new MultiplierReq(width, log2Ceil(nXpr))))
val resp = Valid(new MultiplierResp(width, log2Ceil(nXpr)))
})
val in = Pipe(io.req)
val decode = List(
FN_MUL -> List(N, X, X),
FN_MULH -> List(Y, Y, Y),
FN_MULHU -> List(Y, N, N),
FN_MULHSU -> List(Y, Y, N))
val cmdHi :: lhsSigned :: rhsSigned :: Nil =
DecodeLogic(in.bits.fn, List(X, X, X), decode).map(_.asBool)
val cmdHalf = (width > 32).B && in.bits.dw === DW_32
val lhs = Cat(lhsSigned && in.bits.in1(width-1), in.bits.in1).asSInt
val rhs = Cat(rhsSigned && in.bits.in2(width-1), in.bits.in2).asSInt
val prod = lhs * rhs
val muxed = Mux(cmdHi, prod(2*width-1, width), Mux(cmdHalf, prod(width/2-1, 0).sextTo(width), prod(width-1, 0)))
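  // Clarifying note (added; not in the original source): muxed selects the upper half of
  // the 2*width-bit product for the MULH/MULHU/MULHSU variants, the sign-extended low
  // 32 bits for 32-bit (DW_32) operations, and otherwise the low width bits of a plain MUL.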
val resp = Pipe(in, latency-1)
io.resp.valid := resp.valid
io.resp.bits.tag := resp.bits.tag
io.resp.bits.data := Pipe(in.valid, muxed, latency-1).bits
io.resp.bits.full_data := Pipe(in.valid, prod, latency-1).bits.asUInt
}
| module PipelinedMultiplier_1( // @[Multiplier.scala:186:7]
input clock, // @[Multiplier.scala:186:7]
input reset, // @[Multiplier.scala:186:7]
input io_req_valid, // @[Multiplier.scala:187:14]
input [4:0] io_req_bits_fn, // @[Multiplier.scala:187:14]
input io_req_bits_dw, // @[Multiplier.scala:187:14]
input [63:0] io_req_bits_in1, // @[Multiplier.scala:187:14]
input [63:0] io_req_bits_in2, // @[Multiplier.scala:187:14]
output [63:0] io_resp_bits_data // @[Multiplier.scala:187:14]
);
wire io_req_valid_0 = io_req_valid; // @[Multiplier.scala:186:7]
wire [4:0] io_req_bits_fn_0 = io_req_bits_fn; // @[Multiplier.scala:186:7]
wire io_req_bits_dw_0 = io_req_bits_dw; // @[Multiplier.scala:186:7]
wire [63:0] io_req_bits_in1_0 = io_req_bits_in1; // @[Multiplier.scala:186:7]
wire [63:0] io_req_bits_in2_0 = io_req_bits_in2; // @[Multiplier.scala:186:7]
wire [4:0] io_req_bits_tag = 5'h0; // @[Multiplier.scala:186:7]
wire [4:0] in_bits_tag = 5'h0; // @[Valid.scala:135:21]
wire resp_valid; // @[Valid.scala:135:21]
wire [63:0] io_resp_bits_data_pipe_pipe_out_bits; // @[Valid.scala:135:21]
wire [4:0] resp_bits_tag; // @[Valid.scala:135:21]
wire [63:0] io_resp_bits_data_0; // @[Multiplier.scala:186:7]
wire [127:0] io_resp_bits_full_data; // @[Multiplier.scala:186:7]
wire [4:0] io_resp_bits_tag; // @[Multiplier.scala:186:7]
wire io_resp_valid; // @[Multiplier.scala:186:7]
reg in_pipe_v; // @[Valid.scala:141:24]
wire in_valid = in_pipe_v; // @[Valid.scala:135:21, :141:24]
reg [4:0] in_pipe_b_fn; // @[Valid.scala:142:26]
wire [4:0] in_bits_fn = in_pipe_b_fn; // @[Valid.scala:135:21, :142:26]
reg in_pipe_b_dw; // @[Valid.scala:142:26]
wire in_bits_dw = in_pipe_b_dw; // @[Valid.scala:135:21, :142:26]
reg [63:0] in_pipe_b_in1; // @[Valid.scala:142:26]
wire [63:0] in_bits_in1 = in_pipe_b_in1; // @[Valid.scala:135:21, :142:26]
reg [63:0] in_pipe_b_in2; // @[Valid.scala:142:26]
wire [63:0] in_bits_in2 = in_pipe_b_in2; // @[Valid.scala:135:21, :142:26]
wire [1:0] decoded_plaInput; // @[pla.scala:77:22]
wire [1:0] decoded_invInputs = ~decoded_plaInput; // @[pla.scala:77:22, :78:21]
wire [2:0] decoded_invMatrixOutputs; // @[pla.scala:120:37]
wire [2:0] decoded; // @[pla.scala:81:23]
wire decoded_andMatrixOutputs_andMatrixInput_0 = decoded_invInputs[1]; // @[pla.scala:78:21, :91:29]
wire decoded_andMatrixOutputs_0_2 = decoded_andMatrixOutputs_andMatrixInput_0; // @[pla.scala:91:29, :98:70]
wire _decoded_orMatrixOutputs_T = decoded_andMatrixOutputs_0_2; // @[pla.scala:98:70, :114:36]
wire decoded_andMatrixOutputs_andMatrixInput_0_1 = decoded_plaInput[0]; // @[pla.scala:77:22, :90:45]
wire decoded_andMatrixOutputs_1_2 = decoded_andMatrixOutputs_andMatrixInput_0_1; // @[pla.scala:90:45, :98:70]
wire decoded_andMatrixOutputs_andMatrixInput_0_2 = decoded_plaInput[1]; // @[pla.scala:77:22, :90:45]
wire decoded_andMatrixOutputs_andMatrixInput_1 = decoded_plaInput[1]; // @[pla.scala:77:22, :90:45]
wire decoded_andMatrixOutputs_2_2 = decoded_andMatrixOutputs_andMatrixInput_0_2; // @[pla.scala:90:45, :98:70]
wire decoded_andMatrixOutputs_andMatrixInput_0_3 = decoded_invInputs[0]; // @[pla.scala:78:21, :91:29]
wire [1:0] _decoded_andMatrixOutputs_T = {decoded_andMatrixOutputs_andMatrixInput_0_3, decoded_andMatrixOutputs_andMatrixInput_1}; // @[pla.scala:90:45, :91:29, :98:53]
wire decoded_andMatrixOutputs_3_2 = &_decoded_andMatrixOutputs_T; // @[pla.scala:98:{53,70}]
wire [1:0] _decoded_orMatrixOutputs_T_1 = {decoded_andMatrixOutputs_0_2, decoded_andMatrixOutputs_3_2}; // @[pla.scala:98:70, :114:19]
wire _decoded_orMatrixOutputs_T_2 = |_decoded_orMatrixOutputs_T_1; // @[pla.scala:114:{19,36}]
wire [1:0] _decoded_orMatrixOutputs_T_3 = {decoded_andMatrixOutputs_1_2, decoded_andMatrixOutputs_2_2}; // @[pla.scala:98:70, :114:19]
wire _decoded_orMatrixOutputs_T_4 = |_decoded_orMatrixOutputs_T_3; // @[pla.scala:114:{19,36}]
wire [1:0] decoded_orMatrixOutputs_hi = {_decoded_orMatrixOutputs_T_4, _decoded_orMatrixOutputs_T_2}; // @[pla.scala:102:36, :114:36]
wire [2:0] decoded_orMatrixOutputs = {decoded_orMatrixOutputs_hi, _decoded_orMatrixOutputs_T}; // @[pla.scala:102:36, :114:36]
wire _decoded_invMatrixOutputs_T = decoded_orMatrixOutputs[0]; // @[pla.scala:102:36, :124:31]
wire _decoded_invMatrixOutputs_T_1 = decoded_orMatrixOutputs[1]; // @[pla.scala:102:36, :124:31]
wire _decoded_invMatrixOutputs_T_2 = decoded_orMatrixOutputs[2]; // @[pla.scala:102:36, :124:31]
wire [1:0] decoded_invMatrixOutputs_hi = {_decoded_invMatrixOutputs_T_2, _decoded_invMatrixOutputs_T_1}; // @[pla.scala:120:37, :124:31]
assign decoded_invMatrixOutputs = {decoded_invMatrixOutputs_hi, _decoded_invMatrixOutputs_T}; // @[pla.scala:120:37, :124:31]
assign decoded = decoded_invMatrixOutputs; // @[pla.scala:81:23, :120:37]
assign decoded_plaInput = in_bits_fn[1:0]; // @[pla.scala:77:22]
wire cmdHi = decoded[2]; // @[pla.scala:81:23]
wire lhsSigned = decoded[1]; // @[pla.scala:81:23]
wire rhsSigned = decoded[0]; // @[pla.scala:81:23]
wire _cmdHalf_T = ~in_bits_dw; // @[Valid.scala:135:21]
wire cmdHalf = _cmdHalf_T; // @[Multiplier.scala:201:{32,46}]
wire _lhs_T = in_bits_in1[63]; // @[Valid.scala:135:21]
wire _lhs_T_1 = lhsSigned & _lhs_T; // @[Multiplier.scala:200:58, :203:{27,41}]
wire [64:0] _lhs_T_2 = {_lhs_T_1, in_bits_in1}; // @[Valid.scala:135:21]
wire [64:0] lhs = _lhs_T_2; // @[Multiplier.scala:203:{16,65}]
wire _rhs_T = in_bits_in2[63]; // @[Valid.scala:135:21]
wire _rhs_T_1 = rhsSigned & _rhs_T; // @[Multiplier.scala:200:58, :204:{27,41}]
wire [64:0] _rhs_T_2 = {_rhs_T_1, in_bits_in2}; // @[Valid.scala:135:21]
wire [64:0] rhs = _rhs_T_2; // @[Multiplier.scala:204:{16,65}]
wire [129:0] prod = {{65{lhs[64]}}, lhs} * {{65{rhs[64]}}, rhs}; // @[Multiplier.scala:203:65, :204:65, :205:18]
wire [63:0] _muxed_T = prod[127:64]; // @[Multiplier.scala:205:18, :206:30]
wire [31:0] _muxed_T_1 = prod[31:0]; // @[Multiplier.scala:205:18, :206:67]
wire _muxed_T_2 = _muxed_T_1[31]; // @[package.scala:132:38]
wire [31:0] _muxed_T_3 = {32{_muxed_T_2}}; // @[package.scala:132:{20,38}]
wire [63:0] _muxed_T_4 = {_muxed_T_3, _muxed_T_1}; // @[package.scala:132:{15,20}]
wire [63:0] _muxed_T_5 = prod[63:0]; // @[Multiplier.scala:205:18, :206:101]
wire [63:0] _muxed_T_6 = cmdHalf ? _muxed_T_4 : _muxed_T_5; // @[package.scala:132:15]
wire [63:0] muxed = cmdHi ? _muxed_T : _muxed_T_6; // @[Multiplier.scala:200:58, :206:{18,30,53}]
reg resp_pipe_v; // @[Valid.scala:141:24]
reg [4:0] resp_pipe_b_fn; // @[Valid.scala:142:26]
reg resp_pipe_b_dw; // @[Valid.scala:142:26]
reg [63:0] resp_pipe_b_in1; // @[Valid.scala:142:26]
reg [63:0] resp_pipe_b_in2; // @[Valid.scala:142:26]
reg [4:0] resp_pipe_b_tag; // @[Valid.scala:142:26]
reg resp_pipe_pipe_v; // @[Valid.scala:141:24]
assign resp_valid = resp_pipe_pipe_v; // @[Valid.scala:135:21, :141:24]
reg [4:0] resp_pipe_pipe_b_fn; // @[Valid.scala:142:26]
wire [4:0] resp_bits_fn = resp_pipe_pipe_b_fn; // @[Valid.scala:135:21, :142:26]
reg resp_pipe_pipe_b_dw; // @[Valid.scala:142:26]
wire resp_bits_dw = resp_pipe_pipe_b_dw; // @[Valid.scala:135:21, :142:26]
reg [63:0] resp_pipe_pipe_b_in1; // @[Valid.scala:142:26]
wire [63:0] resp_bits_in1 = resp_pipe_pipe_b_in1; // @[Valid.scala:135:21, :142:26]
reg [63:0] resp_pipe_pipe_b_in2; // @[Valid.scala:142:26]
wire [63:0] resp_bits_in2 = resp_pipe_pipe_b_in2; // @[Valid.scala:135:21, :142:26]
reg [4:0] resp_pipe_pipe_b_tag; // @[Valid.scala:142:26]
assign resp_bits_tag = resp_pipe_pipe_b_tag; // @[Valid.scala:135:21, :142:26]
assign io_resp_valid = resp_valid; // @[Valid.scala:135:21]
assign io_resp_bits_tag = resp_bits_tag; // @[Valid.scala:135:21]
reg io_resp_bits_data_pipe_v; // @[Valid.scala:141:24]
reg [63:0] io_resp_bits_data_pipe_b; // @[Valid.scala:142:26]
reg io_resp_bits_data_pipe_pipe_v; // @[Valid.scala:141:24]
wire io_resp_bits_data_pipe_pipe_out_valid = io_resp_bits_data_pipe_pipe_v; // @[Valid.scala:135:21, :141:24]
reg [63:0] io_resp_bits_data_pipe_pipe_b; // @[Valid.scala:142:26]
assign io_resp_bits_data_pipe_pipe_out_bits = io_resp_bits_data_pipe_pipe_b; // @[Valid.scala:135:21, :142:26]
assign io_resp_bits_data_0 = io_resp_bits_data_pipe_pipe_out_bits; // @[Valid.scala:135:21]
reg io_resp_bits_full_data_pipe_v; // @[Valid.scala:141:24]
reg [129:0] io_resp_bits_full_data_pipe_b; // @[Valid.scala:142:26]
reg io_resp_bits_full_data_pipe_pipe_v; // @[Valid.scala:141:24]
wire io_resp_bits_full_data_pipe_pipe_out_valid = io_resp_bits_full_data_pipe_pipe_v; // @[Valid.scala:135:21, :141:24]
reg [129:0] io_resp_bits_full_data_pipe_pipe_b; // @[Valid.scala:142:26]
wire [129:0] io_resp_bits_full_data_pipe_pipe_out_bits = io_resp_bits_full_data_pipe_pipe_b; // @[Valid.scala:135:21, :142:26]
wire [129:0] _io_resp_bits_full_data_T = io_resp_bits_full_data_pipe_pipe_out_bits; // @[Valid.scala:135:21]
assign io_resp_bits_full_data = _io_resp_bits_full_data_T[127:0]; // @[Multiplier.scala:186:7, :212:{26,66}]
always @(posedge clock) begin // @[Multiplier.scala:186:7]
if (reset) begin // @[Multiplier.scala:186:7]
in_pipe_v <= 1'h0; // @[Valid.scala:141:24]
resp_pipe_v <= 1'h0; // @[Valid.scala:141:24]
resp_pipe_pipe_v <= 1'h0; // @[Valid.scala:141:24]
io_resp_bits_data_pipe_v <= 1'h0; // @[Valid.scala:141:24]
io_resp_bits_data_pipe_pipe_v <= 1'h0; // @[Valid.scala:141:24]
io_resp_bits_full_data_pipe_v <= 1'h0; // @[Valid.scala:141:24]
io_resp_bits_full_data_pipe_pipe_v <= 1'h0; // @[Valid.scala:141:24]
end
else begin // @[Multiplier.scala:186:7]
in_pipe_v <= io_req_valid_0; // @[Valid.scala:141:24]
resp_pipe_v <= in_valid; // @[Valid.scala:135:21, :141:24]
resp_pipe_pipe_v <= resp_pipe_v; // @[Valid.scala:141:24]
io_resp_bits_data_pipe_v <= in_valid; // @[Valid.scala:135:21, :141:24]
io_resp_bits_data_pipe_pipe_v <= io_resp_bits_data_pipe_v; // @[Valid.scala:141:24]
io_resp_bits_full_data_pipe_v <= in_valid; // @[Valid.scala:135:21, :141:24]
io_resp_bits_full_data_pipe_pipe_v <= io_resp_bits_full_data_pipe_v; // @[Valid.scala:141:24]
end
if (io_req_valid_0) begin // @[Multiplier.scala:186:7]
in_pipe_b_fn <= io_req_bits_fn_0; // @[Valid.scala:142:26]
in_pipe_b_dw <= io_req_bits_dw_0; // @[Valid.scala:142:26]
in_pipe_b_in1 <= io_req_bits_in1_0; // @[Valid.scala:142:26]
in_pipe_b_in2 <= io_req_bits_in2_0; // @[Valid.scala:142:26]
end
if (in_valid) begin // @[Valid.scala:135:21]
resp_pipe_b_fn <= in_bits_fn; // @[Valid.scala:135:21, :142:26]
resp_pipe_b_dw <= in_bits_dw; // @[Valid.scala:135:21, :142:26]
resp_pipe_b_in1 <= in_bits_in1; // @[Valid.scala:135:21, :142:26]
resp_pipe_b_in2 <= in_bits_in2; // @[Valid.scala:135:21, :142:26]
resp_pipe_b_tag <= 5'h0; // @[Valid.scala:142:26]
io_resp_bits_data_pipe_b <= muxed; // @[Valid.scala:142:26]
io_resp_bits_full_data_pipe_b <= prod; // @[Valid.scala:142:26]
end
if (resp_pipe_v) begin // @[Valid.scala:141:24]
resp_pipe_pipe_b_fn <= resp_pipe_b_fn; // @[Valid.scala:142:26]
resp_pipe_pipe_b_dw <= resp_pipe_b_dw; // @[Valid.scala:142:26]
resp_pipe_pipe_b_in1 <= resp_pipe_b_in1; // @[Valid.scala:142:26]
resp_pipe_pipe_b_in2 <= resp_pipe_b_in2; // @[Valid.scala:142:26]
resp_pipe_pipe_b_tag <= resp_pipe_b_tag; // @[Valid.scala:142:26]
end
if (io_resp_bits_data_pipe_v) // @[Valid.scala:141:24]
io_resp_bits_data_pipe_pipe_b <= io_resp_bits_data_pipe_b; // @[Valid.scala:142:26]
if (io_resp_bits_full_data_pipe_v) // @[Valid.scala:141:24]
io_resp_bits_full_data_pipe_pipe_b <= io_resp_bits_full_data_pipe_b; // @[Valid.scala:142:26]
  end // always @(posedge)
assign io_resp_bits_data = io_resp_bits_data_0; // @[Multiplier.scala:186:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File OutputUnit.scala:
package constellation.router
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.{Field, Parameters}
import constellation.channel._
import constellation.routing.{FlowRoutingBundle}
import constellation.noc.{HasNoCParams}
class OutputCreditAlloc extends Bundle {
val alloc = Bool()
val tail = Bool()
}
class OutputChannelStatus(implicit val p: Parameters) extends Bundle with HasNoCParams {
val occupied = Bool()
def available = !occupied
val flow = new FlowRoutingBundle
}
class OutputChannelAlloc(implicit val p: Parameters) extends Bundle with HasNoCParams {
val alloc = Bool()
val flow = new FlowRoutingBundle
}
class AbstractOutputUnitIO(
val inParams: Seq[ChannelParams],
val ingressParams: Seq[IngressChannelParams],
val cParam: BaseChannelParams
)(implicit val p: Parameters) extends Bundle with HasRouterInputParams {
val nodeId = cParam.srcId
val nVirtualChannels = cParam.nVirtualChannels
val in = Flipped(Vec(cParam.srcSpeedup, Valid(new Flit(cParam.payloadBits))))
val credit_available = Output(Vec(nVirtualChannels, Bool()))
val channel_status = Output(Vec(nVirtualChannels, new OutputChannelStatus))
val allocs = Input(Vec(nVirtualChannels, new OutputChannelAlloc))
val credit_alloc = Input(Vec(nVirtualChannels, new OutputCreditAlloc))
}
abstract class AbstractOutputUnit(
val inParams: Seq[ChannelParams],
val ingressParams: Seq[IngressChannelParams],
val cParam: BaseChannelParams
)(implicit val p: Parameters) extends Module with HasRouterInputParams with HasNoCParams {
val nodeId = cParam.srcId
def io: AbstractOutputUnitIO
}
class OutputUnit(inParams: Seq[ChannelParams], ingressParams: Seq[IngressChannelParams], cParam: ChannelParams)
(implicit p: Parameters) extends AbstractOutputUnit(inParams, ingressParams, cParam)(p) {
class OutputUnitIO extends AbstractOutputUnitIO(inParams, ingressParams, cParam) {
val out = new Channel(cParam.asInstanceOf[ChannelParams])
}
val io = IO(new OutputUnitIO)
class OutputState(val bufferSize: Int) extends Bundle {
val occupied = Bool()
val c = UInt(log2Up(1+bufferSize).W)
val flow = new FlowRoutingBundle
}
val states = Reg(MixedVec(cParam.virtualChannelParams.map { u => new OutputState(u.bufferSize) }))
(states zip io.channel_status).map { case (s,a) =>
a.occupied := s.occupied
a.flow := s.flow
}
io.out.flit := io.in
states.zipWithIndex.map { case (s,i) => if (cParam.virtualChannelParams(i).traversable) {
when (io.out.vc_free(i)) {
assert(s.occupied)
s.occupied := false.B
}
} }
(states zip io.allocs).zipWithIndex.map { case ((s,a),i) => if (cParam.virtualChannelParams(i).traversable) {
when (a.alloc) {
s.occupied := true.B
s.flow := a.flow
}
} }
(io.credit_available zip states).zipWithIndex.map { case ((c,s),i) =>
c := s.c =/= 0.U //|| (io.out.credit_return.valid && io.out.credit_return.bits === i.U)
}
states.zipWithIndex.map { case (s,i) =>
val free = io.out.credit_return(i)
val alloc = io.credit_alloc(i).alloc
if (cParam.virtualChannelParams(i).traversable) {
s.c := s.c +& free - alloc
}
}
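  // Clarifying note (added; not in the original source): each virtual channel keeps a
  // credit counter c sized to the downstream buffer; it is incremented when the
  // downstream router returns a credit on credit_return(i), decremented when the switch
  // allocator spends one via credit_alloc(i).alloc, and credit_available(i) simply
  // reports c =/= 0.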
when (reset.asBool) {
states.foreach(_.occupied := false.B)
states.foreach(s => s.c := s.bufferSize.U)
}
}
| module OutputUnit_107( // @[OutputUnit.scala:52:7]
input clock, // @[OutputUnit.scala:52:7]
input reset, // @[OutputUnit.scala:52:7]
input io_in_0_valid, // @[OutputUnit.scala:58:14]
input io_in_0_bits_head, // @[OutputUnit.scala:58:14]
input io_in_0_bits_tail, // @[OutputUnit.scala:58:14]
input [72:0] io_in_0_bits_payload, // @[OutputUnit.scala:58:14]
input [2:0] io_in_0_bits_flow_vnet_id, // @[OutputUnit.scala:58:14]
input [3:0] io_in_0_bits_flow_ingress_node, // @[OutputUnit.scala:58:14]
input [1:0] io_in_0_bits_flow_ingress_node_id, // @[OutputUnit.scala:58:14]
input [3:0] io_in_0_bits_flow_egress_node, // @[OutputUnit.scala:58:14]
input [2:0] io_in_0_bits_flow_egress_node_id, // @[OutputUnit.scala:58:14]
input [3:0] io_in_0_bits_virt_channel_id, // @[OutputUnit.scala:58:14]
output io_credit_available_8, // @[OutputUnit.scala:58:14]
output io_credit_available_9, // @[OutputUnit.scala:58:14]
output io_channel_status_8_occupied, // @[OutputUnit.scala:58:14]
output io_channel_status_9_occupied, // @[OutputUnit.scala:58:14]
input io_allocs_8_alloc, // @[OutputUnit.scala:58:14]
input io_allocs_9_alloc, // @[OutputUnit.scala:58:14]
input io_credit_alloc_8_alloc, // @[OutputUnit.scala:58:14]
input io_credit_alloc_9_alloc, // @[OutputUnit.scala:58:14]
output io_out_flit_0_valid, // @[OutputUnit.scala:58:14]
output io_out_flit_0_bits_head, // @[OutputUnit.scala:58:14]
output io_out_flit_0_bits_tail, // @[OutputUnit.scala:58:14]
output [72:0] io_out_flit_0_bits_payload, // @[OutputUnit.scala:58:14]
output [2:0] io_out_flit_0_bits_flow_vnet_id, // @[OutputUnit.scala:58:14]
output [3:0] io_out_flit_0_bits_flow_ingress_node, // @[OutputUnit.scala:58:14]
output [1:0] io_out_flit_0_bits_flow_ingress_node_id, // @[OutputUnit.scala:58:14]
output [3:0] io_out_flit_0_bits_flow_egress_node, // @[OutputUnit.scala:58:14]
output [2:0] io_out_flit_0_bits_flow_egress_node_id, // @[OutputUnit.scala:58:14]
output [3:0] io_out_flit_0_bits_virt_channel_id, // @[OutputUnit.scala:58:14]
input [9:0] io_out_credit_return, // @[OutputUnit.scala:58:14]
input [9:0] io_out_vc_free // @[OutputUnit.scala:58:14]
);
reg states_9_occupied; // @[OutputUnit.scala:66:19]
reg [2:0] states_9_c; // @[OutputUnit.scala:66:19]
reg states_8_occupied; // @[OutputUnit.scala:66:19]
reg [2:0] states_8_c; // @[OutputUnit.scala:66:19] |
Generate the Verilog code corresponding to the following Chisel files.
File Buffer.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import org.chipsalliance.diplomacy.lazymodule._
import freechips.rocketchip.diplomacy.BufferParams
class TLBufferNode (
a: BufferParams,
b: BufferParams,
c: BufferParams,
d: BufferParams,
e: BufferParams)(implicit valName: ValName) extends TLAdapterNode(
clientFn = { p => p.v1copy(minLatency = p.minLatency + b.latency + c.latency) },
managerFn = { p => p.v1copy(minLatency = p.minLatency + a.latency + d.latency) }
) {
override lazy val nodedebugstring = s"a:${a.toString}, b:${b.toString}, c:${c.toString}, d:${d.toString}, e:${e.toString}"
override def circuitIdentity = List(a,b,c,d,e).forall(_ == BufferParams.none)
}
class TLBuffer(
a: BufferParams,
b: BufferParams,
c: BufferParams,
d: BufferParams,
e: BufferParams)(implicit p: Parameters) extends LazyModule
{
def this(ace: BufferParams, bd: BufferParams)(implicit p: Parameters) = this(ace, bd, ace, bd, ace)
def this(abcde: BufferParams)(implicit p: Parameters) = this(abcde, abcde)
def this()(implicit p: Parameters) = this(BufferParams.default)
val node = new TLBufferNode(a, b, c, d, e)
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
def headBundle = node.out.head._2.bundle
override def desiredName = (Seq("TLBuffer") ++ node.out.headOption.map(_._2.bundle.shortName)).mkString("_")
(node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
out.a <> a(in .a)
in .d <> d(out.d)
if (edgeOut.manager.anySupportAcquireB && edgeOut.client.anySupportProbe) {
in .b <> b(out.b)
out.c <> c(in .c)
out.e <> e(in .e)
} else {
in.b.valid := false.B
in.c.ready := true.B
in.e.ready := true.B
out.b.ready := true.B
out.c.valid := false.B
out.e.valid := false.B
}
}
}
}
object TLBuffer
{
def apply() (implicit p: Parameters): TLNode = apply(BufferParams.default)
def apply(abcde: BufferParams) (implicit p: Parameters): TLNode = apply(abcde, abcde)
def apply(ace: BufferParams, bd: BufferParams)(implicit p: Parameters): TLNode = apply(ace, bd, ace, bd, ace)
def apply(
a: BufferParams,
b: BufferParams,
c: BufferParams,
d: BufferParams,
e: BufferParams)(implicit p: Parameters): TLNode =
{
val buffer = LazyModule(new TLBuffer(a, b, c, d, e))
buffer.node
}
def chain(depth: Int, name: Option[String] = None)(implicit p: Parameters): Seq[TLNode] = {
val buffers = Seq.fill(depth) { LazyModule(new TLBuffer()) }
name.foreach { n => buffers.zipWithIndex.foreach { case (b, i) => b.suggestName(s"${n}_${i}") } }
buffers.map(_.node)
}
def chainNode(depth: Int, name: Option[String] = None)(implicit p: Parameters): TLNode = {
chain(depth, name)
.reduceLeftOption(_ :*=* _)
.getOrElse(TLNameNode("no_buffer"))
}
}
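// Illustrative usage sketch (added; not part of the original source). TLBuffer is spliced
// into a diplomatic TileLink path; the node names below are placeholders:
//   managerSideNode := TLBuffer() := clientNode                              // default one-entry buffers
//   managerSideNode := TLBuffer.chainNode(2, Some("outbound")) := clientNode // a named two-deep chain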
File Crossing.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.interrupts
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy.lazymodule._
import freechips.rocketchip.util.{SynchronizerShiftReg, AsyncResetReg}
@deprecated("IntXing does not ensure interrupt source is glitch free. Use IntSyncSource and IntSyncSink", "rocket-chip 1.2")
class IntXing(sync: Int = 3)(implicit p: Parameters) extends LazyModule
{
val intnode = IntAdapterNode()
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
(intnode.in zip intnode.out) foreach { case ((in, _), (out, _)) =>
out := SynchronizerShiftReg(in, sync)
}
}
}
object IntSyncCrossingSource
{
def apply(alreadyRegistered: Boolean = false)(implicit p: Parameters) =
{
val intsource = LazyModule(new IntSyncCrossingSource(alreadyRegistered))
intsource.node
}
}
class IntSyncCrossingSource(alreadyRegistered: Boolean = false)(implicit p: Parameters) extends LazyModule
{
val node = IntSyncSourceNode(alreadyRegistered)
lazy val module = if (alreadyRegistered) (new ImplRegistered) else (new Impl)
class Impl extends LazyModuleImp(this) {
def outSize = node.out.headOption.map(_._1.sync.size).getOrElse(0)
override def desiredName = s"IntSyncCrossingSource_n${node.out.size}x${outSize}"
(node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
out.sync := AsyncResetReg(Cat(in.reverse)).asBools
}
}
class ImplRegistered extends LazyRawModuleImp(this) {
def outSize = node.out.headOption.map(_._1.sync.size).getOrElse(0)
override def desiredName = s"IntSyncCrossingSource_n${node.out.size}x${outSize}_Registered"
(node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
out.sync := in
}
}
}
object IntSyncCrossingSink
{
@deprecated("IntSyncCrossingSink which used the `sync` parameter to determine crossing type is deprecated. Use IntSyncAsyncCrossingSink, IntSyncRationalCrossingSink, or IntSyncSyncCrossingSink instead for > 1, 1, and 0 sync values respectively", "rocket-chip 1.2")
def apply(sync: Int = 3)(implicit p: Parameters) =
{
val intsink = LazyModule(new IntSyncAsyncCrossingSink(sync))
intsink.node
}
}
class IntSyncAsyncCrossingSink(sync: Int = 3)(implicit p: Parameters) extends LazyModule
{
val node = IntSyncSinkNode(sync)
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
override def desiredName = s"IntSyncAsyncCrossingSink_n${node.out.size}x${node.out.head._1.size}"
(node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
out := SynchronizerShiftReg(in.sync, sync)
}
}
}
object IntSyncAsyncCrossingSink
{
def apply(sync: Int = 3)(implicit p: Parameters) =
{
val intsink = LazyModule(new IntSyncAsyncCrossingSink(sync))
intsink.node
}
}
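// Illustrative usage sketch (added; not part of the original source). A typical
// asynchronous interrupt crossing registers the level in the source clock domain and
// re-synchronizes it in the sink domain; sourceIntNode/sinkIntNode are placeholders:
//   sinkIntNode := IntSyncAsyncCrossingSink(sync = 3) := IntSyncCrossingSource() := sourceIntNode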
class IntSyncSyncCrossingSink()(implicit p: Parameters) extends LazyModule
{
val node = IntSyncSinkNode(0)
lazy val module = new Impl
class Impl extends LazyRawModuleImp(this) {
def outSize = node.out.headOption.map(_._1.size).getOrElse(0)
override def desiredName = s"IntSyncSyncCrossingSink_n${node.out.size}x${outSize}"
(node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
out := in.sync
}
}
}
object IntSyncSyncCrossingSink
{
def apply()(implicit p: Parameters) =
{
val intsink = LazyModule(new IntSyncSyncCrossingSink())
intsink.node
}
}
class IntSyncRationalCrossingSink()(implicit p: Parameters) extends LazyModule
{
val node = IntSyncSinkNode(1)
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
def outSize = node.out.headOption.map(_._1.size).getOrElse(0)
override def desiredName = s"IntSyncRationalCrossingSink_n${node.out.size}x${outSize}"
(node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
out := RegNext(in.sync)
}
}
}
object IntSyncRationalCrossingSink
{
def apply()(implicit p: Parameters) =
{
val intsink = LazyModule(new IntSyncRationalCrossingSink())
intsink.node
}
}
File ClockDomain.scala:
package freechips.rocketchip.prci
import chisel3._
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy.lazymodule._
abstract class Domain(implicit p: Parameters) extends LazyModule with HasDomainCrossing
{
def clockBundle: ClockBundle
lazy val module = new Impl
class Impl extends LazyRawModuleImp(this) {
childClock := clockBundle.clock
childReset := clockBundle.reset
override def provideImplicitClockToLazyChildren = true
// these are just for backwards compatibility with external devices
// that were manually wiring themselves to the domain's clock/reset input:
val clock = IO(Output(chiselTypeOf(clockBundle.clock)))
val reset = IO(Output(chiselTypeOf(clockBundle.reset)))
clock := clockBundle.clock
reset := clockBundle.reset
}
}
abstract class ClockDomain(implicit p: Parameters) extends Domain with HasClockDomainCrossing
class ClockSinkDomain(val clockSinkParams: ClockSinkParameters)(implicit p: Parameters) extends ClockDomain
{
def this(take: Option[ClockParameters] = None, name: Option[String] = None)(implicit p: Parameters) = this(ClockSinkParameters(take = take, name = name))
val clockNode = ClockSinkNode(Seq(clockSinkParams))
def clockBundle = clockNode.in.head._1
override lazy val desiredName = (clockSinkParams.name.toSeq :+ "ClockSinkDomain").mkString
}
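// Illustrative usage sketch (added; not part of the original source). A ClockSinkDomain
// gives its children their own clock/reset pair, taken from whatever clock source node it
// is attached to; someClockSourceNode is a placeholder:
//   val periphery = LazyModule(new ClockSinkDomain(name = Some("periphery")))
//   periphery.clockNode := someClockSourceNode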
class ClockSourceDomain(val clockSourceParams: ClockSourceParameters)(implicit p: Parameters) extends ClockDomain
{
def this(give: Option[ClockParameters] = None, name: Option[String] = None)(implicit p: Parameters) = this(ClockSourceParameters(give = give, name = name))
val clockNode = ClockSourceNode(Seq(clockSourceParams))
def clockBundle = clockNode.out.head._1
override lazy val desiredName = (clockSourceParams.name.toSeq :+ "ClockSourceDomain").mkString
}
abstract class ResetDomain(implicit p: Parameters) extends Domain with HasResetDomainCrossing
File ClockGroup.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.prci
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import org.chipsalliance.diplomacy.lazymodule._
import org.chipsalliance.diplomacy.nodes._
import freechips.rocketchip.resources.FixedClockResource
case class ClockGroupingNode(groupName: String)(implicit valName: ValName)
extends MixedNexusNode(ClockGroupImp, ClockImp)(
dFn = { _ => ClockSourceParameters() },
uFn = { seq => ClockGroupSinkParameters(name = groupName, members = seq) })
{
override def circuitIdentity = outputs.size == 1
}
class ClockGroup(groupName: String)(implicit p: Parameters) extends LazyModule
{
val node = ClockGroupingNode(groupName)
lazy val module = new Impl
class Impl extends LazyRawModuleImp(this) {
val (in, _) = node.in(0)
val (out, _) = node.out.unzip
require (node.in.size == 1)
require (in.member.size == out.size)
(in.member.data zip out) foreach { case (i, o) => o := i }
}
}
object ClockGroup
{
def apply()(implicit p: Parameters, valName: ValName) = LazyModule(new ClockGroup(valName.name)).node
}
case class ClockGroupAggregateNode(groupName: String)(implicit valName: ValName)
extends NexusNode(ClockGroupImp)(
dFn = { _ => ClockGroupSourceParameters() },
uFn = { seq => ClockGroupSinkParameters(name = groupName, members = seq.flatMap(_.members))})
{
override def circuitIdentity = outputs.size == 1
}
class ClockGroupAggregator(groupName: String)(implicit p: Parameters) extends LazyModule
{
val node = ClockGroupAggregateNode(groupName)
override lazy val desiredName = s"ClockGroupAggregator_$groupName"
lazy val module = new Impl
class Impl extends LazyRawModuleImp(this) {
val (in, _) = node.in.unzip
val (out, _) = node.out.unzip
val outputs = out.flatMap(_.member.data)
require (node.in.size == 1, s"Aggregator for groupName: ${groupName} had ${node.in.size} inward edges instead of 1")
require (in.head.member.size == outputs.size)
in.head.member.data.zip(outputs).foreach { case (i, o) => o := i }
}
}
object ClockGroupAggregator
{
def apply()(implicit p: Parameters, valName: ValName) = LazyModule(new ClockGroupAggregator(valName.name)).node
}
class SimpleClockGroupSource(numSources: Int = 1)(implicit p: Parameters) extends LazyModule
{
val node = ClockGroupSourceNode(List.fill(numSources) { ClockGroupSourceParameters() })
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
val (out, _) = node.out.unzip
out.map { out: ClockGroupBundle =>
out.member.data.foreach { o =>
o.clock := clock; o.reset := reset }
}
}
}
object SimpleClockGroupSource
{
def apply(num: Int = 1)(implicit p: Parameters, valName: ValName) = LazyModule(new SimpleClockGroupSource(num)).node
}
case class FixedClockBroadcastNode(fixedClockOpt: Option[ClockParameters])(implicit valName: ValName)
extends NexusNode(ClockImp)(
dFn = { seq => fixedClockOpt.map(_ => ClockSourceParameters(give = fixedClockOpt)).orElse(seq.headOption).getOrElse(ClockSourceParameters()) },
uFn = { seq => fixedClockOpt.map(_ => ClockSinkParameters(take = fixedClockOpt)).orElse(seq.headOption).getOrElse(ClockSinkParameters()) },
inputRequiresOutput = false) {
def fixedClockResources(name: String, prefix: String = "soc/"): Seq[Option[FixedClockResource]] = Seq(fixedClockOpt.map(t => new FixedClockResource(name, t.freqMHz, prefix)))
}
class FixedClockBroadcast(fixedClockOpt: Option[ClockParameters])(implicit p: Parameters) extends LazyModule
{
val node = new FixedClockBroadcastNode(fixedClockOpt) {
override def circuitIdentity = outputs.size == 1
}
lazy val module = new Impl
class Impl extends LazyRawModuleImp(this) {
val (in, _) = node.in(0)
val (out, _) = node.out.unzip
override def desiredName = s"FixedClockBroadcast_${out.size}"
require (node.in.size == 1, "FixedClockBroadcast can only broadcast a single clock")
out.foreach { _ := in }
}
}
object FixedClockBroadcast
{
def apply(fixedClockOpt: Option[ClockParameters] = None)(implicit p: Parameters, valName: ValName) = LazyModule(new FixedClockBroadcast(fixedClockOpt)).node
}
case class PRCIClockGroupNode()(implicit valName: ValName)
extends NexusNode(ClockGroupImp)(
dFn = { _ => ClockGroupSourceParameters() },
uFn = { _ => ClockGroupSinkParameters("prci", Nil) },
outputRequiresInput = false)
File LazyModuleImp.scala:
package org.chipsalliance.diplomacy.lazymodule
import chisel3.{withClockAndReset, Module, RawModule, Reset, _}
import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo}
import firrtl.passes.InlineAnnotation
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.nodes.Dangle
import scala.collection.immutable.SortedMap
/** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]].
*
* This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy.
*/
sealed trait LazyModuleImpLike extends RawModule {
/** [[LazyModule]] that contains this instance. */
val wrapper: LazyModule
/** IOs that will be automatically "punched" for this instance. */
val auto: AutoBundle
/** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */
protected[diplomacy] val dangles: Seq[Dangle]
// [[wrapper.module]] had better not be accessed while LazyModules are still being built!
require(
LazyModule.scope.isEmpty,
s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}"
)
/** Set module name. Defaults to the containing LazyModule's desiredName. */
override def desiredName: String = wrapper.desiredName
suggestName(wrapper.suggestedName)
/** [[Parameters]] for chisel [[Module]]s. */
implicit val p: Parameters = wrapper.p
/** instantiate this [[LazyModule]], return [[AutoBundle]] and a unconnected [[Dangle]]s from this module and
* submodules.
*/
protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = {
// 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]],
// 2. return [[Dangle]]s from each module.
val childDangles = wrapper.children.reverse.flatMap { c =>
implicit val sourceInfo: SourceInfo = c.info
c.cloneProto.map { cp =>
// If the child is a clone, then recursively set cloneProto of its children as well
def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = {
require(bases.size == clones.size)
(bases.zip(clones)).map { case (l, r) =>
require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}")
l.cloneProto = Some(r)
assignCloneProtos(l.children, r.children)
}
}
assignCloneProtos(c.children, cp.children)
// Clone the child module as a record, and get its [[AutoBundle]]
val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName)
val clonedAuto = clone("auto").asInstanceOf[AutoBundle]
// Get the empty [[Dangle]]'s of the cloned child
val rawDangles = c.cloneDangles()
require(rawDangles.size == clonedAuto.elements.size)
// Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s
val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) }
dangles
}.getOrElse {
// For non-clones, instantiate the child module
val mod = try {
Module(c.module)
} catch {
case e: ChiselException => {
println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}")
throw e
}
}
mod.dangles
}
}
// Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]].
// This will result in a sequence of [[Dangle]] from these [[BaseNode]]s.
val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate())
// Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]]
val allDangles = nodeDangles ++ childDangles
// Group [[allDangles]] by their [[source]].
val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*)
// For each [[source]] set of [[Dangle]]s of size 2, ensure that these
// can be connected as a source-sink pair (have opposite flipped value).
// Make the connection and mark them as [[done]].
val done = Set() ++ pairing.values.filter(_.size == 2).map {
case Seq(a, b) =>
require(a.flipped != b.flipped)
// @todo <> in chisel3 makes directionless connection.
if (a.flipped) {
a.data <> b.data
} else {
b.data <> a.data
}
a.source
case _ => None
}
// Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module.
val forward = allDangles.filter(d => !done(d.source))
// Generate [[AutoBundle]] IO from [[forward]].
val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*))
// Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]]
val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) =>
if (d.flipped) {
d.data <> io
} else {
io <> d.data
}
d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name)
}
// Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]].
wrapper.inModuleBody.reverse.foreach {
_()
}
if (wrapper.shouldBeInlined) {
chisel3.experimental.annotate(new ChiselAnnotation {
def toFirrtl = InlineAnnotation(toNamed)
})
}
// Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]].
(auto, dangles)
}
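// Note on the pairing above (descriptive comment, no new behavior): a Dangle whose source and sink both
// resolve inside this LazyModule appears twice in `pairing`, is connected here, and is marked done; a
// Dangle with only one end here remains in `forward` and becomes an `auto_*` port of the generated
// module, to be connected by the parent.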
}
/** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike {
/** Instantiate hardware of this `Module`. */
val (auto, dangles) = instantiate()
}
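// A minimal usage sketch (illustrative, not part of this file): a LazyModule normally supplies its
// hardware by overriding `module` with a LazyModuleImp, inside which node bundles may be used because
// instantiate() has begun. The class name and body below are hypothetical.
//
//   class ExampleLazyModule(implicit p: Parameters) extends LazyModule {
//     lazy val module = new LazyModuleImp(this) {
//       // user logic goes here; `auto` and `dangles` were produced by instantiate()
//     }
//   }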
/** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike {
// These wires are the default clock+reset for all LazyModule children.
// It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the
// [[LazyRawModuleImp]] children.
// Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly.
/** drive clock explicitly. */
val childClock: Clock = Wire(Clock())
/** drive reset explicitly. */
val childReset: Reset = Wire(Reset())
// the default is that these are disabled
childClock := false.B.asClock
childReset := chisel3.DontCare
def provideImplicitClockToLazyChildren: Boolean = false
val (auto, dangles) =
if (provideImplicitClockToLazyChildren) {
withClockAndReset(childClock, childReset) { instantiate() }
} else {
instantiate()
}
}
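// A minimal sketch (illustrative, hypothetical names) of a LazyRawModuleImp subclass that drives the
// child clock/reset wires declared above instead of leaving them at their disabled defaults:
//
//   class ExampleRawImp(wrapper: LazyModule) extends LazyRawModuleImp(wrapper) {
//     override def provideImplicitClockToLazyChildren = true
//     val io_clock = IO(Input(Clock()))  // hypothetical port
//     val io_reset = IO(Input(Reset()))  // hypothetical port
//     childClock := io_clock             // last connection wins over the defaults assigned above
//     childReset := io_reset
//   }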
File MixedNode.scala:
package org.chipsalliance.diplomacy.nodes
import chisel3.{Data, DontCare, Wire}
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.{Field, Parameters}
import org.chipsalliance.diplomacy.ValName
import org.chipsalliance.diplomacy.sourceLine
/** One side metadata of a [[Dangle]].
*
* Describes one side of an edge going into or out of a [[BaseNode]].
*
* @param serial
* the global [[BaseNode.serial]] number of the [[BaseNode]] that this [[HalfEdge]] connects to.
* @param index
* the `index` in the [[BaseNode]]'s input or output port list that this [[HalfEdge]] belongs to.
*/
case class HalfEdge(serial: Int, index: Int) extends Ordered[HalfEdge] {
import scala.math.Ordered.orderingToOrdered
def compare(that: HalfEdge): Int = HalfEdge.unapply(this).compare(HalfEdge.unapply(that))
}
/** [[Dangle]] captures the `IO` information of a [[LazyModule]] and which two [[BaseNode]]s the [[Edges]]/[[Bundle]]
* connects.
*
* [[Dangle]]s are generated by [[BaseNode.instantiate]] using [[MixedNode.danglesOut]] and [[MixedNode.danglesIn]];
* [[LazyModuleImp.instantiate]] connects those that go to internal or explicit IO connections in a [[LazyModule]].
*
* @param source
* the source [[HalfEdge]] of this [[Dangle]], which captures the source [[BaseNode]] and the port `index` within
* that [[BaseNode]].
* @param sink
* sink [[HalfEdge]] of this [[Dangle]], which captures the sink [[BaseNode]] and the port `index` within that
* [[BaseNode]].
* @param flipped
* flip or not in [[AutoBundle.makeElements]]. If true this corresponds to `danglesIn`, if false it corresponds to
* `danglesOut`.
* @param dataOpt
* actual [[Data]] for the hardware connection. Can be empty if this belongs to a cloned module
*/
case class Dangle(source: HalfEdge, sink: HalfEdge, flipped: Boolean, name: String, dataOpt: Option[Data]) {
def data = dataOpt.get
}
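// For orientation (illustrative values only): the output port 0 of a node with serial 3, bound to input
// port 1 of a node with serial 7, yields
//   Dangle(source = HalfEdge(3, 0), sink = HalfEdge(7, 1), flipped = false, name = "nodeOut", dataOpt = Some(bundle))
// matching how MixedNode.danglesOut/danglesIn construct Dangles later in this file.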
/** [[Edges]] is a collection of parameters describing the functionality and connection for an interface, which is often
* derived from the interconnection protocol and can inform the parameterization of the hardware bundles that actually
* implement the protocol.
*/
case class Edges[EI, EO](in: Seq[EI], out: Seq[EO])
/** A field available in [[Parameters]] used to determine whether [[InwardNodeImp.monitor]] will be called. */
case object MonitorsEnabled extends Field[Boolean](true)
/** When rendering the edge in a graphical format, flip the order in which the edges' source and sink are presented.
*
* For example, when rendering graphML, yEd by default tries to put the source node vertically above the sink node, but
* [[RenderFlipped]] inverts this relationship. When a particular [[LazyModule]] contains both source nodes and sink
* nodes, flipping the rendering of one node's edge will usually produce a more concise visual layout for the
* [[LazyModule]].
*/
case object RenderFlipped extends Field[Boolean](false)
/** The sealed node class in the package, all node are derived from it.
*
* @param inner
* Sink interface implementation.
* @param outer
* Source interface implementation.
* @param valName
* val name of this node.
* @tparam DI
* Downward-flowing parameters received on the inner side of the node. It is usually a set of parameters
* describing the protocol parameters from a source. For an [[InwardNode]], it is determined by the connected
* [[OutwardNode]]. Since it can be connected to multiple sources, this parameter is always a Seq of source port
* parameters.
* @tparam UI
* Upward-flowing parameters generated by the inner side of the node. It is usually a set of parameters describing
* the protocol parameters of a sink. For an [[InwardNode]], it is determined by the node itself.
* @tparam EI
* Edge Parameters describing a connection on the inner side of the node. It is usually a set of transfers
* specified for a sink according to the protocol.
* @tparam BI
* Bundle type used when connecting to the inner side of the node. It is the hardware interface of this sink
* interface. It should extend [[chisel3.Data]], which represents the real hardware.
* @tparam DO
* Downward-flowing parameters generated on the outer side of the node. It is usually a set of parameters
* describing the protocol parameters of a source. For an [[OutwardNode]], it is determined by the node itself.
* @tparam UO
* Upward-flowing parameters received by the outer side of the node. It is usually a set of parameters describing
* the protocol parameters from a sink. For an [[OutwardNode]], it is determined by the connected [[InwardNode]].
* Since it can be connected to multiple sinks, this parameter is always a Seq of sink port parameters.
* @tparam EO
* Edge Parameters describing a connection on the outer side of the node. It is usually a set of transfers
* specified for a source according to the protocol.
* @tparam BO
* Bundle type used when connecting to the outer side of the node. It is the hardware interface of this source
* interface. It should extend [[chisel3.Data]], which represents the real hardware.
*
* @note
* Call Graph of [[MixedNode]]
* - line `─`: the source is processed by a function and the result is passed on to others
* - Arrow `→`: target of arrow is generated by source
*
* {{{
* (from the other node)
* ┌─────────────────────────────────────────────────────────[[InwardNode.uiParams]]─────────────┐
* ↓ │
* (binding node when elaboration) [[OutwardNode.uoParams]]────────────────────────[[MixedNode.mapParamsU]]→──────────┐ │
* [[InwardNode.accPI]] │ │ │
* │ │ (based on protocol) │
* │ │ [[MixedNode.inner.edgeI]] │
* │ │ ↓ │
* ↓ │ │ │
* (immobilize after elaboration) (inward port from [[OutwardNode]]) │ ↓ │
* [[InwardNode.iBindings]]──┐ [[MixedNode.iDirectPorts]]────────────────────→[[MixedNode.iPorts]] [[InwardNode.uiParams]] │
* │ │ ↑ │ │ │
* │ │ │ [[OutwardNode.doParams]] │ │
* │ │ │ (from the other node) │ │
* │ │ │ │ │ │
* │ │ │ │ │ │
* │ │ │ └────────┬──────────────┤ │
* │ │ │ │ │ │
* │ │ │ │ (based on protocol) │
* │ │ │ │ [[MixedNode.inner.edgeI]] │
* │ │ │ │ │ │
* │ │ (from the other node) │ ↓ │
* │ └───[[OutwardNode.oPortMapping]] [[OutwardNode.oStar]] │ [[MixedNode.edgesIn]]───┐ │
* │ ↑ ↑ │ │ ↓ │
* │ │ │ │ │ [[MixedNode.in]] │
* │ │ │ │ ↓ ↑ │
* │ (solve star connection) │ │ │ [[MixedNode.bundleIn]]──┘ │
* ├───[[MixedNode.resolveStar]]→─┼─────────────────────────────┤ └────────────────────────────────────┐ │
* │ │ │ [[MixedNode.bundleOut]]─┐ │ │
* │ │ │ ↑ ↓ │ │
* │ │ │ │ [[MixedNode.out]] │ │
* │ ↓ ↓ │ ↑ │ │
* │ ┌─────[[InwardNode.iPortMapping]] [[InwardNode.iStar]] [[MixedNode.edgesOut]]──┘ │ │
* │ │ (from the other node) ↑ │ │
* │ │ │ │ │ │
* │ │ │ [[MixedNode.outer.edgeO]] │ │
* │ │ │ (based on protocol) │ │
* │ │ │ │ │ │
* │ │ │ ┌────────────────────────────────────────┤ │ │
* │ │ │ │ │ │ │
* │ │ │ │ │ │ │
* │ │ │ │ │ │ │
* (immobilize after elaboration)│ ↓ │ │ │ │
* [[OutwardNode.oBindings]]─┘ [[MixedNode.oDirectPorts]]───→[[MixedNode.oPorts]] [[OutwardNode.doParams]] │ │
* ↑ (inward port from [[OutwardNode]]) │ │ │ │
* │ ┌─────────────────────────────────────────┤ │ │ │
* │ │ │ │ │ │
* │ │ │ │ │ │
* [[OutwardNode.accPO]] │ ↓ │ │ │
* (binding node when elaboration) │ [[InwardNode.diParams]]─────→[[MixedNode.mapParamsD]]────────────────────────────┘ │ │
* │ ↑ │ │
* │ └──────────────────────────────────────────────────────────────────────────────────────────┘ │
* └──────────────────────────────────────────────────────────────────────────────────────────────────────────┘
* }}}
*/
abstract class MixedNode[DI, UI, EI, BI <: Data, DO, UO, EO, BO <: Data](
val inner: InwardNodeImp[DI, UI, EI, BI],
val outer: OutwardNodeImp[DO, UO, EO, BO]
)(
implicit valName: ValName)
extends BaseNode
with NodeHandle[DI, UI, EI, BI, DO, UO, EO, BO]
with InwardNode[DI, UI, BI]
with OutwardNode[DO, UO, BO] {
// Generate a [[NodeHandle]] with inward and outward node are both this node.
val inward = this
val outward = this
/** Debug info of node bindings. */
def bindingInfo: String = s"""$iBindingInfo
|$oBindingInfo
|""".stripMargin
/** Debug info of port connections. */
def connectedPortsInfo: String = s"""${oPorts.size} outward ports connected: [${oPorts.map(_._2.name).mkString(",")}]
|${iPorts.size} inward ports connected: [${iPorts.map(_._2.name).mkString(",")}]
|""".stripMargin
/** Debug info of parameter propagation. */
def parametersInfo: String = s"""${doParams.size} downstream outward parameters: [${doParams.mkString(",")}]
|${uoParams.size} upstream outward parameters: [${uoParams.mkString(",")}]
|${diParams.size} downstream inward parameters: [${diParams.mkString(",")}]
|${uiParams.size} upstream inward parameters: [${uiParams.mkString(",")}]
|""".stripMargin
/** For a given node, converts [[OutwardNode.accPO]] and [[InwardNode.accPI]] to [[MixedNode.oPortMapping]] and
* [[MixedNode.iPortMapping]].
*
* Given counts of known inward and outward binding and inward and outward star bindings, return the resolved inward
* stars and outward stars.
*
* This method will also validate the arguments and throw a runtime error if the values are unsuitable for this type
* of node.
*
* @param iKnown
* Number of known-size ([[BIND_ONCE]]) input bindings.
* @param oKnown
* Number of known-size ([[BIND_ONCE]]) output bindings.
* @param iStar
* Number of unknown size ([[BIND_STAR]]) input bindings.
* @param oStar
* Number of unknown size ([[BIND_STAR]]) output bindings.
* @return
* A Tuple of the resolved number of input and output connections.
*/
protected[diplomacy] def resolveStar(iKnown: Int, oKnown: Int, iStar: Int, oStar: Int): (Int, Int)
/** Function to generate downward-flowing outward params from the downward-flowing input params and the current output
* ports.
*
* @param n
* The size of the output sequence to generate.
* @param p
* Sequence of downward-flowing input parameters of this node.
* @return
* A `n`-sized sequence of downward-flowing output edge parameters.
*/
protected[diplomacy] def mapParamsD(n: Int, p: Seq[DI]): Seq[DO]
/** Function to generate upward-flowing input parameters from the upward-flowing output parameters [[uiParams]].
*
* @param n
* Size of the output sequence.
* @param p
* Upward-flowing output edge parameters.
* @return
* A n-sized sequence of upward-flowing input edge parameters.
*/
protected[diplomacy] def mapParamsU(n: Int, p: Seq[UO]): Seq[UI]
/** @return
* The sink cardinality of the node, the number of outputs bound with [[BIND_QUERY]] summed with inputs bound with
* [[BIND_STAR]].
*/
protected[diplomacy] lazy val sinkCard: Int = oBindings.count(_._3 == BIND_QUERY) + iBindings.count(_._3 == BIND_STAR)
/** @return
* The source cardinality of this node, the number of inputs bound with [[BIND_QUERY]] summed with the number of
* output bindings bound with [[BIND_STAR]].
*/
protected[diplomacy] lazy val sourceCard: Int =
iBindings.count(_._3 == BIND_QUERY) + oBindings.count(_._3 == BIND_STAR)
/** @return list of nodes involved in flex bindings with this node. */
protected[diplomacy] lazy val flexes: Seq[BaseNode] =
oBindings.filter(_._3 == BIND_FLEX).map(_._2) ++ iBindings.filter(_._3 == BIND_FLEX).map(_._2)
/** Resolves the flex to be either source or sink and returns the offset where the [[BIND_STAR]] operators begin
* greedily taking up the remaining connections.
*
* @return
* A value >= 0 if it is sink cardinality, a negative value for source cardinality. The magnitude of the return
* value is not relevant.
*/
protected[diplomacy] lazy val flexOffset: Int = {
/** Recursively performs a depth-first search of the [[flexes]], [[BaseNode]]s connected to this node with flex
* operators. The algorithm bottoms out when we either get to a node we have already visited or when we get to a
* connection that is not a flex and can set the direction for us. Otherwise, recurse by visiting the `flexes` of
* each node in the current set and decide whether they should be added to the set or not.
*
* @return
* the mapping of [[BaseNode]] indexed by their serial numbers.
*/
def DFS(v: BaseNode, visited: Map[Int, BaseNode]): Map[Int, BaseNode] = {
if (visited.contains(v.serial) || !v.flexibleArityDirection) {
visited
} else {
v.flexes.foldLeft(visited + (v.serial -> v))((sum, n) => DFS(n, sum))
}
}
/** Determine which [[BaseNode]]s are involved in resolving the flex connections to/from this node.
*
* @example
* {{{
* a :*=* b :*=* c
* d :*=* b
* e :*=* f
* }}}
*
* `flexSet` for `a`, `b`, `c`, or `d` will be `Set(a, b, c, d)` `flexSet` for `e` or `f` will be `Set(e,f)`
*/
val flexSet = DFS(this, Map()).values
/** The total number of :*= operators where we're on the left. */
val allSink = flexSet.map(_.sinkCard).sum
/** The total number of :=* operators used when we're on the right. */
val allSource = flexSet.map(_.sourceCard).sum
require(
allSink == 0 || allSource == 0,
s"The nodes ${flexSet.map(_.name)} which are inter-connected by :*=* have ${allSink} :*= operators and ${allSource} :=* operators connected to them, making it impossible to determine cardinality inference direction."
)
allSink - allSource
}
/** @return A value >= 0 if it is sink cardinality, a negative value for source cardinality. */
protected[diplomacy] def edgeArityDirection(n: BaseNode): Int = {
if (flexibleArityDirection) flexOffset
else if (n.flexibleArityDirection) n.flexOffset
else 0
}
/** For a node which is connected between two nodes, select the one that will influence the direction of the flex
* resolution.
*/
protected[diplomacy] def edgeAritySelect(n: BaseNode, l: => Int, r: => Int): Int = {
val dir = edgeArityDirection(n)
if (dir < 0) l
else if (dir > 0) r
else 1
}
/** Ensure that the same node is not visited twice in resolving `:*=`, etc operators. */
private var starCycleGuard = false
/** Resolve all the star operators into concrete indices. As connections are being made, some may be "star"
* connections which need to be resolved to determine how many actual edges they correspond to. We also
* need to build up the ranges of edges which correspond to each binding operator, so that we can apply the correct
* edge parameters and later build up the correct bundle connections.
*
* [[oPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that oPort (binding
* operator). [[iPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that iPort
* (binding operator). [[oStar]]: `Int` the value to return for this node `N` for any `N :*= foo` or `N :*=* foo :*=
* bar` [[iStar]]: `Int` the value to return for this node `N` for any `foo :=* N` or `bar :=* foo :*=* N`
*/
protected[diplomacy] lazy val (
oPortMapping: Seq[(Int, Int)],
iPortMapping: Seq[(Int, Int)],
oStar: Int,
iStar: Int
) = {
try {
if (starCycleGuard) throw StarCycleException()
starCycleGuard = true
// For a given node N...
// Number of foo :=* N
// + Number of bar :=* foo :*=* N
val oStars = oBindings.count { case (_, n, b, _, _) =>
b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) < 0)
}
// Number of N :*= foo
// + Number of N :*=* foo :*= bar
val iStars = iBindings.count { case (_, n, b, _, _) =>
b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) > 0)
}
// 1 for foo := N
// + bar.iStar for bar :*= foo :*=* N
// + foo.iStar for foo :*= N
// + 0 for foo :=* N
val oKnown = oBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, 0, n.iStar)
case BIND_QUERY => n.iStar
case BIND_STAR => 0
}
}.sum
// 1 for N := foo
// + bar.oStar for N :*=* foo :=* bar
// + foo.oStar for N :=* foo
// + 0 for N :*= foo
val iKnown = iBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, n.oStar, 0)
case BIND_QUERY => n.oStar
case BIND_STAR => 0
}
}.sum
// Resolve star depends on the node subclass to implement the algorithm for this.
val (iStar, oStar) = resolveStar(iKnown, oKnown, iStars, oStars)
// Cumulative list of resolved outward binding range starting points
val oSum = oBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, oStar, n.iStar)
case BIND_QUERY => n.iStar
case BIND_STAR => oStar
}
}.scanLeft(0)(_ + _)
// Cumulative list of resolved inward binding range starting points
val iSum = iBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, n.oStar, iStar)
case BIND_QUERY => n.oStar
case BIND_STAR => iStar
}
}.scanLeft(0)(_ + _)
// Create ranges for each binding based on the running sums and return
// those along with resolved values for the star operations.
(oSum.init.zip(oSum.tail), iSum.init.zip(iSum.tail), oStar, iStar)
} catch {
case c: StarCycleException => throw c.copy(loop = context +: c.loop)
}
}
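// Worked example (illustrative): if the only bindings seen by node N are `foo :=* N` (one outward
// BIND_STAR) and `N :*= bar` (one inward BIND_STAR), then oKnown = iKnown = 0 and oStars = iStars = 1,
// so resolveStar(0, 0, 1, 1) decides how many edges each star expands to; the oSum/iSum scans above then
// turn those counts into the per-binding (start, end) ranges stored in oPortMapping/iPortMapping.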
/** Sequence of outward ports.
*
* This should be called after all star bindings are resolved.
*
* Each element is: `j` Port index of this binding in the [[InwardNode.iPortMapping]] of the node on the other side
* of the binding. `n` Instance of the inward node. `p` View of [[Parameters]] where this connection was made. `s`
* Source info where this connection was made in the source code.
*/
protected[diplomacy] lazy val oDirectPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] =
oBindings.flatMap { case (i, n, _, p, s) =>
// for each binding operator in this node, look at what it connects to
val (start, end) = n.iPortMapping(i)
(start until end).map { j => (j, n, p, s) }
}
/** Sequence of inward ports.
*
* This should be called after all star bindings are resolved.
*
* `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding. `n` Instance of
* outward node. `p` View of [[Parameters]] where this connection was made. `s` [[SourceInfo]] where this connection
* was made in the source code.
*/
protected[diplomacy] lazy val iDirectPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] =
iBindings.flatMap { case (i, n, _, p, s) =>
// query this port index range of this node in the other side of node.
val (start, end) = n.oPortMapping(i)
(start until end).map { j => (j, n, p, s) }
}
// Ephemeral nodes (which have non-None iForward/oForward) have in_degree = out_degree
// Thus, there must exist an Eulerian path and the below algorithms terminate
@scala.annotation.tailrec
private def oTrace(
tuple: (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)
): (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo) = tuple match {
case (i, n, p, s) => n.iForward(i) match {
case None => (i, n, p, s)
case Some((j, m)) => oTrace((j, m, p, s))
}
}
@scala.annotation.tailrec
private def iTrace(
tuple: (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)
): (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo) = tuple match {
case (i, n, p, s) => n.oForward(i) match {
case None => (i, n, p, s)
case Some((j, m)) => iTrace((j, m, p, s))
}
}
/** Final output ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved.
*
* Each Port is a tuple of:
* - Numeric index of this binding in the [[InwardNode]] on the other end.
* - [[InwardNode]] on the other end of this binding.
* - A view of [[Parameters]] where the binding occurred.
* - [[SourceInfo]] for source-level error reporting.
*/
lazy val oPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] = oDirectPorts.map(oTrace)
/** Final input ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved.
*
* Each Port is a tuple of:
* - numeric index of this binding in [[OutwardNode]] on the other end.
* - [[OutwardNode]] on the other end of this binding.
* - a view of [[Parameters]] where the binding occurred.
* - [[SourceInfo]] for source-level error reporting.
*/
lazy val iPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] = iDirectPorts.map(iTrace)
private var oParamsCycleGuard = false
protected[diplomacy] lazy val diParams: Seq[DI] = iPorts.map { case (i, n, _, _) => n.doParams(i) }
protected[diplomacy] lazy val doParams: Seq[DO] = {
try {
if (oParamsCycleGuard) throw DownwardCycleException()
oParamsCycleGuard = true
val o = mapParamsD(oPorts.size, diParams)
require(
o.size == oPorts.size,
s"""Diplomacy has detected a problem with your graph:
|At the following node, the number of outward ports should equal the number of produced outward parameters.
|$context
|$connectedPortsInfo
|Downstreamed inward parameters: [${diParams.mkString(",")}]
|Produced outward parameters: [${o.mkString(",")}]
|""".stripMargin
)
o.map(outer.mixO(_, this))
} catch {
case c: DownwardCycleException => throw c.copy(loop = context +: c.loop)
}
}
private var iParamsCycleGuard = false
protected[diplomacy] lazy val uoParams: Seq[UO] = oPorts.map { case (o, n, _, _) => n.uiParams(o) }
protected[diplomacy] lazy val uiParams: Seq[UI] = {
try {
if (iParamsCycleGuard) throw UpwardCycleException()
iParamsCycleGuard = true
val i = mapParamsU(iPorts.size, uoParams)
require(
i.size == iPorts.size,
s"""Diplomacy has detected a problem with your graph:
|At the following node, the number of inward ports should equal the number of produced inward parameters.
|$context
|$connectedPortsInfo
|Upstreamed outward parameters: [${uoParams.mkString(",")}]
|Produced inward parameters: [${i.mkString(",")}]
|""".stripMargin
)
i.map(inner.mixI(_, this))
} catch {
case c: UpwardCycleException => throw c.copy(loop = context +: c.loop)
}
}
/** Outward edge parameters. */
protected[diplomacy] lazy val edgesOut: Seq[EO] =
(oPorts.zip(doParams)).map { case ((i, n, p, s), o) => outer.edgeO(o, n.uiParams(i), p, s) }
/** Inward edge parameters. */
protected[diplomacy] lazy val edgesIn: Seq[EI] =
(iPorts.zip(uiParams)).map { case ((o, n, p, s), i) => inner.edgeI(n.doParams(o), i, p, s) }
/** A tuple of the input edge parameters and output edge parameters for the edges bound to this node.
*
* If you need access to the edges of a foreign Node, use this method (in/out create bundles).
*/
lazy val edges: Edges[EI, EO] = Edges(edgesIn, edgesOut)
/** Create actual Wires corresponding to the Bundles parameterized by the outward edges of this node. */
protected[diplomacy] lazy val bundleOut: Seq[BO] = edgesOut.map { e =>
val x = Wire(outer.bundleO(e)).suggestName(s"${valName.value}Out")
// TODO: DontCare unconnected forwarded diplomatic signals for compatibility reasons.
// In the future, we should add an option to decide whether to allow unconnected signals in the LazyModule.
x := DontCare
x
}
/** Create actual Wires corresponding to the Bundles parameterized by the inward edges of this node. */
protected[diplomacy] lazy val bundleIn: Seq[BI] = edgesIn.map { e =>
val x = Wire(inner.bundleI(e)).suggestName(s"${valName.value}In")
// TODO: DontCare unconnected forwarded diplomatic signals for compatibility reasons.
// In the future, we should add an option to decide whether to allow unconnected signals in the LazyModule.
x := DontCare
x
}
private def emptyDanglesOut: Seq[Dangle] = oPorts.zipWithIndex.map { case ((j, n, _, _), i) =>
Dangle(
source = HalfEdge(serial, i),
sink = HalfEdge(n.serial, j),
flipped = false,
name = wirePrefix + "out",
dataOpt = None
)
}
private def emptyDanglesIn: Seq[Dangle] = iPorts.zipWithIndex.map { case ((j, n, _, _), i) =>
Dangle(
source = HalfEdge(n.serial, j),
sink = HalfEdge(serial, i),
flipped = true,
name = wirePrefix + "in",
dataOpt = None
)
}
/** Create the [[Dangle]]s which describe the connections from this node output to other nodes inputs. */
protected[diplomacy] def danglesOut: Seq[Dangle] = emptyDanglesOut.zipWithIndex.map { case (d, i) =>
d.copy(dataOpt = Some(bundleOut(i)))
}
/** Create the [[Dangle]]s which describe the connections from this node input from other nodes outputs. */
protected[diplomacy] def danglesIn: Seq[Dangle] = emptyDanglesIn.zipWithIndex.map { case (d, i) =>
d.copy(dataOpt = Some(bundleIn(i)))
}
private[diplomacy] var instantiated = false
/** Gather Bundle and edge parameters of outward ports.
*
* Accessors to the result of negotiation to be used within [[LazyModuleImp]] code. Should only be used within
* [[LazyModuleImp]] code or after its instantiation has completed.
*/
def out: Seq[(BO, EO)] = {
require(
instantiated,
s"$name.out should not be called until after instantiation of its parent LazyModule.module has begun"
)
bundleOut.zip(edgesOut)
}
/** Gather Bundle and edge parameters of inward ports.
*
* Accessors to the result of negotiation to be used within [[LazyModuleImp]] code. Should only be used within
* [[LazyModuleImp]] code or after its instantiation has completed.
*/
def in: Seq[(BI, EI)] = {
require(
instantiated,
s"$name.in should not be called until after instantiation of its parent LazyModule.module has begun"
)
bundleIn.zip(edgesIn)
}
/** Actually instantiate this node during [[LazyModuleImp]] evaluation. Mark that it's safe to use the Bundle wires,
* instantiate monitors on all input ports if appropriate, and return all the dangles of this node.
*/
protected[diplomacy] def instantiate(): Seq[Dangle] = {
instantiated = true
if (!circuitIdentity) {
(iPorts.zip(in)).foreach { case ((_, _, p, _), (b, e)) => if (p(MonitorsEnabled)) inner.monitor(b, e) }
}
danglesOut ++ danglesIn
}
protected[diplomacy] def cloneDangles(): Seq[Dangle] = emptyDanglesOut ++ emptyDanglesIn
/** Connects the outward part of a node with the inward part of this node. */
protected[diplomacy] def bind(
h: OutwardNode[DI, UI, BI],
binding: NodeBinding
)(
implicit p: Parameters,
sourceInfo: SourceInfo
): Unit = {
val x = this // x := y
val y = h
sourceLine(sourceInfo, " at ", "")
val i = x.iPushed
val o = y.oPushed
y.oPush(
i,
x,
binding match {
case BIND_ONCE => BIND_ONCE
case BIND_FLEX => BIND_FLEX
case BIND_STAR => BIND_QUERY
case BIND_QUERY => BIND_STAR
}
)
x.iPush(o, y, binding)
}
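// For example (illustrative): evaluating `sink :*= source` calls sink.bind(source, BIND_STAR), which
// records (sink.iPushed, sink, BIND_QUERY) on the source's outward binding list and
// (source.oPushed, source, BIND_STAR) on the sink's inward binding list, mirroring the
// BIND_STAR <-> BIND_QUERY swap above.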
/* Metadata for printing the node graph. */
def inputs: Seq[(OutwardNode[DI, UI, BI], RenderedEdge)] = (iPorts.zip(edgesIn)).map { case ((_, n, p, _), e) =>
val re = inner.render(e)
(n, re.copy(flipped = re.flipped != p(RenderFlipped)))
}
/** Metadata for printing the node graph */
def outputs: Seq[(InwardNode[DO, UO, BO], RenderedEdge)] = oPorts.map { case (i, n, _, _) => (n, n.inputs(i)._2) }
}
File Cluster.scala:
package freechips.rocketchip.subsystem
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy.bundlebridge._
import org.chipsalliance.diplomacy.lazymodule._
import freechips.rocketchip.devices.debug.{TLDebugModule}
import freechips.rocketchip.diplomacy.{FlipRendering}
import freechips.rocketchip.interrupts.{IntIdentityNode, IntSyncIdentityNode, NullIntSource}
import freechips.rocketchip.prci.{ClockCrossingType, NoCrossing, ClockSinkParameters, ClockGroupIdentityNode, BundleBridgeBlockDuringReset}
import freechips.rocketchip.tile.{RocketTile, NMI, TraceBundle}
import freechips.rocketchip.tilelink.TLWidthWidget
import freechips.rocketchip.util.TraceCoreInterface
import scala.collection.immutable.SortedMap
case class ClustersLocated(loc: HierarchicalLocation) extends Field[Seq[CanAttachCluster]](Nil)
case class ClusterParams(
val clusterId: Int,
val clockSinkParams: ClockSinkParameters = ClockSinkParameters()
) extends HierarchicalElementParams {
val baseName = "cluster"
val uniqueName = s"${baseName}_$clusterId"
def instantiate(crossing: HierarchicalElementCrossingParamsLike, lookup: LookupByClusterIdImpl)(implicit p: Parameters): Cluster = {
new Cluster(this, crossing.crossingType, lookup)
}
}
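// A minimal sketch (illustrative only): parameters for a single cluster with id 0, relying on the
// default ClockSinkParameters; CanAttachCluster.instantiate below consumes such values.
//
//   val exampleClusterParams = ClusterParams(clusterId = 0)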
class Cluster(
val thisClusterParams: ClusterParams,
crossing: ClockCrossingType,
lookup: LookupByClusterIdImpl)(implicit p: Parameters) extends BaseHierarchicalElement(crossing)(p)
with Attachable
with HasConfigurableTLNetworkTopology
with InstantiatesHierarchicalElements
with HasHierarchicalElements
{
val busContextName = thisClusterParams.baseName
lazy val clusterId = thisClusterParams.clusterId
lazy val location = InCluster(clusterId)
lazy val allClockGroupsNode = ClockGroupIdentityNode()
val csbus = tlBusWrapperLocationMap(CSBUS(clusterId)) // like the sbus in the base subsystem
val ccbus = tlBusWrapperLocationMap(CCBUS(clusterId)) // like the cbus in the base subsystem
val cmbus = tlBusWrapperLocationMap.lift(CMBUS(clusterId)).getOrElse(csbus)
csbus.clockGroupNode := allClockGroupsNode
ccbus.clockGroupNode := allClockGroupsNode
val slaveNode = ccbus.inwardNode
val masterNode = cmbus.outwardNode
lazy val ibus = LazyModule(new InterruptBusWrapper)
ibus.clockNode := csbus.fixedClockNode
def msipDomain = this
def meipDomain = this
def seipDomain = this
def toPlicDomain = this
lazy val msipNodes = totalTileIdList.map { i => (i, IntIdentityNode()) }.to(SortedMap)
lazy val meipNodes = totalTileIdList.map { i => (i, IntIdentityNode()) }.to(SortedMap)
lazy val seipNodes = totalTileIdList.map { i => (i, IntIdentityNode()) }.to(SortedMap)
lazy val tileToPlicNodes = totalTileIdList.map { i => (i, IntIdentityNode()) }.to(SortedMap)
lazy val debugNodes = totalTileIdList.map { i => (i, IntSyncIdentityNode()) }.to(SortedMap)
lazy val nmiNodes = totalTiles.filter { case (i,t) => t.tileParams.core.useNMI }
.mapValues(_ => BundleBridgeIdentityNode[NMI]()).to(SortedMap)
lazy val tileHartIdNodes = totalTileIdList.map { i => (i, BundleBridgeIdentityNode[UInt]()) }.to(SortedMap)
lazy val tileResetVectorNodes = totalTileIdList.map { i => (i, BundleBridgeIdentityNode[UInt]()) }.to(SortedMap)
lazy val traceCoreNodes = totalTileIdList.map { i => (i, BundleBridgeIdentityNode[TraceCoreInterface]()) }.to(SortedMap)
lazy val traceNodes = totalTileIdList.map { i => (i, BundleBridgeIdentityNode[TraceBundle]()) }.to(SortedMap)
// TODO fix: shouldn't need to connect dummy notifications
tileHaltXbarNode := NullIntSource()
tileWFIXbarNode := NullIntSource()
tileCeaseXbarNode := NullIntSource()
override lazy val module = new ClusterModuleImp(this)
}
class ClusterModuleImp(outer: Cluster) extends BaseHierarchicalElementModuleImp[Cluster](outer)
case class InCluster(id: Int) extends HierarchicalLocation(s"Cluster$id")
class ClusterPRCIDomain(
clockSinkParams: ClockSinkParameters,
crossingParams: HierarchicalElementCrossingParamsLike,
clusterParams: ClusterParams,
lookup: LookupByClusterIdImpl)
(implicit p: Parameters) extends HierarchicalElementPRCIDomain[Cluster](clockSinkParams, crossingParams)
{
val element = element_reset_domain {
LazyModule(clusterParams.instantiate(crossingParams, lookup))
}
// Nothing should depend on the clocks coming from clockNode anyways
clockNode := element.csbus.fixedClockNode
}
trait CanAttachCluster {
type ClusterContextType <: DefaultHierarchicalElementContextType
def clusterParams: ClusterParams
def crossingParams: HierarchicalElementCrossingParamsLike
def instantiate(allClusterParams: Seq[ClusterParams], instantiatedClusters: SortedMap[Int, ClusterPRCIDomain])(implicit p: Parameters): ClusterPRCIDomain = {
val clockSinkParams = clusterParams.clockSinkParams.copy(name = Some(clusterParams.uniqueName))
val cluster_prci_domain = LazyModule(new ClusterPRCIDomain(
clockSinkParams, crossingParams, clusterParams, PriorityMuxClusterIdFromSeq(allClusterParams)))
cluster_prci_domain
}
def connect(domain: ClusterPRCIDomain, context: ClusterContextType): Unit = {
connectMasterPorts(domain, context)
connectSlavePorts(domain, context)
connectInterrupts(domain, context)
connectPRC(domain, context)
connectOutputNotifications(domain, context)
connectInputConstants(domain, context)
connectTrace(domain, context)
}
def connectMasterPorts(domain: ClusterPRCIDomain, context: Attachable): Unit = {
implicit val p = context.p
val dataBus = context.locateTLBusWrapper(crossingParams.master.where)
dataBus.coupleFrom(clusterParams.baseName) { bus =>
bus :=* crossingParams.master.injectNode(context) :=* domain.crossMasterPort(crossingParams.crossingType)
}
}
def connectSlavePorts(domain: ClusterPRCIDomain, context: Attachable): Unit = {
implicit val p = context.p
val controlBus = context.locateTLBusWrapper(crossingParams.slave.where)
controlBus.coupleTo(clusterParams.baseName) { bus =>
domain.crossSlavePort(crossingParams.crossingType) :*= crossingParams.slave.injectNode(context) :*= TLWidthWidget(controlBus.beatBytes) :*= bus
}
}
def connectInterrupts(domain: ClusterPRCIDomain, context: ClusterContextType): Unit = {
implicit val p = context.p
domain.element.debugNodes.foreach { case (hartid, node) =>
node := context.debugNodes(hartid)
}
domain.element.msipNodes.foreach { case (hartid, node) => context.msipDomain {
domain.crossIntIn(crossingParams.crossingType, node) := context.msipNodes(hartid)
}}
domain.element.meipNodes.foreach { case (hartid, node) => context.meipDomain {
domain.crossIntIn(crossingParams.crossingType, node) := context.meipNodes(hartid)
}}
domain.element.seipNodes.foreach { case (hartid, node) => context.seipDomain {
domain.crossIntIn(crossingParams.crossingType, node) := context.seipNodes(hartid)
}}
domain.element.tileToPlicNodes.foreach { case (hartid, node) =>
FlipRendering { implicit p =>
context.tileToPlicNodes(hartid) :=* domain.crossIntOut(crossingParams.crossingType, node) }
}
context.ibus.fromSync :=* domain.crossIntOut(crossingParams.crossingType, domain.element.ibus.toPLIC)
domain.element.nmiNodes.foreach { case (hartid, node) =>
node := context.nmiNodes(hartid)
}
}
def connectPRC(domain: ClusterPRCIDomain, context: ClusterContextType): Unit = {
implicit val p = context.p
domain.element.allClockGroupsNode :*= context.allClockGroupsNode
domain {
domain.element_reset_domain.clockNode := crossingParams.resetCrossingType.injectClockNode := domain.clockNode
}
}
def connectOutputNotifications(domain: ClusterPRCIDomain, context: ClusterContextType): Unit = {
implicit val p = context.p
context.tileHaltXbarNode :=* domain.crossIntOut(NoCrossing, domain.element.tileHaltXbarNode)
context.tileWFIXbarNode :=* domain.crossIntOut(NoCrossing, domain.element.tileWFIXbarNode)
context.tileCeaseXbarNode :=* domain.crossIntOut(NoCrossing, domain.element.tileCeaseXbarNode)
}
def connectInputConstants(domain: ClusterPRCIDomain, context: ClusterContextType): Unit = {
implicit val p = context.p
val tlBusToGetPrefixFrom = context.locateTLBusWrapper(crossingParams.mmioBaseAddressPrefixWhere)
domain.element.tileHartIdNodes.foreach { case (hartid, node) =>
node := context.tileHartIdNodes(hartid)
}
domain.element.tileResetVectorNodes.foreach { case (hartid, node) =>
node := context.tileResetVectorNodes(hartid)
}
}
def connectTrace(domain: ClusterPRCIDomain, context: ClusterContextType): Unit = {
implicit val p = context.p
domain.element.traceNodes.foreach { case (hartid, node) =>
val traceNexusNode = BundleBridgeBlockDuringReset[TraceBundle](
resetCrossingType = crossingParams.resetCrossingType)
context.traceNodes(hartid) := traceNexusNode := node
}
domain.element.traceCoreNodes.foreach { case (hartid, node) =>
val traceCoreNexusNode = BundleBridgeBlockDuringReset[TraceCoreInterface](
resetCrossingType = crossingParams.resetCrossingType)
context.traceCoreNodes(hartid) :*= traceCoreNexusNode := node
}
}
}
case class ClusterAttachParams(
clusterParams: ClusterParams,
crossingParams: HierarchicalElementCrossingParamsLike
) extends CanAttachCluster
case class CloneClusterAttachParams(
sourceClusterId: Int,
cloneParams: CanAttachCluster
) extends CanAttachCluster {
def clusterParams = cloneParams.clusterParams
def crossingParams = cloneParams.crossingParams
override def instantiate(allClusterParams: Seq[ClusterParams], instantiatedClusters: SortedMap[Int, ClusterPRCIDomain])(implicit p: Parameters): ClusterPRCIDomain = {
require(instantiatedClusters.contains(sourceClusterId))
val clockSinkParams = clusterParams.clockSinkParams.copy(name = Some(clusterParams.uniqueName))
val cluster_prci_domain = CloneLazyModule(
new ClusterPRCIDomain(clockSinkParams, crossingParams, clusterParams, PriorityMuxClusterIdFromSeq(allClusterParams)),
instantiatedClusters(sourceClusterId)
)
cluster_prci_domain
}
}
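// A hedged sketch (hypothetical names, not from this file) of how a cluster could be attached via the
// ClustersLocated key defined above; the concrete location object and crossing parameters depend on the
// surrounding subsystem configuration.
//
//   class WithExampleCluster(loc: HierarchicalLocation, xing: HierarchicalElementCrossingParamsLike)
//     extends Config((site, here, up) => {
//       case ClustersLocated(`loc`) =>
//         up(ClustersLocated(loc)) :+ ClusterAttachParams(ClusterParams(clusterId = 0), xing)
//     })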
| module ClusterPRCIDomain( // @[ClockDomain.scala:14:9]
input auto_element_reset_domain_element_all_clock_groups_in_1_member_ccbus0_0_clock, // @[LazyModuleImp.scala:107:25]
input auto_element_reset_domain_element_all_clock_groups_in_1_member_ccbus0_0_reset, // @[LazyModuleImp.scala:107:25]
input auto_element_reset_domain_element_all_clock_groups_in_0_member_csbus0_0_clock, // @[LazyModuleImp.scala:107:25]
input auto_element_reset_domain_element_all_clock_groups_in_0_member_csbus0_0_reset, // @[LazyModuleImp.scala:107:25]
output auto_element_reset_domain_element_trace_nodes_out_3_insns_0_valid, // @[LazyModuleImp.scala:107:25]
output [39:0] auto_element_reset_domain_element_trace_nodes_out_3_insns_0_iaddr, // @[LazyModuleImp.scala:107:25]
output [31:0] auto_element_reset_domain_element_trace_nodes_out_3_insns_0_insn, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_element_reset_domain_element_trace_nodes_out_3_insns_0_priv, // @[LazyModuleImp.scala:107:25]
output auto_element_reset_domain_element_trace_nodes_out_3_insns_0_exception, // @[LazyModuleImp.scala:107:25]
output auto_element_reset_domain_element_trace_nodes_out_3_insns_0_interrupt, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_element_reset_domain_element_trace_nodes_out_3_insns_0_cause, // @[LazyModuleImp.scala:107:25]
output [39:0] auto_element_reset_domain_element_trace_nodes_out_3_insns_0_tval, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_element_reset_domain_element_trace_nodes_out_3_time, // @[LazyModuleImp.scala:107:25]
output auto_element_reset_domain_element_trace_nodes_out_2_insns_0_valid, // @[LazyModuleImp.scala:107:25]
output [39:0] auto_element_reset_domain_element_trace_nodes_out_2_insns_0_iaddr, // @[LazyModuleImp.scala:107:25]
output [31:0] auto_element_reset_domain_element_trace_nodes_out_2_insns_0_insn, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_element_reset_domain_element_trace_nodes_out_2_insns_0_priv, // @[LazyModuleImp.scala:107:25]
output auto_element_reset_domain_element_trace_nodes_out_2_insns_0_exception, // @[LazyModuleImp.scala:107:25]
output auto_element_reset_domain_element_trace_nodes_out_2_insns_0_interrupt, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_element_reset_domain_element_trace_nodes_out_2_insns_0_cause, // @[LazyModuleImp.scala:107:25]
output [39:0] auto_element_reset_domain_element_trace_nodes_out_2_insns_0_tval, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_element_reset_domain_element_trace_nodes_out_2_time, // @[LazyModuleImp.scala:107:25]
output auto_element_reset_domain_element_trace_nodes_out_1_insns_0_valid, // @[LazyModuleImp.scala:107:25]
output [39:0] auto_element_reset_domain_element_trace_nodes_out_1_insns_0_iaddr, // @[LazyModuleImp.scala:107:25]
output [31:0] auto_element_reset_domain_element_trace_nodes_out_1_insns_0_insn, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_element_reset_domain_element_trace_nodes_out_1_insns_0_priv, // @[LazyModuleImp.scala:107:25]
output auto_element_reset_domain_element_trace_nodes_out_1_insns_0_exception, // @[LazyModuleImp.scala:107:25]
output auto_element_reset_domain_element_trace_nodes_out_1_insns_0_interrupt, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_element_reset_domain_element_trace_nodes_out_1_insns_0_cause, // @[LazyModuleImp.scala:107:25]
output [39:0] auto_element_reset_domain_element_trace_nodes_out_1_insns_0_tval, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_element_reset_domain_element_trace_nodes_out_1_time, // @[LazyModuleImp.scala:107:25]
output auto_element_reset_domain_element_trace_nodes_out_0_insns_0_valid, // @[LazyModuleImp.scala:107:25]
output [39:0] auto_element_reset_domain_element_trace_nodes_out_0_insns_0_iaddr, // @[LazyModuleImp.scala:107:25]
output [31:0] auto_element_reset_domain_element_trace_nodes_out_0_insns_0_insn, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_element_reset_domain_element_trace_nodes_out_0_insns_0_priv, // @[LazyModuleImp.scala:107:25]
output auto_element_reset_domain_element_trace_nodes_out_0_insns_0_exception, // @[LazyModuleImp.scala:107:25]
output auto_element_reset_domain_element_trace_nodes_out_0_insns_0_interrupt, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_element_reset_domain_element_trace_nodes_out_0_insns_0_cause, // @[LazyModuleImp.scala:107:25]
output [39:0] auto_element_reset_domain_element_trace_nodes_out_0_insns_0_tval, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_element_reset_domain_element_trace_nodes_out_0_time, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_element_reset_domain_element_tile_hart_id_nodes_in_3, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_element_reset_domain_element_tile_hart_id_nodes_in_2, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_element_reset_domain_element_tile_hart_id_nodes_in_1, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_element_reset_domain_element_tile_hart_id_nodes_in_0, // @[LazyModuleImp.scala:107:25]
input auto_element_reset_domain_element_debug_nodes_in_3_sync_0, // @[LazyModuleImp.scala:107:25]
input auto_element_reset_domain_element_debug_nodes_in_2_sync_0, // @[LazyModuleImp.scala:107:25]
input auto_element_reset_domain_element_debug_nodes_in_1_sync_0, // @[LazyModuleImp.scala:107:25]
input auto_element_reset_domain_element_debug_nodes_in_0_sync_0, // @[LazyModuleImp.scala:107:25]
output auto_int_out_clock_xing_out_2_sync_0, // @[LazyModuleImp.scala:107:25]
output auto_int_out_clock_xing_out_2_sync_1, // @[LazyModuleImp.scala:107:25]
output auto_int_out_clock_xing_out_2_sync_2, // @[LazyModuleImp.scala:107:25]
output auto_int_out_clock_xing_out_2_sync_3, // @[LazyModuleImp.scala:107:25]
output auto_int_out_clock_xing_out_2_sync_4, // @[LazyModuleImp.scala:107:25]
output auto_int_out_clock_xing_out_1_sync_0, // @[LazyModuleImp.scala:107:25]
output auto_int_out_clock_xing_out_1_sync_1, // @[LazyModuleImp.scala:107:25]
output auto_int_out_clock_xing_out_1_sync_2, // @[LazyModuleImp.scala:107:25]
output auto_int_out_clock_xing_out_1_sync_3, // @[LazyModuleImp.scala:107:25]
output auto_int_out_clock_xing_out_1_sync_4, // @[LazyModuleImp.scala:107:25]
output auto_int_out_clock_xing_out_0_sync_0, // @[LazyModuleImp.scala:107:25]
output auto_int_out_clock_xing_out_0_sync_1, // @[LazyModuleImp.scala:107:25]
output auto_int_out_clock_xing_out_0_sync_2, // @[LazyModuleImp.scala:107:25]
output auto_int_out_clock_xing_out_0_sync_3, // @[LazyModuleImp.scala:107:25]
output auto_int_out_clock_xing_out_0_sync_4, // @[LazyModuleImp.scala:107:25]
input auto_int_in_clock_xing_in_11_sync_0, // @[LazyModuleImp.scala:107:25]
input auto_int_in_clock_xing_in_10_sync_0, // @[LazyModuleImp.scala:107:25]
input auto_int_in_clock_xing_in_9_sync_0, // @[LazyModuleImp.scala:107:25]
input auto_int_in_clock_xing_in_8_sync_0, // @[LazyModuleImp.scala:107:25]
input auto_int_in_clock_xing_in_7_sync_0, // @[LazyModuleImp.scala:107:25]
input auto_int_in_clock_xing_in_6_sync_0, // @[LazyModuleImp.scala:107:25]
input auto_int_in_clock_xing_in_5_sync_0, // @[LazyModuleImp.scala:107:25]
input auto_int_in_clock_xing_in_4_sync_0, // @[LazyModuleImp.scala:107:25]
input auto_int_in_clock_xing_in_3_sync_0, // @[LazyModuleImp.scala:107:25]
input auto_int_in_clock_xing_in_3_sync_1, // @[LazyModuleImp.scala:107:25]
input auto_int_in_clock_xing_in_2_sync_0, // @[LazyModuleImp.scala:107:25]
input auto_int_in_clock_xing_in_2_sync_1, // @[LazyModuleImp.scala:107:25]
input auto_int_in_clock_xing_in_1_sync_0, // @[LazyModuleImp.scala:107:25]
input auto_int_in_clock_xing_in_1_sync_1, // @[LazyModuleImp.scala:107:25]
input auto_int_in_clock_xing_in_0_sync_0, // @[LazyModuleImp.scala:107:25]
input auto_int_in_clock_xing_in_0_sync_1, // @[LazyModuleImp.scala:107:25]
input auto_tl_master_clock_xing_out_a_ready, // @[LazyModuleImp.scala:107:25]
output auto_tl_master_clock_xing_out_a_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_tl_master_clock_xing_out_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_tl_master_clock_xing_out_a_bits_param, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_tl_master_clock_xing_out_a_bits_size, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_tl_master_clock_xing_out_a_bits_source, // @[LazyModuleImp.scala:107:25]
output [31:0] auto_tl_master_clock_xing_out_a_bits_address, // @[LazyModuleImp.scala:107:25]
output [7:0] auto_tl_master_clock_xing_out_a_bits_mask, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_tl_master_clock_xing_out_a_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_tl_master_clock_xing_out_a_bits_corrupt, // @[LazyModuleImp.scala:107:25]
output auto_tl_master_clock_xing_out_b_ready, // @[LazyModuleImp.scala:107:25]
input auto_tl_master_clock_xing_out_b_valid, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_tl_master_clock_xing_out_b_bits_param, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_tl_master_clock_xing_out_b_bits_source, // @[LazyModuleImp.scala:107:25]
input [31:0] auto_tl_master_clock_xing_out_b_bits_address, // @[LazyModuleImp.scala:107:25]
input auto_tl_master_clock_xing_out_c_ready, // @[LazyModuleImp.scala:107:25]
output auto_tl_master_clock_xing_out_c_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_tl_master_clock_xing_out_c_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_tl_master_clock_xing_out_c_bits_param, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_tl_master_clock_xing_out_c_bits_size, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_tl_master_clock_xing_out_c_bits_source, // @[LazyModuleImp.scala:107:25]
output [31:0] auto_tl_master_clock_xing_out_c_bits_address, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_tl_master_clock_xing_out_c_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_tl_master_clock_xing_out_c_bits_corrupt, // @[LazyModuleImp.scala:107:25]
output auto_tl_master_clock_xing_out_d_ready, // @[LazyModuleImp.scala:107:25]
input auto_tl_master_clock_xing_out_d_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_tl_master_clock_xing_out_d_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_tl_master_clock_xing_out_d_bits_param, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_tl_master_clock_xing_out_d_bits_size, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_tl_master_clock_xing_out_d_bits_source, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_tl_master_clock_xing_out_d_bits_sink, // @[LazyModuleImp.scala:107:25]
input auto_tl_master_clock_xing_out_d_bits_denied, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_tl_master_clock_xing_out_d_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_tl_master_clock_xing_out_d_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input auto_tl_master_clock_xing_out_e_ready, // @[LazyModuleImp.scala:107:25]
output auto_tl_master_clock_xing_out_e_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_tl_master_clock_xing_out_e_bits_sink // @[LazyModuleImp.scala:107:25]
);
wire clockNode_auto_anon_in_reset; // @[ClockGroup.scala:104:9]
wire clockNode_auto_anon_in_clock; // @[ClockGroup.scala:104:9]
wire element_reset_domain_auto_clock_in_reset; // @[ClockDomain.scala:14:9]
wire element_reset_domain_auto_clock_in_clock; // @[ClockDomain.scala:14:9]
wire element_reset_domain_auto_element_csbus0_fixedClockNode_anon_out_reset; // @[ClockDomain.scala:14:9]
wire element_reset_domain_auto_element_csbus0_fixedClockNode_anon_out_clock; // @[ClockDomain.scala:14:9]
wire auto_element_reset_domain_element_all_clock_groups_in_1_member_ccbus0_0_clock_0 = auto_element_reset_domain_element_all_clock_groups_in_1_member_ccbus0_0_clock; // @[ClockDomain.scala:14:9]
wire auto_element_reset_domain_element_all_clock_groups_in_1_member_ccbus0_0_reset_0 = auto_element_reset_domain_element_all_clock_groups_in_1_member_ccbus0_0_reset; // @[ClockDomain.scala:14:9]
wire auto_element_reset_domain_element_all_clock_groups_in_0_member_csbus0_0_clock_0 = auto_element_reset_domain_element_all_clock_groups_in_0_member_csbus0_0_clock; // @[ClockDomain.scala:14:9]
wire auto_element_reset_domain_element_all_clock_groups_in_0_member_csbus0_0_reset_0 = auto_element_reset_domain_element_all_clock_groups_in_0_member_csbus0_0_reset; // @[ClockDomain.scala:14:9]
wire [2:0] auto_element_reset_domain_element_tile_hart_id_nodes_in_3_0 = auto_element_reset_domain_element_tile_hart_id_nodes_in_3; // @[ClockDomain.scala:14:9]
wire [2:0] auto_element_reset_domain_element_tile_hart_id_nodes_in_2_0 = auto_element_reset_domain_element_tile_hart_id_nodes_in_2; // @[ClockDomain.scala:14:9]
wire [2:0] auto_element_reset_domain_element_tile_hart_id_nodes_in_1_0 = auto_element_reset_domain_element_tile_hart_id_nodes_in_1; // @[ClockDomain.scala:14:9]
wire [2:0] auto_element_reset_domain_element_tile_hart_id_nodes_in_0_0 = auto_element_reset_domain_element_tile_hart_id_nodes_in_0; // @[ClockDomain.scala:14:9]
wire auto_element_reset_domain_element_debug_nodes_in_3_sync_0_0 = auto_element_reset_domain_element_debug_nodes_in_3_sync_0; // @[ClockDomain.scala:14:9]
wire auto_element_reset_domain_element_debug_nodes_in_2_sync_0_0 = auto_element_reset_domain_element_debug_nodes_in_2_sync_0; // @[ClockDomain.scala:14:9]
wire auto_element_reset_domain_element_debug_nodes_in_1_sync_0_0 = auto_element_reset_domain_element_debug_nodes_in_1_sync_0; // @[ClockDomain.scala:14:9]
wire auto_element_reset_domain_element_debug_nodes_in_0_sync_0_0 = auto_element_reset_domain_element_debug_nodes_in_0_sync_0; // @[ClockDomain.scala:14:9]
wire auto_int_in_clock_xing_in_11_sync_0_0 = auto_int_in_clock_xing_in_11_sync_0; // @[ClockDomain.scala:14:9]
wire auto_int_in_clock_xing_in_10_sync_0_0 = auto_int_in_clock_xing_in_10_sync_0; // @[ClockDomain.scala:14:9]
wire auto_int_in_clock_xing_in_9_sync_0_0 = auto_int_in_clock_xing_in_9_sync_0; // @[ClockDomain.scala:14:9]
wire auto_int_in_clock_xing_in_8_sync_0_0 = auto_int_in_clock_xing_in_8_sync_0; // @[ClockDomain.scala:14:9]
wire auto_int_in_clock_xing_in_7_sync_0_0 = auto_int_in_clock_xing_in_7_sync_0; // @[ClockDomain.scala:14:9]
wire auto_int_in_clock_xing_in_6_sync_0_0 = auto_int_in_clock_xing_in_6_sync_0; // @[ClockDomain.scala:14:9]
wire auto_int_in_clock_xing_in_5_sync_0_0 = auto_int_in_clock_xing_in_5_sync_0; // @[ClockDomain.scala:14:9]
wire auto_int_in_clock_xing_in_4_sync_0_0 = auto_int_in_clock_xing_in_4_sync_0; // @[ClockDomain.scala:14:9]
wire auto_int_in_clock_xing_in_3_sync_0_0 = auto_int_in_clock_xing_in_3_sync_0; // @[ClockDomain.scala:14:9]
wire auto_int_in_clock_xing_in_3_sync_1_0 = auto_int_in_clock_xing_in_3_sync_1; // @[ClockDomain.scala:14:9]
wire auto_int_in_clock_xing_in_2_sync_0_0 = auto_int_in_clock_xing_in_2_sync_0; // @[ClockDomain.scala:14:9]
wire auto_int_in_clock_xing_in_2_sync_1_0 = auto_int_in_clock_xing_in_2_sync_1; // @[ClockDomain.scala:14:9]
wire auto_int_in_clock_xing_in_1_sync_0_0 = auto_int_in_clock_xing_in_1_sync_0; // @[ClockDomain.scala:14:9]
wire auto_int_in_clock_xing_in_1_sync_1_0 = auto_int_in_clock_xing_in_1_sync_1; // @[ClockDomain.scala:14:9]
wire auto_int_in_clock_xing_in_0_sync_0_0 = auto_int_in_clock_xing_in_0_sync_0; // @[ClockDomain.scala:14:9]
wire auto_int_in_clock_xing_in_0_sync_1_0 = auto_int_in_clock_xing_in_0_sync_1; // @[ClockDomain.scala:14:9]
wire auto_tl_master_clock_xing_out_a_ready_0 = auto_tl_master_clock_xing_out_a_ready; // @[ClockDomain.scala:14:9]
wire auto_tl_master_clock_xing_out_b_valid_0 = auto_tl_master_clock_xing_out_b_valid; // @[ClockDomain.scala:14:9]
wire [1:0] auto_tl_master_clock_xing_out_b_bits_param_0 = auto_tl_master_clock_xing_out_b_bits_param; // @[ClockDomain.scala:14:9]
wire [3:0] auto_tl_master_clock_xing_out_b_bits_source_0 = auto_tl_master_clock_xing_out_b_bits_source; // @[ClockDomain.scala:14:9]
wire [31:0] auto_tl_master_clock_xing_out_b_bits_address_0 = auto_tl_master_clock_xing_out_b_bits_address; // @[ClockDomain.scala:14:9]
wire auto_tl_master_clock_xing_out_c_ready_0 = auto_tl_master_clock_xing_out_c_ready; // @[ClockDomain.scala:14:9]
wire auto_tl_master_clock_xing_out_d_valid_0 = auto_tl_master_clock_xing_out_d_valid; // @[ClockDomain.scala:14:9]
wire [2:0] auto_tl_master_clock_xing_out_d_bits_opcode_0 = auto_tl_master_clock_xing_out_d_bits_opcode; // @[ClockDomain.scala:14:9]
wire [1:0] auto_tl_master_clock_xing_out_d_bits_param_0 = auto_tl_master_clock_xing_out_d_bits_param; // @[ClockDomain.scala:14:9]
wire [3:0] auto_tl_master_clock_xing_out_d_bits_size_0 = auto_tl_master_clock_xing_out_d_bits_size; // @[ClockDomain.scala:14:9]
wire [3:0] auto_tl_master_clock_xing_out_d_bits_source_0 = auto_tl_master_clock_xing_out_d_bits_source; // @[ClockDomain.scala:14:9]
wire [2:0] auto_tl_master_clock_xing_out_d_bits_sink_0 = auto_tl_master_clock_xing_out_d_bits_sink; // @[ClockDomain.scala:14:9]
wire auto_tl_master_clock_xing_out_d_bits_denied_0 = auto_tl_master_clock_xing_out_d_bits_denied; // @[ClockDomain.scala:14:9]
wire [63:0] auto_tl_master_clock_xing_out_d_bits_data_0 = auto_tl_master_clock_xing_out_d_bits_data; // @[ClockDomain.scala:14:9]
wire auto_tl_master_clock_xing_out_d_bits_corrupt_0 = auto_tl_master_clock_xing_out_d_bits_corrupt; // @[ClockDomain.scala:14:9]
wire auto_tl_master_clock_xing_out_e_ready_0 = auto_tl_master_clock_xing_out_e_ready; // @[ClockDomain.scala:14:9]
wire [31:0] auto_element_reset_domain_element_trace_core_nodes_out_3_group_0_iaddr = 32'h0; // @[ClockDomain.scala:14:9]
wire [31:0] auto_element_reset_domain_element_trace_core_nodes_out_3_tval = 32'h0; // @[ClockDomain.scala:14:9]
wire [31:0] auto_element_reset_domain_element_trace_core_nodes_out_3_cause = 32'h0; // @[ClockDomain.scala:14:9]
wire [31:0] auto_element_reset_domain_element_trace_core_nodes_out_2_group_0_iaddr = 32'h0; // @[ClockDomain.scala:14:9]
wire [31:0] auto_element_reset_domain_element_trace_core_nodes_out_2_tval = 32'h0; // @[ClockDomain.scala:14:9]
wire [31:0] auto_element_reset_domain_element_trace_core_nodes_out_2_cause = 32'h0; // @[ClockDomain.scala:14:9]
wire [31:0] auto_element_reset_domain_element_trace_core_nodes_out_1_group_0_iaddr = 32'h0; // @[ClockDomain.scala:14:9]
wire [31:0] auto_element_reset_domain_element_trace_core_nodes_out_1_tval = 32'h0; // @[ClockDomain.scala:14:9]
wire [31:0] auto_element_reset_domain_element_trace_core_nodes_out_1_cause = 32'h0; // @[ClockDomain.scala:14:9]
wire [31:0] auto_element_reset_domain_element_trace_core_nodes_out_0_group_0_iaddr = 32'h0; // @[ClockDomain.scala:14:9]
wire [31:0] auto_element_reset_domain_element_trace_core_nodes_out_0_tval = 32'h0; // @[ClockDomain.scala:14:9]
wire [31:0] auto_element_reset_domain_element_trace_core_nodes_out_0_cause = 32'h0; // @[ClockDomain.scala:14:9]
wire [31:0] element_reset_domain_auto_element_trace_core_nodes_out_3_group_0_iaddr = 32'h0; // @[ClockDomain.scala:14:9]
wire [31:0] element_reset_domain_auto_element_trace_core_nodes_out_3_tval = 32'h0; // @[ClockDomain.scala:14:9]
wire [31:0] element_reset_domain_auto_element_trace_core_nodes_out_3_cause = 32'h0; // @[ClockDomain.scala:14:9]
wire [31:0] element_reset_domain_auto_element_trace_core_nodes_out_2_group_0_iaddr = 32'h0; // @[ClockDomain.scala:14:9]
wire [31:0] element_reset_domain_auto_element_trace_core_nodes_out_2_tval = 32'h0; // @[ClockDomain.scala:14:9]
wire [31:0] element_reset_domain_auto_element_trace_core_nodes_out_2_cause = 32'h0; // @[ClockDomain.scala:14:9]
wire [31:0] element_reset_domain_auto_element_trace_core_nodes_out_1_group_0_iaddr = 32'h0; // @[ClockDomain.scala:14:9]
wire [31:0] element_reset_domain_auto_element_trace_core_nodes_out_1_tval = 32'h0; // @[ClockDomain.scala:14:9]
wire [31:0] element_reset_domain_auto_element_trace_core_nodes_out_1_cause = 32'h0; // @[ClockDomain.scala:14:9]
wire [31:0] element_reset_domain_auto_element_trace_core_nodes_out_0_group_0_iaddr = 32'h0; // @[ClockDomain.scala:14:9]
wire [31:0] element_reset_domain_auto_element_trace_core_nodes_out_0_tval = 32'h0; // @[ClockDomain.scala:14:9]
wire [31:0] element_reset_domain_auto_element_trace_core_nodes_out_0_cause = 32'h0; // @[ClockDomain.scala:14:9]
wire [3:0] auto_element_reset_domain_element_trace_core_nodes_out_3_group_0_itype = 4'h0; // @[ClockDomain.scala:14:9]
wire [3:0] auto_element_reset_domain_element_trace_core_nodes_out_3_priv = 4'h0; // @[ClockDomain.scala:14:9]
wire [3:0] auto_element_reset_domain_element_trace_core_nodes_out_2_group_0_itype = 4'h0; // @[ClockDomain.scala:14:9]
wire [3:0] auto_element_reset_domain_element_trace_core_nodes_out_2_priv = 4'h0; // @[ClockDomain.scala:14:9]
wire [3:0] auto_element_reset_domain_element_trace_core_nodes_out_1_group_0_itype = 4'h0; // @[ClockDomain.scala:14:9]
wire [3:0] auto_element_reset_domain_element_trace_core_nodes_out_1_priv = 4'h0; // @[ClockDomain.scala:14:9]
wire [3:0] auto_element_reset_domain_element_trace_core_nodes_out_0_group_0_itype = 4'h0; // @[ClockDomain.scala:14:9]
wire [3:0] auto_element_reset_domain_element_trace_core_nodes_out_0_priv = 4'h0; // @[ClockDomain.scala:14:9]
wire [3:0] element_reset_domain_auto_element_trace_core_nodes_out_3_group_0_itype = 4'h0; // @[ClockDomain.scala:14:9]
wire [3:0] element_reset_domain_auto_element_trace_core_nodes_out_3_priv = 4'h0; // @[ClockDomain.scala:14:9]
wire [3:0] element_reset_domain_auto_element_trace_core_nodes_out_2_group_0_itype = 4'h0; // @[ClockDomain.scala:14:9]
wire [3:0] element_reset_domain_auto_element_trace_core_nodes_out_2_priv = 4'h0; // @[ClockDomain.scala:14:9]
wire [3:0] element_reset_domain_auto_element_trace_core_nodes_out_1_group_0_itype = 4'h0; // @[ClockDomain.scala:14:9]
wire [3:0] element_reset_domain_auto_element_trace_core_nodes_out_1_priv = 4'h0; // @[ClockDomain.scala:14:9]
wire [3:0] element_reset_domain_auto_element_trace_core_nodes_out_0_group_0_itype = 4'h0; // @[ClockDomain.scala:14:9]
wire [3:0] element_reset_domain_auto_element_trace_core_nodes_out_0_priv = 4'h0; // @[ClockDomain.scala:14:9]
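  // Tile reset-vector nodes for harts 0-3 are tied to the constant 32'h10000.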
wire [31:0] auto_element_reset_domain_element_tile_reset_vector_nodes_in_3 = 32'h10000; // @[ClockDomain.scala:14:9]
wire [31:0] auto_element_reset_domain_element_tile_reset_vector_nodes_in_2 = 32'h10000; // @[ClockDomain.scala:14:9]
wire [31:0] auto_element_reset_domain_element_tile_reset_vector_nodes_in_1 = 32'h10000; // @[ClockDomain.scala:14:9]
wire [31:0] auto_element_reset_domain_element_tile_reset_vector_nodes_in_0 = 32'h10000; // @[ClockDomain.scala:14:9]
wire [31:0] element_reset_domain_auto_element_tile_reset_vector_nodes_in_3 = 32'h10000; // @[ClockDomain.scala:14:9]
wire [31:0] element_reset_domain_auto_element_tile_reset_vector_nodes_in_2 = 32'h10000; // @[ClockDomain.scala:14:9]
wire [31:0] element_reset_domain_auto_element_tile_reset_vector_nodes_in_1 = 32'h10000; // @[ClockDomain.scala:14:9]
wire [31:0] element_reset_domain_auto_element_tile_reset_vector_nodes_in_0 = 32'h10000; // @[ClockDomain.scala:14:9]
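  // Unused TL master B-channel fields are driven with fixed values (opcode 3'h6 = Probe, size 4'h6, full byte mask, zero data).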
wire [2:0] auto_tl_master_clock_xing_out_b_bits_opcode = 3'h6; // @[ClockDomain.scala:14:9]
wire [2:0] tlMasterClockXingOut_b_bits_opcode = 3'h6; // @[MixedNode.scala:542:17]
wire [2:0] tlMasterClockXingIn_b_bits_opcode = 3'h6; // @[MixedNode.scala:551:17]
wire [3:0] auto_tl_master_clock_xing_out_b_bits_size = 4'h6; // @[ClockDomain.scala:14:9]
wire [3:0] tlMasterClockXingOut_b_bits_size = 4'h6; // @[MixedNode.scala:542:17]
wire [3:0] tlMasterClockXingIn_b_bits_size = 4'h6; // @[MixedNode.scala:551:17]
wire [7:0] auto_tl_master_clock_xing_out_b_bits_mask = 8'hFF; // @[ClockDomain.scala:14:9]
wire [7:0] tlMasterClockXingOut_b_bits_mask = 8'hFF; // @[MixedNode.scala:542:17]
wire [7:0] tlMasterClockXingIn_b_bits_mask = 8'hFF; // @[MixedNode.scala:551:17]
wire [63:0] auto_tl_master_clock_xing_out_b_bits_data = 64'h0; // @[ClockDomain.scala:14:9]
wire [63:0] tlMasterClockXingOut_b_bits_data = 64'h0; // @[MixedNode.scala:542:17]
wire [63:0] tlMasterClockXingIn_b_bits_data = 64'h0; // @[MixedNode.scala:551:17]
wire auto_element_reset_domain_element_trace_core_nodes_out_3_group_0_iretire = 1'h0; // @[ClockDomain.scala:14:9]
wire auto_element_reset_domain_element_trace_core_nodes_out_3_group_0_ilastsize = 1'h0; // @[ClockDomain.scala:14:9]
wire auto_element_reset_domain_element_trace_core_nodes_out_2_group_0_iretire = 1'h0; // @[ClockDomain.scala:14:9]
wire auto_element_reset_domain_element_trace_core_nodes_out_2_group_0_ilastsize = 1'h0; // @[ClockDomain.scala:14:9]
wire auto_element_reset_domain_element_trace_core_nodes_out_1_group_0_iretire = 1'h0; // @[ClockDomain.scala:14:9]
wire auto_element_reset_domain_element_trace_core_nodes_out_1_group_0_ilastsize = 1'h0; // @[ClockDomain.scala:14:9]
wire auto_element_reset_domain_element_trace_core_nodes_out_0_group_0_iretire = 1'h0; // @[ClockDomain.scala:14:9]
wire auto_element_reset_domain_element_trace_core_nodes_out_0_group_0_ilastsize = 1'h0; // @[ClockDomain.scala:14:9]
wire auto_tl_master_clock_xing_out_b_bits_corrupt = 1'h0; // @[ClockDomain.scala:14:9]
wire _childClock_T = 1'h0; // @[LazyModuleImp.scala:160:25]
wire element_reset_domain_auto_element_xbar_anon_out_2_0 = 1'h0; // @[ClockDomain.scala:14:9]
wire element_reset_domain_auto_element_xbar_anon_out_2_1 = 1'h0; // @[ClockDomain.scala:14:9]
wire element_reset_domain_auto_element_xbar_anon_out_2_2 = 1'h0; // @[ClockDomain.scala:14:9]
wire element_reset_domain_auto_element_xbar_anon_out_2_3 = 1'h0; // @[ClockDomain.scala:14:9]
wire element_reset_domain_auto_element_xbar_anon_out_2_4 = 1'h0; // @[ClockDomain.scala:14:9]
wire element_reset_domain_auto_element_xbar_anon_out_1_4 = 1'h0; // @[ClockDomain.scala:14:9]
wire element_reset_domain_auto_element_xbar_anon_out_0_0 = 1'h0; // @[ClockDomain.scala:14:9]
wire element_reset_domain_auto_element_xbar_anon_out_0_1 = 1'h0; // @[ClockDomain.scala:14:9]
wire element_reset_domain_auto_element_xbar_anon_out_0_2 = 1'h0; // @[ClockDomain.scala:14:9]
wire element_reset_domain_auto_element_xbar_anon_out_0_3 = 1'h0; // @[ClockDomain.scala:14:9]
wire element_reset_domain_auto_element_xbar_anon_out_0_4 = 1'h0; // @[ClockDomain.scala:14:9]
wire element_reset_domain_auto_element_trace_core_nodes_out_3_group_0_iretire = 1'h0; // @[ClockDomain.scala:14:9]
wire element_reset_domain_auto_element_trace_core_nodes_out_3_group_0_ilastsize = 1'h0; // @[ClockDomain.scala:14:9]
wire element_reset_domain_auto_element_trace_core_nodes_out_2_group_0_iretire = 1'h0; // @[ClockDomain.scala:14:9]
wire element_reset_domain_auto_element_trace_core_nodes_out_2_group_0_ilastsize = 1'h0; // @[ClockDomain.scala:14:9]
wire element_reset_domain_auto_element_trace_core_nodes_out_1_group_0_iretire = 1'h0; // @[ClockDomain.scala:14:9]
wire element_reset_domain_auto_element_trace_core_nodes_out_1_group_0_ilastsize = 1'h0; // @[ClockDomain.scala:14:9]
wire element_reset_domain_auto_element_trace_core_nodes_out_0_group_0_iretire = 1'h0; // @[ClockDomain.scala:14:9]
wire element_reset_domain_auto_element_trace_core_nodes_out_0_group_0_ilastsize = 1'h0; // @[ClockDomain.scala:14:9]
wire element_reset_domain__childClock_T = 1'h0; // @[LazyModuleImp.scala:160:25]
wire clockNode_childClock = 1'h0; // @[LazyModuleImp.scala:155:31]
wire clockNode_childReset = 1'h0; // @[LazyModuleImp.scala:158:31]
wire clockNode__childClock_T = 1'h0; // @[LazyModuleImp.scala:160:25]
wire tlMasterClockXingOut_b_bits_corrupt = 1'h0; // @[MixedNode.scala:542:17]
wire tlMasterClockXingIn_b_bits_corrupt = 1'h0; // @[MixedNode.scala:551:17]
wire element_reset_domain_auto_element_all_clock_groups_in_1_member_ccbus0_0_clock = auto_element_reset_domain_element_all_clock_groups_in_1_member_ccbus0_0_clock_0; // @[ClockDomain.scala:14:9]
wire element_reset_domain_auto_element_all_clock_groups_in_1_member_ccbus0_0_reset = auto_element_reset_domain_element_all_clock_groups_in_1_member_ccbus0_0_reset_0; // @[ClockDomain.scala:14:9]
wire element_reset_domain_auto_element_all_clock_groups_in_0_member_csbus0_0_clock = auto_element_reset_domain_element_all_clock_groups_in_0_member_csbus0_0_clock_0; // @[ClockDomain.scala:14:9]
wire element_reset_domain_auto_element_all_clock_groups_in_0_member_csbus0_0_reset = auto_element_reset_domain_element_all_clock_groups_in_0_member_csbus0_0_reset_0; // @[ClockDomain.scala:14:9]
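  // Per-hart instruction trace outputs (harts 0-3), forwarded to the auto_* output ports by the assigns further below.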
wire element_reset_domain_auto_element_trace_nodes_out_3_insns_0_valid; // @[ClockDomain.scala:14:9]
wire [39:0] element_reset_domain_auto_element_trace_nodes_out_3_insns_0_iaddr; // @[ClockDomain.scala:14:9]
wire [31:0] element_reset_domain_auto_element_trace_nodes_out_3_insns_0_insn; // @[ClockDomain.scala:14:9]
wire [2:0] element_reset_domain_auto_element_trace_nodes_out_3_insns_0_priv; // @[ClockDomain.scala:14:9]
wire element_reset_domain_auto_element_trace_nodes_out_3_insns_0_exception; // @[ClockDomain.scala:14:9]
wire element_reset_domain_auto_element_trace_nodes_out_3_insns_0_interrupt; // @[ClockDomain.scala:14:9]
wire [63:0] element_reset_domain_auto_element_trace_nodes_out_3_insns_0_cause; // @[ClockDomain.scala:14:9]
wire [39:0] element_reset_domain_auto_element_trace_nodes_out_3_insns_0_tval; // @[ClockDomain.scala:14:9]
wire [63:0] element_reset_domain_auto_element_trace_nodes_out_3_time; // @[ClockDomain.scala:14:9]
wire element_reset_domain_auto_element_trace_nodes_out_2_insns_0_valid; // @[ClockDomain.scala:14:9]
wire [39:0] element_reset_domain_auto_element_trace_nodes_out_2_insns_0_iaddr; // @[ClockDomain.scala:14:9]
wire [31:0] element_reset_domain_auto_element_trace_nodes_out_2_insns_0_insn; // @[ClockDomain.scala:14:9]
wire [2:0] element_reset_domain_auto_element_trace_nodes_out_2_insns_0_priv; // @[ClockDomain.scala:14:9]
wire element_reset_domain_auto_element_trace_nodes_out_2_insns_0_exception; // @[ClockDomain.scala:14:9]
wire element_reset_domain_auto_element_trace_nodes_out_2_insns_0_interrupt; // @[ClockDomain.scala:14:9]
wire [63:0] element_reset_domain_auto_element_trace_nodes_out_2_insns_0_cause; // @[ClockDomain.scala:14:9]
wire [39:0] element_reset_domain_auto_element_trace_nodes_out_2_insns_0_tval; // @[ClockDomain.scala:14:9]
wire [63:0] element_reset_domain_auto_element_trace_nodes_out_2_time; // @[ClockDomain.scala:14:9]
wire element_reset_domain_auto_element_trace_nodes_out_1_insns_0_valid; // @[ClockDomain.scala:14:9]
wire [39:0] element_reset_domain_auto_element_trace_nodes_out_1_insns_0_iaddr; // @[ClockDomain.scala:14:9]
wire [31:0] element_reset_domain_auto_element_trace_nodes_out_1_insns_0_insn; // @[ClockDomain.scala:14:9]
wire [2:0] element_reset_domain_auto_element_trace_nodes_out_1_insns_0_priv; // @[ClockDomain.scala:14:9]
wire element_reset_domain_auto_element_trace_nodes_out_1_insns_0_exception; // @[ClockDomain.scala:14:9]
wire element_reset_domain_auto_element_trace_nodes_out_1_insns_0_interrupt; // @[ClockDomain.scala:14:9]
wire [63:0] element_reset_domain_auto_element_trace_nodes_out_1_insns_0_cause; // @[ClockDomain.scala:14:9]
wire [39:0] element_reset_domain_auto_element_trace_nodes_out_1_insns_0_tval; // @[ClockDomain.scala:14:9]
wire [63:0] element_reset_domain_auto_element_trace_nodes_out_1_time; // @[ClockDomain.scala:14:9]
wire element_reset_domain_auto_element_trace_nodes_out_0_insns_0_valid; // @[ClockDomain.scala:14:9]
wire [39:0] element_reset_domain_auto_element_trace_nodes_out_0_insns_0_iaddr; // @[ClockDomain.scala:14:9]
wire [31:0] element_reset_domain_auto_element_trace_nodes_out_0_insns_0_insn; // @[ClockDomain.scala:14:9]
wire [2:0] element_reset_domain_auto_element_trace_nodes_out_0_insns_0_priv; // @[ClockDomain.scala:14:9]
wire element_reset_domain_auto_element_trace_nodes_out_0_insns_0_exception; // @[ClockDomain.scala:14:9]
wire element_reset_domain_auto_element_trace_nodes_out_0_insns_0_interrupt; // @[ClockDomain.scala:14:9]
wire [63:0] element_reset_domain_auto_element_trace_nodes_out_0_insns_0_cause; // @[ClockDomain.scala:14:9]
wire [39:0] element_reset_domain_auto_element_trace_nodes_out_0_insns_0_tval; // @[ClockDomain.scala:14:9]
wire [63:0] element_reset_domain_auto_element_trace_nodes_out_0_time; // @[ClockDomain.scala:14:9]
wire [2:0] element_reset_domain_auto_element_tile_hart_id_nodes_in_3 = auto_element_reset_domain_element_tile_hart_id_nodes_in_3_0; // @[ClockDomain.scala:14:9]
wire [2:0] element_reset_domain_auto_element_tile_hart_id_nodes_in_2 = auto_element_reset_domain_element_tile_hart_id_nodes_in_2_0; // @[ClockDomain.scala:14:9]
wire [2:0] element_reset_domain_auto_element_tile_hart_id_nodes_in_1 = auto_element_reset_domain_element_tile_hart_id_nodes_in_1_0; // @[ClockDomain.scala:14:9]
wire [2:0] element_reset_domain_auto_element_tile_hart_id_nodes_in_0 = auto_element_reset_domain_element_tile_hart_id_nodes_in_0_0; // @[ClockDomain.scala:14:9]
wire element_reset_domain_auto_element_debug_nodes_in_3_sync_0 = auto_element_reset_domain_element_debug_nodes_in_3_sync_0_0; // @[ClockDomain.scala:14:9]
wire element_reset_domain_auto_element_debug_nodes_in_2_sync_0 = auto_element_reset_domain_element_debug_nodes_in_2_sync_0_0; // @[ClockDomain.scala:14:9]
wire element_reset_domain_auto_element_debug_nodes_in_1_sync_0 = auto_element_reset_domain_element_debug_nodes_in_1_sync_0_0; // @[ClockDomain.scala:14:9]
wire element_reset_domain_auto_element_debug_nodes_in_0_sync_0 = auto_element_reset_domain_element_debug_nodes_in_0_sync_0_0; // @[ClockDomain.scala:14:9]
wire intOutClockXingOut_2_sync_0; // @[MixedNode.scala:542:17]
wire intOutClockXingOut_2_sync_1; // @[MixedNode.scala:542:17]
wire intOutClockXingOut_2_sync_2; // @[MixedNode.scala:542:17]
wire intOutClockXingOut_2_sync_3; // @[MixedNode.scala:542:17]
wire intOutClockXingOut_2_sync_4; // @[MixedNode.scala:542:17]
wire intOutClockXingOut_1_sync_0; // @[MixedNode.scala:542:17]
wire intOutClockXingOut_1_sync_1; // @[MixedNode.scala:542:17]
wire intOutClockXingOut_1_sync_2; // @[MixedNode.scala:542:17]
wire intOutClockXingOut_1_sync_3; // @[MixedNode.scala:542:17]
wire intOutClockXingOut_1_sync_4; // @[MixedNode.scala:542:17]
wire intOutClockXingOut_sync_0; // @[MixedNode.scala:542:17]
wire intOutClockXingOut_sync_1; // @[MixedNode.scala:542:17]
wire intOutClockXingOut_sync_2; // @[MixedNode.scala:542:17]
wire intOutClockXingOut_sync_3; // @[MixedNode.scala:542:17]
wire intOutClockXingOut_sync_4; // @[MixedNode.scala:542:17]
wire intInClockXingIn_11_sync_0 = auto_int_in_clock_xing_in_11_sync_0_0; // @[ClockDomain.scala:14:9]
wire intInClockXingIn_10_sync_0 = auto_int_in_clock_xing_in_10_sync_0_0; // @[ClockDomain.scala:14:9]
wire intInClockXingIn_9_sync_0 = auto_int_in_clock_xing_in_9_sync_0_0; // @[ClockDomain.scala:14:9]
wire intInClockXingIn_8_sync_0 = auto_int_in_clock_xing_in_8_sync_0_0; // @[ClockDomain.scala:14:9]
wire intInClockXingIn_7_sync_0 = auto_int_in_clock_xing_in_7_sync_0_0; // @[ClockDomain.scala:14:9]
wire intInClockXingIn_6_sync_0 = auto_int_in_clock_xing_in_6_sync_0_0; // @[ClockDomain.scala:14:9]
wire intInClockXingIn_5_sync_0 = auto_int_in_clock_xing_in_5_sync_0_0; // @[ClockDomain.scala:14:9]
wire intInClockXingIn_4_sync_0 = auto_int_in_clock_xing_in_4_sync_0_0; // @[ClockDomain.scala:14:9]
wire intInClockXingIn_3_sync_0 = auto_int_in_clock_xing_in_3_sync_0_0; // @[ClockDomain.scala:14:9]
wire intInClockXingIn_3_sync_1 = auto_int_in_clock_xing_in_3_sync_1_0; // @[ClockDomain.scala:14:9]
wire intInClockXingIn_2_sync_0 = auto_int_in_clock_xing_in_2_sync_0_0; // @[ClockDomain.scala:14:9]
wire intInClockXingIn_2_sync_1 = auto_int_in_clock_xing_in_2_sync_1_0; // @[ClockDomain.scala:14:9]
wire intInClockXingIn_1_sync_0 = auto_int_in_clock_xing_in_1_sync_0_0; // @[ClockDomain.scala:14:9]
wire intInClockXingIn_1_sync_1 = auto_int_in_clock_xing_in_1_sync_1_0; // @[ClockDomain.scala:14:9]
wire intInClockXingIn_sync_0 = auto_int_in_clock_xing_in_0_sync_0_0; // @[ClockDomain.scala:14:9]
wire intInClockXingIn_sync_1 = auto_int_in_clock_xing_in_0_sync_1_0; // @[ClockDomain.scala:14:9]
wire tlMasterClockXingOut_a_ready = auto_tl_master_clock_xing_out_a_ready_0; // @[ClockDomain.scala:14:9]
wire tlMasterClockXingOut_a_valid; // @[MixedNode.scala:542:17]
wire [2:0] tlMasterClockXingOut_a_bits_opcode; // @[MixedNode.scala:542:17]
wire [2:0] tlMasterClockXingOut_a_bits_param; // @[MixedNode.scala:542:17]
wire [3:0] tlMasterClockXingOut_a_bits_size; // @[MixedNode.scala:542:17]
wire [3:0] tlMasterClockXingOut_a_bits_source; // @[MixedNode.scala:542:17]
wire [31:0] tlMasterClockXingOut_a_bits_address; // @[MixedNode.scala:542:17]
wire [7:0] tlMasterClockXingOut_a_bits_mask; // @[MixedNode.scala:542:17]
wire [63:0] tlMasterClockXingOut_a_bits_data; // @[MixedNode.scala:542:17]
wire tlMasterClockXingOut_a_bits_corrupt; // @[MixedNode.scala:542:17]
wire tlMasterClockXingOut_b_ready; // @[MixedNode.scala:542:17]
wire tlMasterClockXingOut_b_valid = auto_tl_master_clock_xing_out_b_valid_0; // @[ClockDomain.scala:14:9]
wire [1:0] tlMasterClockXingOut_b_bits_param = auto_tl_master_clock_xing_out_b_bits_param_0; // @[ClockDomain.scala:14:9]
wire [3:0] tlMasterClockXingOut_b_bits_source = auto_tl_master_clock_xing_out_b_bits_source_0; // @[ClockDomain.scala:14:9]
wire [31:0] tlMasterClockXingOut_b_bits_address = auto_tl_master_clock_xing_out_b_bits_address_0; // @[ClockDomain.scala:14:9]
wire tlMasterClockXingOut_c_ready = auto_tl_master_clock_xing_out_c_ready_0; // @[ClockDomain.scala:14:9]
wire tlMasterClockXingOut_c_valid; // @[MixedNode.scala:542:17]
wire [2:0] tlMasterClockXingOut_c_bits_opcode; // @[MixedNode.scala:542:17]
wire [2:0] tlMasterClockXingOut_c_bits_param; // @[MixedNode.scala:542:17]
wire [3:0] tlMasterClockXingOut_c_bits_size; // @[MixedNode.scala:542:17]
wire [3:0] tlMasterClockXingOut_c_bits_source; // @[MixedNode.scala:542:17]
wire [31:0] tlMasterClockXingOut_c_bits_address; // @[MixedNode.scala:542:17]
wire [63:0] tlMasterClockXingOut_c_bits_data; // @[MixedNode.scala:542:17]
wire tlMasterClockXingOut_c_bits_corrupt; // @[MixedNode.scala:542:17]
wire tlMasterClockXingOut_d_ready; // @[MixedNode.scala:542:17]
wire tlMasterClockXingOut_d_valid = auto_tl_master_clock_xing_out_d_valid_0; // @[ClockDomain.scala:14:9]
wire [2:0] tlMasterClockXingOut_d_bits_opcode = auto_tl_master_clock_xing_out_d_bits_opcode_0; // @[ClockDomain.scala:14:9]
wire [1:0] tlMasterClockXingOut_d_bits_param = auto_tl_master_clock_xing_out_d_bits_param_0; // @[ClockDomain.scala:14:9]
wire [3:0] tlMasterClockXingOut_d_bits_size = auto_tl_master_clock_xing_out_d_bits_size_0; // @[ClockDomain.scala:14:9]
wire [3:0] tlMasterClockXingOut_d_bits_source = auto_tl_master_clock_xing_out_d_bits_source_0; // @[ClockDomain.scala:14:9]
wire [2:0] tlMasterClockXingOut_d_bits_sink = auto_tl_master_clock_xing_out_d_bits_sink_0; // @[ClockDomain.scala:14:9]
wire tlMasterClockXingOut_d_bits_denied = auto_tl_master_clock_xing_out_d_bits_denied_0; // @[ClockDomain.scala:14:9]
wire [63:0] tlMasterClockXingOut_d_bits_data = auto_tl_master_clock_xing_out_d_bits_data_0; // @[ClockDomain.scala:14:9]
wire tlMasterClockXingOut_d_bits_corrupt = auto_tl_master_clock_xing_out_d_bits_corrupt_0; // @[ClockDomain.scala:14:9]
wire tlMasterClockXingOut_e_ready = auto_tl_master_clock_xing_out_e_ready_0; // @[ClockDomain.scala:14:9]
wire tlMasterClockXingOut_e_valid; // @[MixedNode.scala:542:17]
wire [2:0] tlMasterClockXingOut_e_bits_sink; // @[MixedNode.scala:542:17]
wire auto_element_reset_domain_element_trace_nodes_out_3_insns_0_valid_0; // @[ClockDomain.scala:14:9]
wire [39:0] auto_element_reset_domain_element_trace_nodes_out_3_insns_0_iaddr_0; // @[ClockDomain.scala:14:9]
wire [31:0] auto_element_reset_domain_element_trace_nodes_out_3_insns_0_insn_0; // @[ClockDomain.scala:14:9]
wire [2:0] auto_element_reset_domain_element_trace_nodes_out_3_insns_0_priv_0; // @[ClockDomain.scala:14:9]
wire auto_element_reset_domain_element_trace_nodes_out_3_insns_0_exception_0; // @[ClockDomain.scala:14:9]
wire auto_element_reset_domain_element_trace_nodes_out_3_insns_0_interrupt_0; // @[ClockDomain.scala:14:9]
wire [63:0] auto_element_reset_domain_element_trace_nodes_out_3_insns_0_cause_0; // @[ClockDomain.scala:14:9]
wire [39:0] auto_element_reset_domain_element_trace_nodes_out_3_insns_0_tval_0; // @[ClockDomain.scala:14:9]
wire [63:0] auto_element_reset_domain_element_trace_nodes_out_3_time_0; // @[ClockDomain.scala:14:9]
wire auto_element_reset_domain_element_trace_nodes_out_2_insns_0_valid_0; // @[ClockDomain.scala:14:9]
wire [39:0] auto_element_reset_domain_element_trace_nodes_out_2_insns_0_iaddr_0; // @[ClockDomain.scala:14:9]
wire [31:0] auto_element_reset_domain_element_trace_nodes_out_2_insns_0_insn_0; // @[ClockDomain.scala:14:9]
wire [2:0] auto_element_reset_domain_element_trace_nodes_out_2_insns_0_priv_0; // @[ClockDomain.scala:14:9]
wire auto_element_reset_domain_element_trace_nodes_out_2_insns_0_exception_0; // @[ClockDomain.scala:14:9]
wire auto_element_reset_domain_element_trace_nodes_out_2_insns_0_interrupt_0; // @[ClockDomain.scala:14:9]
wire [63:0] auto_element_reset_domain_element_trace_nodes_out_2_insns_0_cause_0; // @[ClockDomain.scala:14:9]
wire [39:0] auto_element_reset_domain_element_trace_nodes_out_2_insns_0_tval_0; // @[ClockDomain.scala:14:9]
wire [63:0] auto_element_reset_domain_element_trace_nodes_out_2_time_0; // @[ClockDomain.scala:14:9]
wire auto_element_reset_domain_element_trace_nodes_out_1_insns_0_valid_0; // @[ClockDomain.scala:14:9]
wire [39:0] auto_element_reset_domain_element_trace_nodes_out_1_insns_0_iaddr_0; // @[ClockDomain.scala:14:9]
wire [31:0] auto_element_reset_domain_element_trace_nodes_out_1_insns_0_insn_0; // @[ClockDomain.scala:14:9]
wire [2:0] auto_element_reset_domain_element_trace_nodes_out_1_insns_0_priv_0; // @[ClockDomain.scala:14:9]
wire auto_element_reset_domain_element_trace_nodes_out_1_insns_0_exception_0; // @[ClockDomain.scala:14:9]
wire auto_element_reset_domain_element_trace_nodes_out_1_insns_0_interrupt_0; // @[ClockDomain.scala:14:9]
wire [63:0] auto_element_reset_domain_element_trace_nodes_out_1_insns_0_cause_0; // @[ClockDomain.scala:14:9]
wire [39:0] auto_element_reset_domain_element_trace_nodes_out_1_insns_0_tval_0; // @[ClockDomain.scala:14:9]
wire [63:0] auto_element_reset_domain_element_trace_nodes_out_1_time_0; // @[ClockDomain.scala:14:9]
wire auto_element_reset_domain_element_trace_nodes_out_0_insns_0_valid_0; // @[ClockDomain.scala:14:9]
wire [39:0] auto_element_reset_domain_element_trace_nodes_out_0_insns_0_iaddr_0; // @[ClockDomain.scala:14:9]
wire [31:0] auto_element_reset_domain_element_trace_nodes_out_0_insns_0_insn_0; // @[ClockDomain.scala:14:9]
wire [2:0] auto_element_reset_domain_element_trace_nodes_out_0_insns_0_priv_0; // @[ClockDomain.scala:14:9]
wire auto_element_reset_domain_element_trace_nodes_out_0_insns_0_exception_0; // @[ClockDomain.scala:14:9]
wire auto_element_reset_domain_element_trace_nodes_out_0_insns_0_interrupt_0; // @[ClockDomain.scala:14:9]
wire [63:0] auto_element_reset_domain_element_trace_nodes_out_0_insns_0_cause_0; // @[ClockDomain.scala:14:9]
wire [39:0] auto_element_reset_domain_element_trace_nodes_out_0_insns_0_tval_0; // @[ClockDomain.scala:14:9]
wire [63:0] auto_element_reset_domain_element_trace_nodes_out_0_time_0; // @[ClockDomain.scala:14:9]
wire auto_int_out_clock_xing_out_2_sync_0_0; // @[ClockDomain.scala:14:9]
wire auto_int_out_clock_xing_out_2_sync_1_0; // @[ClockDomain.scala:14:9]
wire auto_int_out_clock_xing_out_2_sync_2_0; // @[ClockDomain.scala:14:9]
wire auto_int_out_clock_xing_out_2_sync_3_0; // @[ClockDomain.scala:14:9]
wire auto_int_out_clock_xing_out_2_sync_4_0; // @[ClockDomain.scala:14:9]
wire auto_int_out_clock_xing_out_1_sync_0_0; // @[ClockDomain.scala:14:9]
wire auto_int_out_clock_xing_out_1_sync_1_0; // @[ClockDomain.scala:14:9]
wire auto_int_out_clock_xing_out_1_sync_2_0; // @[ClockDomain.scala:14:9]
wire auto_int_out_clock_xing_out_1_sync_3_0; // @[ClockDomain.scala:14:9]
wire auto_int_out_clock_xing_out_1_sync_4_0; // @[ClockDomain.scala:14:9]
wire auto_int_out_clock_xing_out_0_sync_0_0; // @[ClockDomain.scala:14:9]
wire auto_int_out_clock_xing_out_0_sync_1_0; // @[ClockDomain.scala:14:9]
wire auto_int_out_clock_xing_out_0_sync_2_0; // @[ClockDomain.scala:14:9]
wire auto_int_out_clock_xing_out_0_sync_3_0; // @[ClockDomain.scala:14:9]
wire auto_int_out_clock_xing_out_0_sync_4_0; // @[ClockDomain.scala:14:9]
wire [2:0] auto_tl_master_clock_xing_out_a_bits_opcode_0; // @[ClockDomain.scala:14:9]
wire [2:0] auto_tl_master_clock_xing_out_a_bits_param_0; // @[ClockDomain.scala:14:9]
wire [3:0] auto_tl_master_clock_xing_out_a_bits_size_0; // @[ClockDomain.scala:14:9]
wire [3:0] auto_tl_master_clock_xing_out_a_bits_source_0; // @[ClockDomain.scala:14:9]
wire [31:0] auto_tl_master_clock_xing_out_a_bits_address_0; // @[ClockDomain.scala:14:9]
wire [7:0] auto_tl_master_clock_xing_out_a_bits_mask_0; // @[ClockDomain.scala:14:9]
wire [63:0] auto_tl_master_clock_xing_out_a_bits_data_0; // @[ClockDomain.scala:14:9]
wire auto_tl_master_clock_xing_out_a_bits_corrupt_0; // @[ClockDomain.scala:14:9]
wire auto_tl_master_clock_xing_out_a_valid_0; // @[ClockDomain.scala:14:9]
wire auto_tl_master_clock_xing_out_b_ready_0; // @[ClockDomain.scala:14:9]
wire [2:0] auto_tl_master_clock_xing_out_c_bits_opcode_0; // @[ClockDomain.scala:14:9]
wire [2:0] auto_tl_master_clock_xing_out_c_bits_param_0; // @[ClockDomain.scala:14:9]
wire [3:0] auto_tl_master_clock_xing_out_c_bits_size_0; // @[ClockDomain.scala:14:9]
wire [3:0] auto_tl_master_clock_xing_out_c_bits_source_0; // @[ClockDomain.scala:14:9]
wire [31:0] auto_tl_master_clock_xing_out_c_bits_address_0; // @[ClockDomain.scala:14:9]
wire [63:0] auto_tl_master_clock_xing_out_c_bits_data_0; // @[ClockDomain.scala:14:9]
wire auto_tl_master_clock_xing_out_c_bits_corrupt_0; // @[ClockDomain.scala:14:9]
wire auto_tl_master_clock_xing_out_c_valid_0; // @[ClockDomain.scala:14:9]
wire auto_tl_master_clock_xing_out_d_ready_0; // @[ClockDomain.scala:14:9]
wire [2:0] auto_tl_master_clock_xing_out_e_bits_sink_0; // @[ClockDomain.scala:14:9]
wire auto_tl_master_clock_xing_out_e_valid_0; // @[ClockDomain.scala:14:9]
wire tapClockNodeIn_clock; // @[MixedNode.scala:551:17]
wire tapClockNodeIn_reset; // @[MixedNode.scala:551:17]
wire childClock; // @[LazyModuleImp.scala:155:31]
wire childReset; // @[LazyModuleImp.scala:158:31]
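  // This module's implicit clock/reset are tapped from the csbus0 fixed clock node of the element reset domain.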
assign tapClockNodeIn_clock = element_reset_domain_auto_element_csbus0_fixedClockNode_anon_out_clock; // @[ClockDomain.scala:14:9]
assign tapClockNodeIn_reset = element_reset_domain_auto_element_csbus0_fixedClockNode_anon_out_reset; // @[ClockDomain.scala:14:9]
assign auto_element_reset_domain_element_trace_nodes_out_3_insns_0_valid_0 = element_reset_domain_auto_element_trace_nodes_out_3_insns_0_valid; // @[ClockDomain.scala:14:9]
assign auto_element_reset_domain_element_trace_nodes_out_3_insns_0_iaddr_0 = element_reset_domain_auto_element_trace_nodes_out_3_insns_0_iaddr; // @[ClockDomain.scala:14:9]
assign auto_element_reset_domain_element_trace_nodes_out_3_insns_0_insn_0 = element_reset_domain_auto_element_trace_nodes_out_3_insns_0_insn; // @[ClockDomain.scala:14:9]
assign auto_element_reset_domain_element_trace_nodes_out_3_insns_0_priv_0 = element_reset_domain_auto_element_trace_nodes_out_3_insns_0_priv; // @[ClockDomain.scala:14:9]
assign auto_element_reset_domain_element_trace_nodes_out_3_insns_0_exception_0 = element_reset_domain_auto_element_trace_nodes_out_3_insns_0_exception; // @[ClockDomain.scala:14:9]
assign auto_element_reset_domain_element_trace_nodes_out_3_insns_0_interrupt_0 = element_reset_domain_auto_element_trace_nodes_out_3_insns_0_interrupt; // @[ClockDomain.scala:14:9]
assign auto_element_reset_domain_element_trace_nodes_out_3_insns_0_cause_0 = element_reset_domain_auto_element_trace_nodes_out_3_insns_0_cause; // @[ClockDomain.scala:14:9]
assign auto_element_reset_domain_element_trace_nodes_out_3_insns_0_tval_0 = element_reset_domain_auto_element_trace_nodes_out_3_insns_0_tval; // @[ClockDomain.scala:14:9]
assign auto_element_reset_domain_element_trace_nodes_out_3_time_0 = element_reset_domain_auto_element_trace_nodes_out_3_time; // @[ClockDomain.scala:14:9]
assign auto_element_reset_domain_element_trace_nodes_out_2_insns_0_valid_0 = element_reset_domain_auto_element_trace_nodes_out_2_insns_0_valid; // @[ClockDomain.scala:14:9]
assign auto_element_reset_domain_element_trace_nodes_out_2_insns_0_iaddr_0 = element_reset_domain_auto_element_trace_nodes_out_2_insns_0_iaddr; // @[ClockDomain.scala:14:9]
assign auto_element_reset_domain_element_trace_nodes_out_2_insns_0_insn_0 = element_reset_domain_auto_element_trace_nodes_out_2_insns_0_insn; // @[ClockDomain.scala:14:9]
assign auto_element_reset_domain_element_trace_nodes_out_2_insns_0_priv_0 = element_reset_domain_auto_element_trace_nodes_out_2_insns_0_priv; // @[ClockDomain.scala:14:9]
assign auto_element_reset_domain_element_trace_nodes_out_2_insns_0_exception_0 = element_reset_domain_auto_element_trace_nodes_out_2_insns_0_exception; // @[ClockDomain.scala:14:9]
assign auto_element_reset_domain_element_trace_nodes_out_2_insns_0_interrupt_0 = element_reset_domain_auto_element_trace_nodes_out_2_insns_0_interrupt; // @[ClockDomain.scala:14:9]
assign auto_element_reset_domain_element_trace_nodes_out_2_insns_0_cause_0 = element_reset_domain_auto_element_trace_nodes_out_2_insns_0_cause; // @[ClockDomain.scala:14:9]
assign auto_element_reset_domain_element_trace_nodes_out_2_insns_0_tval_0 = element_reset_domain_auto_element_trace_nodes_out_2_insns_0_tval; // @[ClockDomain.scala:14:9]
assign auto_element_reset_domain_element_trace_nodes_out_2_time_0 = element_reset_domain_auto_element_trace_nodes_out_2_time; // @[ClockDomain.scala:14:9]
assign auto_element_reset_domain_element_trace_nodes_out_1_insns_0_valid_0 = element_reset_domain_auto_element_trace_nodes_out_1_insns_0_valid; // @[ClockDomain.scala:14:9]
assign auto_element_reset_domain_element_trace_nodes_out_1_insns_0_iaddr_0 = element_reset_domain_auto_element_trace_nodes_out_1_insns_0_iaddr; // @[ClockDomain.scala:14:9]
assign auto_element_reset_domain_element_trace_nodes_out_1_insns_0_insn_0 = element_reset_domain_auto_element_trace_nodes_out_1_insns_0_insn; // @[ClockDomain.scala:14:9]
assign auto_element_reset_domain_element_trace_nodes_out_1_insns_0_priv_0 = element_reset_domain_auto_element_trace_nodes_out_1_insns_0_priv; // @[ClockDomain.scala:14:9]
assign auto_element_reset_domain_element_trace_nodes_out_1_insns_0_exception_0 = element_reset_domain_auto_element_trace_nodes_out_1_insns_0_exception; // @[ClockDomain.scala:14:9]
assign auto_element_reset_domain_element_trace_nodes_out_1_insns_0_interrupt_0 = element_reset_domain_auto_element_trace_nodes_out_1_insns_0_interrupt; // @[ClockDomain.scala:14:9]
assign auto_element_reset_domain_element_trace_nodes_out_1_insns_0_cause_0 = element_reset_domain_auto_element_trace_nodes_out_1_insns_0_cause; // @[ClockDomain.scala:14:9]
assign auto_element_reset_domain_element_trace_nodes_out_1_insns_0_tval_0 = element_reset_domain_auto_element_trace_nodes_out_1_insns_0_tval; // @[ClockDomain.scala:14:9]
assign auto_element_reset_domain_element_trace_nodes_out_1_time_0 = element_reset_domain_auto_element_trace_nodes_out_1_time; // @[ClockDomain.scala:14:9]
assign auto_element_reset_domain_element_trace_nodes_out_0_insns_0_valid_0 = element_reset_domain_auto_element_trace_nodes_out_0_insns_0_valid; // @[ClockDomain.scala:14:9]
assign auto_element_reset_domain_element_trace_nodes_out_0_insns_0_iaddr_0 = element_reset_domain_auto_element_trace_nodes_out_0_insns_0_iaddr; // @[ClockDomain.scala:14:9]
assign auto_element_reset_domain_element_trace_nodes_out_0_insns_0_insn_0 = element_reset_domain_auto_element_trace_nodes_out_0_insns_0_insn; // @[ClockDomain.scala:14:9]
assign auto_element_reset_domain_element_trace_nodes_out_0_insns_0_priv_0 = element_reset_domain_auto_element_trace_nodes_out_0_insns_0_priv; // @[ClockDomain.scala:14:9]
assign auto_element_reset_domain_element_trace_nodes_out_0_insns_0_exception_0 = element_reset_domain_auto_element_trace_nodes_out_0_insns_0_exception; // @[ClockDomain.scala:14:9]
assign auto_element_reset_domain_element_trace_nodes_out_0_insns_0_interrupt_0 = element_reset_domain_auto_element_trace_nodes_out_0_insns_0_interrupt; // @[ClockDomain.scala:14:9]
assign auto_element_reset_domain_element_trace_nodes_out_0_insns_0_cause_0 = element_reset_domain_auto_element_trace_nodes_out_0_insns_0_cause; // @[ClockDomain.scala:14:9]
assign auto_element_reset_domain_element_trace_nodes_out_0_insns_0_tval_0 = element_reset_domain_auto_element_trace_nodes_out_0_insns_0_tval; // @[ClockDomain.scala:14:9]
assign auto_element_reset_domain_element_trace_nodes_out_0_time_0 = element_reset_domain_auto_element_trace_nodes_out_0_time; // @[ClockDomain.scala:14:9]
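  // Clock-node plumbing: the tapped clock/reset is routed through clockNode back into the element reset domain.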
wire clockNode_auto_anon_out_clock; // @[ClockGroup.scala:104:9]
wire element_reset_domain_clockNodeIn_clock = element_reset_domain_auto_clock_in_clock; // @[ClockDomain.scala:14:9]
wire clockNode_auto_anon_out_reset; // @[ClockGroup.scala:104:9]
wire [2:0] element_reset_domain_auto_element_buffer_out_a_bits_opcode; // @[ClockDomain.scala:14:9]
wire [2:0] element_reset_domain_auto_element_buffer_out_a_bits_param; // @[ClockDomain.scala:14:9]
wire [3:0] element_reset_domain_auto_element_buffer_out_a_bits_size; // @[ClockDomain.scala:14:9]
wire [3:0] element_reset_domain_auto_element_buffer_out_a_bits_source; // @[ClockDomain.scala:14:9]
wire [31:0] element_reset_domain_auto_element_buffer_out_a_bits_address; // @[ClockDomain.scala:14:9]
wire [7:0] element_reset_domain_auto_element_buffer_out_a_bits_mask; // @[ClockDomain.scala:14:9]
wire [63:0] element_reset_domain_auto_element_buffer_out_a_bits_data; // @[ClockDomain.scala:14:9]
wire element_reset_domain_auto_element_buffer_out_a_bits_corrupt; // @[ClockDomain.scala:14:9]
wire element_reset_domain_clockNodeIn_reset = element_reset_domain_auto_clock_in_reset; // @[ClockDomain.scala:14:9]
wire element_reset_domain_auto_element_buffer_out_a_ready; // @[ClockDomain.scala:14:9]
wire element_reset_domain_auto_element_buffer_out_a_valid; // @[ClockDomain.scala:14:9]
wire [2:0] element_reset_domain_auto_element_buffer_out_b_bits_opcode; // @[ClockDomain.scala:14:9]
wire [1:0] element_reset_domain_auto_element_buffer_out_b_bits_param; // @[ClockDomain.scala:14:9]
wire [3:0] element_reset_domain_auto_element_buffer_out_b_bits_size; // @[ClockDomain.scala:14:9]
wire [3:0] element_reset_domain_auto_element_buffer_out_b_bits_source; // @[ClockDomain.scala:14:9]
wire [31:0] element_reset_domain_auto_element_buffer_out_b_bits_address; // @[ClockDomain.scala:14:9]
wire [7:0] element_reset_domain_auto_element_buffer_out_b_bits_mask; // @[ClockDomain.scala:14:9]
wire [63:0] element_reset_domain_auto_element_buffer_out_b_bits_data; // @[ClockDomain.scala:14:9]
wire element_reset_domain_auto_element_buffer_out_b_bits_corrupt; // @[ClockDomain.scala:14:9]
wire element_reset_domain_auto_element_buffer_out_b_ready; // @[ClockDomain.scala:14:9]
wire element_reset_domain_auto_element_buffer_out_b_valid; // @[ClockDomain.scala:14:9]
wire [2:0] element_reset_domain_auto_element_buffer_out_c_bits_opcode; // @[ClockDomain.scala:14:9]
wire [2:0] element_reset_domain_auto_element_buffer_out_c_bits_param; // @[ClockDomain.scala:14:9]
wire [3:0] element_reset_domain_auto_element_buffer_out_c_bits_size; // @[ClockDomain.scala:14:9]
wire [3:0] element_reset_domain_auto_element_buffer_out_c_bits_source; // @[ClockDomain.scala:14:9]
wire [31:0] element_reset_domain_auto_element_buffer_out_c_bits_address; // @[ClockDomain.scala:14:9]
wire [63:0] element_reset_domain_auto_element_buffer_out_c_bits_data; // @[ClockDomain.scala:14:9]
wire element_reset_domain_auto_element_buffer_out_c_bits_corrupt; // @[ClockDomain.scala:14:9]
wire element_reset_domain_auto_element_buffer_out_c_ready; // @[ClockDomain.scala:14:9]
wire element_reset_domain_auto_element_buffer_out_c_valid; // @[ClockDomain.scala:14:9]
wire [2:0] element_reset_domain_auto_element_buffer_out_d_bits_opcode; // @[ClockDomain.scala:14:9]
wire [1:0] element_reset_domain_auto_element_buffer_out_d_bits_param; // @[ClockDomain.scala:14:9]
wire [3:0] element_reset_domain_auto_element_buffer_out_d_bits_size; // @[ClockDomain.scala:14:9]
wire [3:0] element_reset_domain_auto_element_buffer_out_d_bits_source; // @[ClockDomain.scala:14:9]
wire [2:0] element_reset_domain_auto_element_buffer_out_d_bits_sink; // @[ClockDomain.scala:14:9]
wire element_reset_domain_auto_element_buffer_out_d_bits_denied; // @[ClockDomain.scala:14:9]
wire [63:0] element_reset_domain_auto_element_buffer_out_d_bits_data; // @[ClockDomain.scala:14:9]
wire element_reset_domain_auto_element_buffer_out_d_bits_corrupt; // @[ClockDomain.scala:14:9]
wire element_reset_domain_auto_element_buffer_out_d_ready; // @[ClockDomain.scala:14:9]
wire element_reset_domain_auto_element_buffer_out_d_valid; // @[ClockDomain.scala:14:9]
wire [2:0] element_reset_domain_auto_element_buffer_out_e_bits_sink; // @[ClockDomain.scala:14:9]
wire element_reset_domain_auto_element_buffer_out_e_ready; // @[ClockDomain.scala:14:9]
wire element_reset_domain_auto_element_buffer_out_e_valid; // @[ClockDomain.scala:14:9]
wire element_reset_domain_auto_element_xbar_anon_out_1_0; // @[ClockDomain.scala:14:9]
wire element_reset_domain_auto_element_xbar_anon_out_1_1; // @[ClockDomain.scala:14:9]
wire element_reset_domain_auto_element_xbar_anon_out_1_2; // @[ClockDomain.scala:14:9]
wire element_reset_domain_auto_element_xbar_anon_out_1_3; // @[ClockDomain.scala:14:9]
wire element_reset_domain_auto_element_seip_nodes_in_3_0; // @[ClockDomain.scala:14:9]
wire element_reset_domain_auto_element_seip_nodes_in_2_0; // @[ClockDomain.scala:14:9]
wire element_reset_domain_auto_element_seip_nodes_in_1_0; // @[ClockDomain.scala:14:9]
wire element_reset_domain_auto_element_seip_nodes_in_0_0; // @[ClockDomain.scala:14:9]
wire element_reset_domain_auto_element_meip_nodes_in_3_0; // @[ClockDomain.scala:14:9]
wire element_reset_domain_auto_element_meip_nodes_in_2_0; // @[ClockDomain.scala:14:9]
wire element_reset_domain_auto_element_meip_nodes_in_1_0; // @[ClockDomain.scala:14:9]
wire element_reset_domain_auto_element_meip_nodes_in_0_0; // @[ClockDomain.scala:14:9]
wire element_reset_domain_auto_element_msip_nodes_in_3_0; // @[ClockDomain.scala:14:9]
wire element_reset_domain_auto_element_msip_nodes_in_3_1; // @[ClockDomain.scala:14:9]
wire element_reset_domain_auto_element_msip_nodes_in_2_0; // @[ClockDomain.scala:14:9]
wire element_reset_domain_auto_element_msip_nodes_in_2_1; // @[ClockDomain.scala:14:9]
wire element_reset_domain_auto_element_msip_nodes_in_1_0; // @[ClockDomain.scala:14:9]
wire element_reset_domain_auto_element_msip_nodes_in_1_1; // @[ClockDomain.scala:14:9]
wire element_reset_domain_auto_element_msip_nodes_in_0_0; // @[ClockDomain.scala:14:9]
wire element_reset_domain_auto_element_msip_nodes_in_0_1; // @[ClockDomain.scala:14:9]
wire element_reset_domain_childClock; // @[LazyModuleImp.scala:155:31]
wire element_reset_domain_childReset; // @[LazyModuleImp.scala:158:31]
assign element_reset_domain_childClock = element_reset_domain_clockNodeIn_clock; // @[MixedNode.scala:551:17]
assign element_reset_domain_childReset = element_reset_domain_clockNodeIn_reset; // @[MixedNode.scala:551:17]
wire tapClockNodeOut_clock; // @[MixedNode.scala:542:17]
wire clockNode_anonIn_clock = clockNode_auto_anon_in_clock; // @[ClockGroup.scala:104:9]
wire tapClockNodeOut_reset; // @[MixedNode.scala:542:17]
wire clockNode_anonOut_clock; // @[MixedNode.scala:542:17]
wire clockNode_anonIn_reset = clockNode_auto_anon_in_reset; // @[ClockGroup.scala:104:9]
assign element_reset_domain_auto_clock_in_clock = clockNode_auto_anon_out_clock; // @[ClockGroup.scala:104:9]
wire clockNode_anonOut_reset; // @[MixedNode.scala:542:17]
assign element_reset_domain_auto_clock_in_reset = clockNode_auto_anon_out_reset; // @[ClockGroup.scala:104:9]
assign clockNode_auto_anon_out_clock = clockNode_anonOut_clock; // @[ClockGroup.scala:104:9]
assign clockNode_auto_anon_out_reset = clockNode_anonOut_reset; // @[ClockGroup.scala:104:9]
assign clockNode_anonOut_clock = clockNode_anonIn_clock; // @[MixedNode.scala:542:17, :551:17]
assign clockNode_anonOut_reset = clockNode_anonIn_reset; // @[MixedNode.scala:542:17, :551:17]
assign clockNode_auto_anon_in_clock = tapClockNodeOut_clock; // @[ClockGroup.scala:104:9]
assign clockNode_auto_anon_in_reset = tapClockNodeOut_reset; // @[ClockGroup.scala:104:9]
assign childClock = tapClockNodeIn_clock; // @[MixedNode.scala:551:17]
assign tapClockNodeOut_clock = tapClockNodeIn_clock; // @[MixedNode.scala:542:17, :551:17]
assign childReset = tapClockNodeIn_reset; // @[MixedNode.scala:551:17]
assign tapClockNodeOut_reset = tapClockNodeIn_reset; // @[MixedNode.scala:542:17, :551:17]
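  // TL master clock-crossing node: the In and Out sides are wired straight through at this level (no logic here); A/C/E requests flow out and B/D responses flow back in.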
wire tlMasterClockXingIn_a_ready = tlMasterClockXingOut_a_ready; // @[MixedNode.scala:542:17, :551:17]
wire tlMasterClockXingIn_a_valid; // @[MixedNode.scala:551:17]
assign auto_tl_master_clock_xing_out_a_valid_0 = tlMasterClockXingOut_a_valid; // @[ClockDomain.scala:14:9]
wire [2:0] tlMasterClockXingIn_a_bits_opcode; // @[MixedNode.scala:551:17]
assign auto_tl_master_clock_xing_out_a_bits_opcode_0 = tlMasterClockXingOut_a_bits_opcode; // @[ClockDomain.scala:14:9]
wire [2:0] tlMasterClockXingIn_a_bits_param; // @[MixedNode.scala:551:17]
assign auto_tl_master_clock_xing_out_a_bits_param_0 = tlMasterClockXingOut_a_bits_param; // @[ClockDomain.scala:14:9]
wire [3:0] tlMasterClockXingIn_a_bits_size; // @[MixedNode.scala:551:17]
assign auto_tl_master_clock_xing_out_a_bits_size_0 = tlMasterClockXingOut_a_bits_size; // @[ClockDomain.scala:14:9]
wire [3:0] tlMasterClockXingIn_a_bits_source; // @[MixedNode.scala:551:17]
assign auto_tl_master_clock_xing_out_a_bits_source_0 = tlMasterClockXingOut_a_bits_source; // @[ClockDomain.scala:14:9]
wire [31:0] tlMasterClockXingIn_a_bits_address; // @[MixedNode.scala:551:17]
assign auto_tl_master_clock_xing_out_a_bits_address_0 = tlMasterClockXingOut_a_bits_address; // @[ClockDomain.scala:14:9]
wire [7:0] tlMasterClockXingIn_a_bits_mask; // @[MixedNode.scala:551:17]
assign auto_tl_master_clock_xing_out_a_bits_mask_0 = tlMasterClockXingOut_a_bits_mask; // @[ClockDomain.scala:14:9]
wire [63:0] tlMasterClockXingIn_a_bits_data; // @[MixedNode.scala:551:17]
assign auto_tl_master_clock_xing_out_a_bits_data_0 = tlMasterClockXingOut_a_bits_data; // @[ClockDomain.scala:14:9]
wire tlMasterClockXingIn_a_bits_corrupt; // @[MixedNode.scala:551:17]
assign auto_tl_master_clock_xing_out_a_bits_corrupt_0 = tlMasterClockXingOut_a_bits_corrupt; // @[ClockDomain.scala:14:9]
wire tlMasterClockXingIn_b_ready; // @[MixedNode.scala:551:17]
assign auto_tl_master_clock_xing_out_b_ready_0 = tlMasterClockXingOut_b_ready; // @[ClockDomain.scala:14:9]
wire tlMasterClockXingIn_b_valid = tlMasterClockXingOut_b_valid; // @[MixedNode.scala:542:17, :551:17]
wire [1:0] tlMasterClockXingIn_b_bits_param = tlMasterClockXingOut_b_bits_param; // @[MixedNode.scala:542:17, :551:17]
wire [3:0] tlMasterClockXingIn_b_bits_source = tlMasterClockXingOut_b_bits_source; // @[MixedNode.scala:542:17, :551:17]
wire [31:0] tlMasterClockXingIn_b_bits_address = tlMasterClockXingOut_b_bits_address; // @[MixedNode.scala:542:17, :551:17]
wire tlMasterClockXingIn_c_ready = tlMasterClockXingOut_c_ready; // @[MixedNode.scala:542:17, :551:17]
wire tlMasterClockXingIn_c_valid; // @[MixedNode.scala:551:17]
assign auto_tl_master_clock_xing_out_c_valid_0 = tlMasterClockXingOut_c_valid; // @[ClockDomain.scala:14:9]
wire [2:0] tlMasterClockXingIn_c_bits_opcode; // @[MixedNode.scala:551:17]
assign auto_tl_master_clock_xing_out_c_bits_opcode_0 = tlMasterClockXingOut_c_bits_opcode; // @[ClockDomain.scala:14:9]
wire [2:0] tlMasterClockXingIn_c_bits_param; // @[MixedNode.scala:551:17]
assign auto_tl_master_clock_xing_out_c_bits_param_0 = tlMasterClockXingOut_c_bits_param; // @[ClockDomain.scala:14:9]
wire [3:0] tlMasterClockXingIn_c_bits_size; // @[MixedNode.scala:551:17]
assign auto_tl_master_clock_xing_out_c_bits_size_0 = tlMasterClockXingOut_c_bits_size; // @[ClockDomain.scala:14:9]
wire [3:0] tlMasterClockXingIn_c_bits_source; // @[MixedNode.scala:551:17]
assign auto_tl_master_clock_xing_out_c_bits_source_0 = tlMasterClockXingOut_c_bits_source; // @[ClockDomain.scala:14:9]
wire [31:0] tlMasterClockXingIn_c_bits_address; // @[MixedNode.scala:551:17]
assign auto_tl_master_clock_xing_out_c_bits_address_0 = tlMasterClockXingOut_c_bits_address; // @[ClockDomain.scala:14:9]
wire [63:0] tlMasterClockXingIn_c_bits_data; // @[MixedNode.scala:551:17]
assign auto_tl_master_clock_xing_out_c_bits_data_0 = tlMasterClockXingOut_c_bits_data; // @[ClockDomain.scala:14:9]
wire tlMasterClockXingIn_c_bits_corrupt; // @[MixedNode.scala:551:17]
assign auto_tl_master_clock_xing_out_c_bits_corrupt_0 = tlMasterClockXingOut_c_bits_corrupt; // @[ClockDomain.scala:14:9]
wire tlMasterClockXingIn_d_ready; // @[MixedNode.scala:551:17]
assign auto_tl_master_clock_xing_out_d_ready_0 = tlMasterClockXingOut_d_ready; // @[ClockDomain.scala:14:9]
wire tlMasterClockXingIn_d_valid = tlMasterClockXingOut_d_valid; // @[MixedNode.scala:542:17, :551:17]
wire [2:0] tlMasterClockXingIn_d_bits_opcode = tlMasterClockXingOut_d_bits_opcode; // @[MixedNode.scala:542:17, :551:17]
wire [1:0] tlMasterClockXingIn_d_bits_param = tlMasterClockXingOut_d_bits_param; // @[MixedNode.scala:542:17, :551:17]
wire [3:0] tlMasterClockXingIn_d_bits_size = tlMasterClockXingOut_d_bits_size; // @[MixedNode.scala:542:17, :551:17]
wire [3:0] tlMasterClockXingIn_d_bits_source = tlMasterClockXingOut_d_bits_source; // @[MixedNode.scala:542:17, :551:17]
wire [2:0] tlMasterClockXingIn_d_bits_sink = tlMasterClockXingOut_d_bits_sink; // @[MixedNode.scala:542:17, :551:17]
wire tlMasterClockXingIn_d_bits_denied = tlMasterClockXingOut_d_bits_denied; // @[MixedNode.scala:542:17, :551:17]
wire [63:0] tlMasterClockXingIn_d_bits_data = tlMasterClockXingOut_d_bits_data; // @[MixedNode.scala:542:17, :551:17]
wire tlMasterClockXingIn_d_bits_corrupt = tlMasterClockXingOut_d_bits_corrupt; // @[MixedNode.scala:542:17, :551:17]
wire tlMasterClockXingIn_e_ready = tlMasterClockXingOut_e_ready; // @[MixedNode.scala:542:17, :551:17]
wire tlMasterClockXingIn_e_valid; // @[MixedNode.scala:551:17]
assign auto_tl_master_clock_xing_out_e_valid_0 = tlMasterClockXingOut_e_valid; // @[ClockDomain.scala:14:9]
wire [2:0] tlMasterClockXingIn_e_bits_sink; // @[MixedNode.scala:551:17]
assign auto_tl_master_clock_xing_out_e_bits_sink_0 = tlMasterClockXingOut_e_bits_sink; // @[ClockDomain.scala:14:9]
assign tlMasterClockXingOut_a_valid = tlMasterClockXingIn_a_valid; // @[MixedNode.scala:542:17, :551:17]
assign tlMasterClockXingOut_a_bits_opcode = tlMasterClockXingIn_a_bits_opcode; // @[MixedNode.scala:542:17, :551:17]
assign tlMasterClockXingOut_a_bits_param = tlMasterClockXingIn_a_bits_param; // @[MixedNode.scala:542:17, :551:17]
assign tlMasterClockXingOut_a_bits_size = tlMasterClockXingIn_a_bits_size; // @[MixedNode.scala:542:17, :551:17]
assign tlMasterClockXingOut_a_bits_source = tlMasterClockXingIn_a_bits_source; // @[MixedNode.scala:542:17, :551:17]
assign tlMasterClockXingOut_a_bits_address = tlMasterClockXingIn_a_bits_address; // @[MixedNode.scala:542:17, :551:17]
assign tlMasterClockXingOut_a_bits_mask = tlMasterClockXingIn_a_bits_mask; // @[MixedNode.scala:542:17, :551:17]
assign tlMasterClockXingOut_a_bits_data = tlMasterClockXingIn_a_bits_data; // @[MixedNode.scala:542:17, :551:17]
assign tlMasterClockXingOut_a_bits_corrupt = tlMasterClockXingIn_a_bits_corrupt; // @[MixedNode.scala:542:17, :551:17]
assign tlMasterClockXingOut_b_ready = tlMasterClockXingIn_b_ready; // @[MixedNode.scala:542:17, :551:17]
assign tlMasterClockXingOut_c_valid = tlMasterClockXingIn_c_valid; // @[MixedNode.scala:542:17, :551:17]
assign tlMasterClockXingOut_c_bits_opcode = tlMasterClockXingIn_c_bits_opcode; // @[MixedNode.scala:542:17, :551:17]
assign tlMasterClockXingOut_c_bits_param = tlMasterClockXingIn_c_bits_param; // @[MixedNode.scala:542:17, :551:17]
assign tlMasterClockXingOut_c_bits_size = tlMasterClockXingIn_c_bits_size; // @[MixedNode.scala:542:17, :551:17]
assign tlMasterClockXingOut_c_bits_source = tlMasterClockXingIn_c_bits_source; // @[MixedNode.scala:542:17, :551:17]
assign tlMasterClockXingOut_c_bits_address = tlMasterClockXingIn_c_bits_address; // @[MixedNode.scala:542:17, :551:17]
assign tlMasterClockXingOut_c_bits_data = tlMasterClockXingIn_c_bits_data; // @[MixedNode.scala:542:17, :551:17]
assign tlMasterClockXingOut_c_bits_corrupt = tlMasterClockXingIn_c_bits_corrupt; // @[MixedNode.scala:542:17, :551:17]
assign tlMasterClockXingOut_d_ready = tlMasterClockXingIn_d_ready; // @[MixedNode.scala:542:17, :551:17]
assign tlMasterClockXingOut_e_valid = tlMasterClockXingIn_e_valid; // @[MixedNode.scala:542:17, :551:17]
assign tlMasterClockXingOut_e_bits_sink = tlMasterClockXingIn_e_bits_sink; // @[MixedNode.scala:542:17, :551:17]
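  // Interrupt clock-crossing nodes: the sync_* bits are likewise wired straight through between the In and Out sides.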
wire intInClockXingOut_sync_0; // @[MixedNode.scala:542:17]
wire intInClockXingOut_sync_1; // @[MixedNode.scala:542:17]
assign intInClockXingOut_sync_0 = intInClockXingIn_sync_0; // @[MixedNode.scala:542:17, :551:17]
assign intInClockXingOut_sync_1 = intInClockXingIn_sync_1; // @[MixedNode.scala:542:17, :551:17]
wire intInClockXingOut_1_sync_0; // @[MixedNode.scala:542:17]
wire intInClockXingOut_1_sync_1; // @[MixedNode.scala:542:17]
assign intInClockXingOut_1_sync_0 = intInClockXingIn_1_sync_0; // @[MixedNode.scala:542:17, :551:17]
assign intInClockXingOut_1_sync_1 = intInClockXingIn_1_sync_1; // @[MixedNode.scala:542:17, :551:17]
wire intInClockXingOut_2_sync_0; // @[MixedNode.scala:542:17]
wire intInClockXingOut_2_sync_1; // @[MixedNode.scala:542:17]
assign intInClockXingOut_2_sync_0 = intInClockXingIn_2_sync_0; // @[MixedNode.scala:542:17, :551:17]
assign intInClockXingOut_2_sync_1 = intInClockXingIn_2_sync_1; // @[MixedNode.scala:542:17, :551:17]
wire intInClockXingOut_3_sync_0; // @[MixedNode.scala:542:17]
wire intInClockXingOut_3_sync_1; // @[MixedNode.scala:542:17]
assign intInClockXingOut_3_sync_0 = intInClockXingIn_3_sync_0; // @[MixedNode.scala:542:17, :551:17]
assign intInClockXingOut_3_sync_1 = intInClockXingIn_3_sync_1; // @[MixedNode.scala:542:17, :551:17]
wire intInClockXingOut_4_sync_0; // @[MixedNode.scala:542:17]
assign intInClockXingOut_4_sync_0 = intInClockXingIn_4_sync_0; // @[MixedNode.scala:542:17, :551:17]
wire intInClockXingOut_5_sync_0; // @[MixedNode.scala:542:17]
assign intInClockXingOut_5_sync_0 = intInClockXingIn_5_sync_0; // @[MixedNode.scala:542:17, :551:17]
wire intInClockXingOut_6_sync_0; // @[MixedNode.scala:542:17]
assign intInClockXingOut_6_sync_0 = intInClockXingIn_6_sync_0; // @[MixedNode.scala:542:17, :551:17]
wire intInClockXingOut_7_sync_0; // @[MixedNode.scala:542:17]
assign intInClockXingOut_7_sync_0 = intInClockXingIn_7_sync_0; // @[MixedNode.scala:542:17, :551:17]
wire intInClockXingOut_8_sync_0; // @[MixedNode.scala:542:17]
assign intInClockXingOut_8_sync_0 = intInClockXingIn_8_sync_0; // @[MixedNode.scala:542:17, :551:17]
wire intInClockXingOut_9_sync_0; // @[MixedNode.scala:542:17]
assign intInClockXingOut_9_sync_0 = intInClockXingIn_9_sync_0; // @[MixedNode.scala:542:17, :551:17]
wire intInClockXingOut_10_sync_0; // @[MixedNode.scala:542:17]
assign intInClockXingOut_10_sync_0 = intInClockXingIn_10_sync_0; // @[MixedNode.scala:542:17, :551:17]
wire intInClockXingOut_11_sync_0; // @[MixedNode.scala:542:17]
assign intInClockXingOut_11_sync_0 = intInClockXingIn_11_sync_0; // @[MixedNode.scala:542:17, :551:17]
wire intOutClockXingIn_sync_0; // @[MixedNode.scala:551:17]
assign auto_int_out_clock_xing_out_0_sync_0_0 = intOutClockXingOut_sync_0; // @[ClockDomain.scala:14:9]
wire intOutClockXingIn_sync_1; // @[MixedNode.scala:551:17]
assign auto_int_out_clock_xing_out_0_sync_1_0 = intOutClockXingOut_sync_1; // @[ClockDomain.scala:14:9]
wire intOutClockXingIn_sync_2; // @[MixedNode.scala:551:17]
assign auto_int_out_clock_xing_out_0_sync_2_0 = intOutClockXingOut_sync_2; // @[ClockDomain.scala:14:9]
wire intOutClockXingIn_sync_3; // @[MixedNode.scala:551:17]
assign auto_int_out_clock_xing_out_0_sync_3_0 = intOutClockXingOut_sync_3; // @[ClockDomain.scala:14:9]
wire intOutClockXingIn_sync_4; // @[MixedNode.scala:551:17]
assign auto_int_out_clock_xing_out_0_sync_4_0 = intOutClockXingOut_sync_4; // @[ClockDomain.scala:14:9]
assign intOutClockXingOut_sync_0 = intOutClockXingIn_sync_0; // @[MixedNode.scala:542:17, :551:17]
assign intOutClockXingOut_sync_1 = intOutClockXingIn_sync_1; // @[MixedNode.scala:542:17, :551:17]
assign intOutClockXingOut_sync_2 = intOutClockXingIn_sync_2; // @[MixedNode.scala:542:17, :551:17]
assign intOutClockXingOut_sync_3 = intOutClockXingIn_sync_3; // @[MixedNode.scala:542:17, :551:17]
assign intOutClockXingOut_sync_4 = intOutClockXingIn_sync_4; // @[MixedNode.scala:542:17, :551:17]
wire intOutClockXingIn_1_sync_0; // @[MixedNode.scala:551:17]
assign auto_int_out_clock_xing_out_1_sync_0_0 = intOutClockXingOut_1_sync_0; // @[ClockDomain.scala:14:9]
wire intOutClockXingIn_1_sync_1; // @[MixedNode.scala:551:17]
assign auto_int_out_clock_xing_out_1_sync_1_0 = intOutClockXingOut_1_sync_1; // @[ClockDomain.scala:14:9]
wire intOutClockXingIn_1_sync_2; // @[MixedNode.scala:551:17]
assign auto_int_out_clock_xing_out_1_sync_2_0 = intOutClockXingOut_1_sync_2; // @[ClockDomain.scala:14:9]
wire intOutClockXingIn_1_sync_3; // @[MixedNode.scala:551:17]
assign auto_int_out_clock_xing_out_1_sync_3_0 = intOutClockXingOut_1_sync_3; // @[ClockDomain.scala:14:9]
wire intOutClockXingIn_1_sync_4; // @[MixedNode.scala:551:17]
assign auto_int_out_clock_xing_out_1_sync_4_0 = intOutClockXingOut_1_sync_4; // @[ClockDomain.scala:14:9]
assign intOutClockXingOut_1_sync_0 = intOutClockXingIn_1_sync_0; // @[MixedNode.scala:542:17, :551:17]
assign intOutClockXingOut_1_sync_1 = intOutClockXingIn_1_sync_1; // @[MixedNode.scala:542:17, :551:17]
assign intOutClockXingOut_1_sync_2 = intOutClockXingIn_1_sync_2; // @[MixedNode.scala:542:17, :551:17]
assign intOutClockXingOut_1_sync_3 = intOutClockXingIn_1_sync_3; // @[MixedNode.scala:542:17, :551:17]
assign intOutClockXingOut_1_sync_4 = intOutClockXingIn_1_sync_4; // @[MixedNode.scala:542:17, :551:17]
wire intOutClockXingIn_2_sync_0; // @[MixedNode.scala:551:17]
assign auto_int_out_clock_xing_out_2_sync_0_0 = intOutClockXingOut_2_sync_0; // @[ClockDomain.scala:14:9]
wire intOutClockXingIn_2_sync_1; // @[MixedNode.scala:551:17]
assign auto_int_out_clock_xing_out_2_sync_1_0 = intOutClockXingOut_2_sync_1; // @[ClockDomain.scala:14:9]
wire intOutClockXingIn_2_sync_2; // @[MixedNode.scala:551:17]
assign auto_int_out_clock_xing_out_2_sync_2_0 = intOutClockXingOut_2_sync_2; // @[ClockDomain.scala:14:9]
wire intOutClockXingIn_2_sync_3; // @[MixedNode.scala:551:17]
assign auto_int_out_clock_xing_out_2_sync_3_0 = intOutClockXingOut_2_sync_3; // @[ClockDomain.scala:14:9]
wire intOutClockXingIn_2_sync_4; // @[MixedNode.scala:551:17]
assign auto_int_out_clock_xing_out_2_sync_4_0 = intOutClockXingOut_2_sync_4; // @[ClockDomain.scala:14:9]
assign intOutClockXingOut_2_sync_0 = intOutClockXingIn_2_sync_0; // @[MixedNode.scala:542:17, :551:17]
assign intOutClockXingOut_2_sync_1 = intOutClockXingIn_2_sync_1; // @[MixedNode.scala:542:17, :551:17]
assign intOutClockXingOut_2_sync_2 = intOutClockXingIn_2_sync_2; // @[MixedNode.scala:542:17, :551:17]
assign intOutClockXingOut_2_sync_3 = intOutClockXingIn_2_sync_3; // @[MixedNode.scala:542:17, :551:17]
assign intOutClockXingOut_2_sync_4 = intOutClockXingIn_2_sync_4; // @[MixedNode.scala:542:17, :551:17]
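  // Instantiate the Cluster submodule, clocked and reset by the element reset domain.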
Cluster element_reset_domain_element ( // @[Cluster.scala:99:15]
.clock (element_reset_domain_childClock), // @[LazyModuleImp.scala:155:31]
.reset (element_reset_domain_childReset), // @[LazyModuleImp.scala:158:31]
.auto_buffer_out_a_ready (element_reset_domain_auto_element_buffer_out_a_ready), // @[ClockDomain.scala:14:9]
.auto_buffer_out_a_valid (element_reset_domain_auto_element_buffer_out_a_valid),
.auto_buffer_out_a_bits_opcode (element_reset_domain_auto_element_buffer_out_a_bits_opcode),
.auto_buffer_out_a_bits_param (element_reset_domain_auto_element_buffer_out_a_bits_param),
.auto_buffer_out_a_bits_size (element_reset_domain_auto_element_buffer_out_a_bits_size),
.auto_buffer_out_a_bits_source (element_reset_domain_auto_element_buffer_out_a_bits_source),
.auto_buffer_out_a_bits_address (element_reset_domain_auto_element_buffer_out_a_bits_address),
.auto_buffer_out_a_bits_mask (element_reset_domain_auto_element_buffer_out_a_bits_mask),
.auto_buffer_out_a_bits_data (element_reset_domain_auto_element_buffer_out_a_bits_data),
.auto_buffer_out_a_bits_corrupt (element_reset_domain_auto_element_buffer_out_a_bits_corrupt),
.auto_buffer_out_b_ready (element_reset_domain_auto_element_buffer_out_b_ready),
.auto_buffer_out_b_valid (element_reset_domain_auto_element_buffer_out_b_valid), // @[ClockDomain.scala:14:9]
.auto_buffer_out_b_bits_opcode (element_reset_domain_auto_element_buffer_out_b_bits_opcode), // @[ClockDomain.scala:14:9]
.auto_buffer_out_b_bits_param (element_reset_domain_auto_element_buffer_out_b_bits_param), // @[ClockDomain.scala:14:9]
.auto_buffer_out_b_bits_size (element_reset_domain_auto_element_buffer_out_b_bits_size), // @[ClockDomain.scala:14:9]
.auto_buffer_out_b_bits_source (element_reset_domain_auto_element_buffer_out_b_bits_source), // @[ClockDomain.scala:14:9]
.auto_buffer_out_b_bits_address (element_reset_domain_auto_element_buffer_out_b_bits_address), // @[ClockDomain.scala:14:9]
.auto_buffer_out_b_bits_mask (element_reset_domain_auto_element_buffer_out_b_bits_mask), // @[ClockDomain.scala:14:9]
.auto_buffer_out_b_bits_data (element_reset_domain_auto_element_buffer_out_b_bits_data), // @[ClockDomain.scala:14:9]
.auto_buffer_out_b_bits_corrupt (element_reset_domain_auto_element_buffer_out_b_bits_corrupt), // @[ClockDomain.scala:14:9]
.auto_buffer_out_c_ready (element_reset_domain_auto_element_buffer_out_c_ready), // @[ClockDomain.scala:14:9]
.auto_buffer_out_c_valid (element_reset_domain_auto_element_buffer_out_c_valid),
.auto_buffer_out_c_bits_opcode (element_reset_domain_auto_element_buffer_out_c_bits_opcode),
.auto_buffer_out_c_bits_param (element_reset_domain_auto_element_buffer_out_c_bits_param),
.auto_buffer_out_c_bits_size (element_reset_domain_auto_element_buffer_out_c_bits_size),
.auto_buffer_out_c_bits_source (element_reset_domain_auto_element_buffer_out_c_bits_source),
.auto_buffer_out_c_bits_address (element_reset_domain_auto_element_buffer_out_c_bits_address),
.auto_buffer_out_c_bits_data (element_reset_domain_auto_element_buffer_out_c_bits_data),
.auto_buffer_out_c_bits_corrupt (element_reset_domain_auto_element_buffer_out_c_bits_corrupt),
.auto_buffer_out_d_ready (element_reset_domain_auto_element_buffer_out_d_ready),
.auto_buffer_out_d_valid (element_reset_domain_auto_element_buffer_out_d_valid), // @[ClockDomain.scala:14:9]
.auto_buffer_out_d_bits_opcode (element_reset_domain_auto_element_buffer_out_d_bits_opcode), // @[ClockDomain.scala:14:9]
.auto_buffer_out_d_bits_param (element_reset_domain_auto_element_buffer_out_d_bits_param), // @[ClockDomain.scala:14:9]
.auto_buffer_out_d_bits_size (element_reset_domain_auto_element_buffer_out_d_bits_size), // @[ClockDomain.scala:14:9]
.auto_buffer_out_d_bits_source (element_reset_domain_auto_element_buffer_out_d_bits_source), // @[ClockDomain.scala:14:9]
.auto_buffer_out_d_bits_sink (element_reset_domain_auto_element_buffer_out_d_bits_sink), // @[ClockDomain.scala:14:9]
.auto_buffer_out_d_bits_denied (element_reset_domain_auto_element_buffer_out_d_bits_denied), // @[ClockDomain.scala:14:9]
.auto_buffer_out_d_bits_data (element_reset_domain_auto_element_buffer_out_d_bits_data), // @[ClockDomain.scala:14:9]
.auto_buffer_out_d_bits_corrupt (element_reset_domain_auto_element_buffer_out_d_bits_corrupt), // @[ClockDomain.scala:14:9]
.auto_buffer_out_e_ready (element_reset_domain_auto_element_buffer_out_e_ready), // @[ClockDomain.scala:14:9]
.auto_buffer_out_e_valid (element_reset_domain_auto_element_buffer_out_e_valid),
.auto_buffer_out_e_bits_sink (element_reset_domain_auto_element_buffer_out_e_bits_sink),
.auto_xbar_anon_out_1_0 (element_reset_domain_auto_element_xbar_anon_out_1_0),
.auto_xbar_anon_out_1_1 (element_reset_domain_auto_element_xbar_anon_out_1_1),
.auto_xbar_anon_out_1_2 (element_reset_domain_auto_element_xbar_anon_out_1_2),
.auto_xbar_anon_out_1_3 (element_reset_domain_auto_element_xbar_anon_out_1_3),
.auto_csbus0_fixedClockNode_anon_out_clock (element_reset_domain_auto_element_csbus0_fixedClockNode_anon_out_clock),
.auto_csbus0_fixedClockNode_anon_out_reset (element_reset_domain_auto_element_csbus0_fixedClockNode_anon_out_reset),
.auto_all_clock_groups_in_1_member_ccbus0_0_clock (element_reset_domain_auto_element_all_clock_groups_in_1_member_ccbus0_0_clock), // @[ClockDomain.scala:14:9]
.auto_all_clock_groups_in_1_member_ccbus0_0_reset (element_reset_domain_auto_element_all_clock_groups_in_1_member_ccbus0_0_reset), // @[ClockDomain.scala:14:9]
.auto_all_clock_groups_in_0_member_csbus0_0_clock (element_reset_domain_auto_element_all_clock_groups_in_0_member_csbus0_0_clock), // @[ClockDomain.scala:14:9]
.auto_all_clock_groups_in_0_member_csbus0_0_reset (element_reset_domain_auto_element_all_clock_groups_in_0_member_csbus0_0_reset), // @[ClockDomain.scala:14:9]
.auto_trace_nodes_out_3_insns_0_valid (element_reset_domain_auto_element_trace_nodes_out_3_insns_0_valid),
.auto_trace_nodes_out_3_insns_0_iaddr (element_reset_domain_auto_element_trace_nodes_out_3_insns_0_iaddr),
.auto_trace_nodes_out_3_insns_0_insn (element_reset_domain_auto_element_trace_nodes_out_3_insns_0_insn),
.auto_trace_nodes_out_3_insns_0_priv (element_reset_domain_auto_element_trace_nodes_out_3_insns_0_priv),
.auto_trace_nodes_out_3_insns_0_exception (element_reset_domain_auto_element_trace_nodes_out_3_insns_0_exception),
.auto_trace_nodes_out_3_insns_0_interrupt (element_reset_domain_auto_element_trace_nodes_out_3_insns_0_interrupt),
.auto_trace_nodes_out_3_insns_0_cause (element_reset_domain_auto_element_trace_nodes_out_3_insns_0_cause),
.auto_trace_nodes_out_3_insns_0_tval (element_reset_domain_auto_element_trace_nodes_out_3_insns_0_tval),
.auto_trace_nodes_out_3_time (element_reset_domain_auto_element_trace_nodes_out_3_time),
.auto_trace_nodes_out_2_insns_0_valid (element_reset_domain_auto_element_trace_nodes_out_2_insns_0_valid),
.auto_trace_nodes_out_2_insns_0_iaddr (element_reset_domain_auto_element_trace_nodes_out_2_insns_0_iaddr),
.auto_trace_nodes_out_2_insns_0_insn (element_reset_domain_auto_element_trace_nodes_out_2_insns_0_insn),
.auto_trace_nodes_out_2_insns_0_priv (element_reset_domain_auto_element_trace_nodes_out_2_insns_0_priv),
.auto_trace_nodes_out_2_insns_0_exception (element_reset_domain_auto_element_trace_nodes_out_2_insns_0_exception),
.auto_trace_nodes_out_2_insns_0_interrupt (element_reset_domain_auto_element_trace_nodes_out_2_insns_0_interrupt),
.auto_trace_nodes_out_2_insns_0_cause (element_reset_domain_auto_element_trace_nodes_out_2_insns_0_cause),
.auto_trace_nodes_out_2_insns_0_tval (element_reset_domain_auto_element_trace_nodes_out_2_insns_0_tval),
.auto_trace_nodes_out_2_time (element_reset_domain_auto_element_trace_nodes_out_2_time),
.auto_trace_nodes_out_1_insns_0_valid (element_reset_domain_auto_element_trace_nodes_out_1_insns_0_valid),
.auto_trace_nodes_out_1_insns_0_iaddr (element_reset_domain_auto_element_trace_nodes_out_1_insns_0_iaddr),
.auto_trace_nodes_out_1_insns_0_insn (element_reset_domain_auto_element_trace_nodes_out_1_insns_0_insn),
.auto_trace_nodes_out_1_insns_0_priv (element_reset_domain_auto_element_trace_nodes_out_1_insns_0_priv),
.auto_trace_nodes_out_1_insns_0_exception (element_reset_domain_auto_element_trace_nodes_out_1_insns_0_exception),
.auto_trace_nodes_out_1_insns_0_interrupt (element_reset_domain_auto_element_trace_nodes_out_1_insns_0_interrupt),
.auto_trace_nodes_out_1_insns_0_cause (element_reset_domain_auto_element_trace_nodes_out_1_insns_0_cause),
.auto_trace_nodes_out_1_insns_0_tval (element_reset_domain_auto_element_trace_nodes_out_1_insns_0_tval),
.auto_trace_nodes_out_1_time (element_reset_domain_auto_element_trace_nodes_out_1_time),
.auto_trace_nodes_out_0_insns_0_valid (element_reset_domain_auto_element_trace_nodes_out_0_insns_0_valid),
.auto_trace_nodes_out_0_insns_0_iaddr (element_reset_domain_auto_element_trace_nodes_out_0_insns_0_iaddr),
.auto_trace_nodes_out_0_insns_0_insn (element_reset_domain_auto_element_trace_nodes_out_0_insns_0_insn),
.auto_trace_nodes_out_0_insns_0_priv (element_reset_domain_auto_element_trace_nodes_out_0_insns_0_priv),
.auto_trace_nodes_out_0_insns_0_exception (element_reset_domain_auto_element_trace_nodes_out_0_insns_0_exception),
.auto_trace_nodes_out_0_insns_0_interrupt (element_reset_domain_auto_element_trace_nodes_out_0_insns_0_interrupt),
.auto_trace_nodes_out_0_insns_0_cause (element_reset_domain_auto_element_trace_nodes_out_0_insns_0_cause),
.auto_trace_nodes_out_0_insns_0_tval (element_reset_domain_auto_element_trace_nodes_out_0_insns_0_tval),
.auto_trace_nodes_out_0_time (element_reset_domain_auto_element_trace_nodes_out_0_time),
.auto_tile_hart_id_nodes_in_3 (element_reset_domain_auto_element_tile_hart_id_nodes_in_3), // @[ClockDomain.scala:14:9]
.auto_tile_hart_id_nodes_in_2 (element_reset_domain_auto_element_tile_hart_id_nodes_in_2), // @[ClockDomain.scala:14:9]
.auto_tile_hart_id_nodes_in_1 (element_reset_domain_auto_element_tile_hart_id_nodes_in_1), // @[ClockDomain.scala:14:9]
.auto_tile_hart_id_nodes_in_0 (element_reset_domain_auto_element_tile_hart_id_nodes_in_0), // @[ClockDomain.scala:14:9]
.auto_seip_nodes_in_3_0 (element_reset_domain_auto_element_seip_nodes_in_3_0), // @[ClockDomain.scala:14:9]
.auto_seip_nodes_in_2_0 (element_reset_domain_auto_element_seip_nodes_in_2_0), // @[ClockDomain.scala:14:9]
.auto_seip_nodes_in_1_0 (element_reset_domain_auto_element_seip_nodes_in_1_0), // @[ClockDomain.scala:14:9]
.auto_seip_nodes_in_0_0 (element_reset_domain_auto_element_seip_nodes_in_0_0), // @[ClockDomain.scala:14:9]
.auto_meip_nodes_in_3_0 (element_reset_domain_auto_element_meip_nodes_in_3_0), // @[ClockDomain.scala:14:9]
.auto_meip_nodes_in_2_0 (element_reset_domain_auto_element_meip_nodes_in_2_0), // @[ClockDomain.scala:14:9]
.auto_meip_nodes_in_1_0 (element_reset_domain_auto_element_meip_nodes_in_1_0), // @[ClockDomain.scala:14:9]
.auto_meip_nodes_in_0_0 (element_reset_domain_auto_element_meip_nodes_in_0_0), // @[ClockDomain.scala:14:9]
.auto_msip_nodes_in_3_0 (element_reset_domain_auto_element_msip_nodes_in_3_0), // @[ClockDomain.scala:14:9]
.auto_msip_nodes_in_3_1 (element_reset_domain_auto_element_msip_nodes_in_3_1), // @[ClockDomain.scala:14:9]
.auto_msip_nodes_in_2_0 (element_reset_domain_auto_element_msip_nodes_in_2_0), // @[ClockDomain.scala:14:9]
.auto_msip_nodes_in_2_1 (element_reset_domain_auto_element_msip_nodes_in_2_1), // @[ClockDomain.scala:14:9]
.auto_msip_nodes_in_1_0 (element_reset_domain_auto_element_msip_nodes_in_1_0), // @[ClockDomain.scala:14:9]
.auto_msip_nodes_in_1_1 (element_reset_domain_auto_element_msip_nodes_in_1_1), // @[ClockDomain.scala:14:9]
.auto_msip_nodes_in_0_0 (element_reset_domain_auto_element_msip_nodes_in_0_0), // @[ClockDomain.scala:14:9]
.auto_msip_nodes_in_0_1 (element_reset_domain_auto_element_msip_nodes_in_0_1), // @[ClockDomain.scala:14:9]
.auto_debug_nodes_in_3_sync_0 (element_reset_domain_auto_element_debug_nodes_in_3_sync_0), // @[ClockDomain.scala:14:9]
.auto_debug_nodes_in_2_sync_0 (element_reset_domain_auto_element_debug_nodes_in_2_sync_0), // @[ClockDomain.scala:14:9]
.auto_debug_nodes_in_1_sync_0 (element_reset_domain_auto_element_debug_nodes_in_1_sync_0), // @[ClockDomain.scala:14:9]
.auto_debug_nodes_in_0_sync_0 (element_reset_domain_auto_element_debug_nodes_in_0_sync_0) // @[ClockDomain.scala:14:9]
); // @[Cluster.scala:99:15]
TLBuffer_a32d64s4k3z4c_1 buffer ( // @[Buffer.scala:75:28]
.clock (childClock), // @[LazyModuleImp.scala:155:31]
.reset (childReset), // @[LazyModuleImp.scala:158:31]
.auto_in_a_ready (element_reset_domain_auto_element_buffer_out_a_ready),
.auto_in_a_valid (element_reset_domain_auto_element_buffer_out_a_valid), // @[ClockDomain.scala:14:9]
.auto_in_a_bits_opcode (element_reset_domain_auto_element_buffer_out_a_bits_opcode), // @[ClockDomain.scala:14:9]
.auto_in_a_bits_param (element_reset_domain_auto_element_buffer_out_a_bits_param), // @[ClockDomain.scala:14:9]
.auto_in_a_bits_size (element_reset_domain_auto_element_buffer_out_a_bits_size), // @[ClockDomain.scala:14:9]
.auto_in_a_bits_source (element_reset_domain_auto_element_buffer_out_a_bits_source), // @[ClockDomain.scala:14:9]
.auto_in_a_bits_address (element_reset_domain_auto_element_buffer_out_a_bits_address), // @[ClockDomain.scala:14:9]
.auto_in_a_bits_mask (element_reset_domain_auto_element_buffer_out_a_bits_mask), // @[ClockDomain.scala:14:9]
.auto_in_a_bits_data (element_reset_domain_auto_element_buffer_out_a_bits_data), // @[ClockDomain.scala:14:9]
.auto_in_a_bits_corrupt (element_reset_domain_auto_element_buffer_out_a_bits_corrupt), // @[ClockDomain.scala:14:9]
.auto_in_b_ready (element_reset_domain_auto_element_buffer_out_b_ready), // @[ClockDomain.scala:14:9]
.auto_in_b_valid (element_reset_domain_auto_element_buffer_out_b_valid),
.auto_in_b_bits_opcode (element_reset_domain_auto_element_buffer_out_b_bits_opcode),
.auto_in_b_bits_param (element_reset_domain_auto_element_buffer_out_b_bits_param),
.auto_in_b_bits_size (element_reset_domain_auto_element_buffer_out_b_bits_size),
.auto_in_b_bits_source (element_reset_domain_auto_element_buffer_out_b_bits_source),
.auto_in_b_bits_address (element_reset_domain_auto_element_buffer_out_b_bits_address),
.auto_in_b_bits_mask (element_reset_domain_auto_element_buffer_out_b_bits_mask),
.auto_in_b_bits_data (element_reset_domain_auto_element_buffer_out_b_bits_data),
.auto_in_b_bits_corrupt (element_reset_domain_auto_element_buffer_out_b_bits_corrupt),
.auto_in_c_ready (element_reset_domain_auto_element_buffer_out_c_ready),
.auto_in_c_valid (element_reset_domain_auto_element_buffer_out_c_valid), // @[ClockDomain.scala:14:9]
.auto_in_c_bits_opcode (element_reset_domain_auto_element_buffer_out_c_bits_opcode), // @[ClockDomain.scala:14:9]
.auto_in_c_bits_param (element_reset_domain_auto_element_buffer_out_c_bits_param), // @[ClockDomain.scala:14:9]
.auto_in_c_bits_size (element_reset_domain_auto_element_buffer_out_c_bits_size), // @[ClockDomain.scala:14:9]
.auto_in_c_bits_source (element_reset_domain_auto_element_buffer_out_c_bits_source), // @[ClockDomain.scala:14:9]
.auto_in_c_bits_address (element_reset_domain_auto_element_buffer_out_c_bits_address), // @[ClockDomain.scala:14:9]
.auto_in_c_bits_data (element_reset_domain_auto_element_buffer_out_c_bits_data), // @[ClockDomain.scala:14:9]
.auto_in_c_bits_corrupt (element_reset_domain_auto_element_buffer_out_c_bits_corrupt), // @[ClockDomain.scala:14:9]
.auto_in_d_ready (element_reset_domain_auto_element_buffer_out_d_ready), // @[ClockDomain.scala:14:9]
.auto_in_d_valid (element_reset_domain_auto_element_buffer_out_d_valid),
.auto_in_d_bits_opcode (element_reset_domain_auto_element_buffer_out_d_bits_opcode),
.auto_in_d_bits_param (element_reset_domain_auto_element_buffer_out_d_bits_param),
.auto_in_d_bits_size (element_reset_domain_auto_element_buffer_out_d_bits_size),
.auto_in_d_bits_source (element_reset_domain_auto_element_buffer_out_d_bits_source),
.auto_in_d_bits_sink (element_reset_domain_auto_element_buffer_out_d_bits_sink),
.auto_in_d_bits_denied (element_reset_domain_auto_element_buffer_out_d_bits_denied),
.auto_in_d_bits_data (element_reset_domain_auto_element_buffer_out_d_bits_data),
.auto_in_d_bits_corrupt (element_reset_domain_auto_element_buffer_out_d_bits_corrupt),
.auto_in_e_ready (element_reset_domain_auto_element_buffer_out_e_ready),
.auto_in_e_valid (element_reset_domain_auto_element_buffer_out_e_valid), // @[ClockDomain.scala:14:9]
.auto_in_e_bits_sink (element_reset_domain_auto_element_buffer_out_e_bits_sink), // @[ClockDomain.scala:14:9]
.auto_out_a_ready (tlMasterClockXingIn_a_ready), // @[MixedNode.scala:551:17]
.auto_out_a_valid (tlMasterClockXingIn_a_valid),
.auto_out_a_bits_opcode (tlMasterClockXingIn_a_bits_opcode),
.auto_out_a_bits_param (tlMasterClockXingIn_a_bits_param),
.auto_out_a_bits_size (tlMasterClockXingIn_a_bits_size),
.auto_out_a_bits_source (tlMasterClockXingIn_a_bits_source),
.auto_out_a_bits_address (tlMasterClockXingIn_a_bits_address),
.auto_out_a_bits_mask (tlMasterClockXingIn_a_bits_mask),
.auto_out_a_bits_data (tlMasterClockXingIn_a_bits_data),
.auto_out_a_bits_corrupt (tlMasterClockXingIn_a_bits_corrupt),
.auto_out_b_ready (tlMasterClockXingIn_b_ready),
.auto_out_b_valid (tlMasterClockXingIn_b_valid), // @[MixedNode.scala:551:17]
.auto_out_b_bits_param (tlMasterClockXingIn_b_bits_param), // @[MixedNode.scala:551:17]
.auto_out_b_bits_source (tlMasterClockXingIn_b_bits_source), // @[MixedNode.scala:551:17]
.auto_out_b_bits_address (tlMasterClockXingIn_b_bits_address), // @[MixedNode.scala:551:17]
.auto_out_c_ready (tlMasterClockXingIn_c_ready), // @[MixedNode.scala:551:17]
.auto_out_c_valid (tlMasterClockXingIn_c_valid),
.auto_out_c_bits_opcode (tlMasterClockXingIn_c_bits_opcode),
.auto_out_c_bits_param (tlMasterClockXingIn_c_bits_param),
.auto_out_c_bits_size (tlMasterClockXingIn_c_bits_size),
.auto_out_c_bits_source (tlMasterClockXingIn_c_bits_source),
.auto_out_c_bits_address (tlMasterClockXingIn_c_bits_address),
.auto_out_c_bits_data (tlMasterClockXingIn_c_bits_data),
.auto_out_c_bits_corrupt (tlMasterClockXingIn_c_bits_corrupt),
.auto_out_d_ready (tlMasterClockXingIn_d_ready),
.auto_out_d_valid (tlMasterClockXingIn_d_valid), // @[MixedNode.scala:551:17]
.auto_out_d_bits_opcode (tlMasterClockXingIn_d_bits_opcode), // @[MixedNode.scala:551:17]
.auto_out_d_bits_param (tlMasterClockXingIn_d_bits_param), // @[MixedNode.scala:551:17]
.auto_out_d_bits_size (tlMasterClockXingIn_d_bits_size), // @[MixedNode.scala:551:17]
.auto_out_d_bits_source (tlMasterClockXingIn_d_bits_source), // @[MixedNode.scala:551:17]
.auto_out_d_bits_sink (tlMasterClockXingIn_d_bits_sink), // @[MixedNode.scala:551:17]
.auto_out_d_bits_denied (tlMasterClockXingIn_d_bits_denied), // @[MixedNode.scala:551:17]
.auto_out_d_bits_data (tlMasterClockXingIn_d_bits_data), // @[MixedNode.scala:551:17]
.auto_out_d_bits_corrupt (tlMasterClockXingIn_d_bits_corrupt), // @[MixedNode.scala:551:17]
.auto_out_e_ready (tlMasterClockXingIn_e_ready), // @[MixedNode.scala:551:17]
.auto_out_e_valid (tlMasterClockXingIn_e_valid),
.auto_out_e_bits_sink (tlMasterClockXingIn_e_bits_sink)
); // @[Buffer.scala:75:28]
TLBuffer_11 buffer_1 ( // @[Buffer.scala:75:28]
.clock (childClock), // @[LazyModuleImp.scala:155:31]
.reset (childReset) // @[LazyModuleImp.scala:158:31]
); // @[Buffer.scala:75:28]
IntSyncSyncCrossingSink_n1x2_4 intsink ( // @[Crossing.scala:109:29]
.auto_in_sync_0 (intInClockXingOut_sync_0), // @[MixedNode.scala:542:17]
.auto_in_sync_1 (intInClockXingOut_sync_1), // @[MixedNode.scala:542:17]
.auto_out_0 (element_reset_domain_auto_element_msip_nodes_in_0_0),
.auto_out_1 (element_reset_domain_auto_element_msip_nodes_in_0_1)
); // @[Crossing.scala:109:29]
IntSyncSyncCrossingSink_n1x2_5 intsink_1 ( // @[Crossing.scala:109:29]
.auto_in_sync_0 (intInClockXingOut_1_sync_0), // @[MixedNode.scala:542:17]
.auto_in_sync_1 (intInClockXingOut_1_sync_1), // @[MixedNode.scala:542:17]
.auto_out_0 (element_reset_domain_auto_element_msip_nodes_in_1_0),
.auto_out_1 (element_reset_domain_auto_element_msip_nodes_in_1_1)
); // @[Crossing.scala:109:29]
IntSyncSyncCrossingSink_n1x2_6 intsink_2 ( // @[Crossing.scala:109:29]
.auto_in_sync_0 (intInClockXingOut_2_sync_0), // @[MixedNode.scala:542:17]
.auto_in_sync_1 (intInClockXingOut_2_sync_1), // @[MixedNode.scala:542:17]
.auto_out_0 (element_reset_domain_auto_element_msip_nodes_in_2_0),
.auto_out_1 (element_reset_domain_auto_element_msip_nodes_in_2_1)
); // @[Crossing.scala:109:29]
IntSyncSyncCrossingSink_n1x2_7 intsink_3 ( // @[Crossing.scala:109:29]
.auto_in_sync_0 (intInClockXingOut_3_sync_0), // @[MixedNode.scala:542:17]
.auto_in_sync_1 (intInClockXingOut_3_sync_1), // @[MixedNode.scala:542:17]
.auto_out_0 (element_reset_domain_auto_element_msip_nodes_in_3_0),
.auto_out_1 (element_reset_domain_auto_element_msip_nodes_in_3_1)
); // @[Crossing.scala:109:29]
IntSyncSyncCrossingSink_n1x1_20 intsink_4 ( // @[Crossing.scala:109:29]
.auto_in_sync_0 (intInClockXingOut_4_sync_0), // @[MixedNode.scala:542:17]
.auto_out_0 (element_reset_domain_auto_element_meip_nodes_in_0_0)
); // @[Crossing.scala:109:29]
IntSyncSyncCrossingSink_n1x1_21 intsink_5 ( // @[Crossing.scala:109:29]
.auto_in_sync_0 (intInClockXingOut_5_sync_0), // @[MixedNode.scala:542:17]
.auto_out_0 (element_reset_domain_auto_element_meip_nodes_in_1_0)
); // @[Crossing.scala:109:29]
IntSyncSyncCrossingSink_n1x1_22 intsink_6 ( // @[Crossing.scala:109:29]
.auto_in_sync_0 (intInClockXingOut_6_sync_0), // @[MixedNode.scala:542:17]
.auto_out_0 (element_reset_domain_auto_element_meip_nodes_in_2_0)
); // @[Crossing.scala:109:29]
IntSyncSyncCrossingSink_n1x1_23 intsink_7 ( // @[Crossing.scala:109:29]
.auto_in_sync_0 (intInClockXingOut_7_sync_0), // @[MixedNode.scala:542:17]
.auto_out_0 (element_reset_domain_auto_element_meip_nodes_in_3_0)
); // @[Crossing.scala:109:29]
IntSyncSyncCrossingSink_n1x1_24 intsink_8 ( // @[Crossing.scala:109:29]
.auto_in_sync_0 (intInClockXingOut_8_sync_0), // @[MixedNode.scala:542:17]
.auto_out_0 (element_reset_domain_auto_element_seip_nodes_in_0_0)
); // @[Crossing.scala:109:29]
IntSyncSyncCrossingSink_n1x1_25 intsink_9 ( // @[Crossing.scala:109:29]
.auto_in_sync_0 (intInClockXingOut_9_sync_0), // @[MixedNode.scala:542:17]
.auto_out_0 (element_reset_domain_auto_element_seip_nodes_in_1_0)
); // @[Crossing.scala:109:29]
IntSyncSyncCrossingSink_n1x1_26 intsink_10 ( // @[Crossing.scala:109:29]
.auto_in_sync_0 (intInClockXingOut_10_sync_0), // @[MixedNode.scala:542:17]
.auto_out_0 (element_reset_domain_auto_element_seip_nodes_in_2_0)
); // @[Crossing.scala:109:29]
IntSyncSyncCrossingSink_n1x1_27 intsink_11 ( // @[Crossing.scala:109:29]
.auto_in_sync_0 (intInClockXingOut_11_sync_0), // @[MixedNode.scala:542:17]
.auto_out_0 (element_reset_domain_auto_element_seip_nodes_in_3_0)
); // @[Crossing.scala:109:29]
IntSyncCrossingSource_n0x0 intsource ( // @[Crossing.scala:29:31]
.clock (childClock), // @[LazyModuleImp.scala:155:31]
.reset (childReset) // @[LazyModuleImp.scala:158:31]
); // @[Crossing.scala:29:31]
IntSyncCrossingSource_n0x0_1 intsource_1 ( // @[Crossing.scala:29:31]
.clock (childClock), // @[LazyModuleImp.scala:155:31]
.reset (childReset) // @[LazyModuleImp.scala:158:31]
); // @[Crossing.scala:29:31]
IntSyncCrossingSource_n0x0_2 intsource_2 ( // @[Crossing.scala:29:31]
.clock (childClock), // @[LazyModuleImp.scala:155:31]
.reset (childReset) // @[LazyModuleImp.scala:158:31]
); // @[Crossing.scala:29:31]
IntSyncCrossingSource_n0x0_3 intsource_3 ( // @[Crossing.scala:29:31]
.clock (childClock), // @[LazyModuleImp.scala:155:31]
.reset (childReset) // @[LazyModuleImp.scala:158:31]
); // @[Crossing.scala:29:31]
IntSyncCrossingSource_n0x0_4 intsource_4 ( // @[Crossing.scala:29:31]
.clock (childClock), // @[LazyModuleImp.scala:155:31]
.reset (childReset) // @[LazyModuleImp.scala:158:31]
); // @[Crossing.scala:29:31]
IntSyncCrossingSource_n1x5 intsource_5 ( // @[Crossing.scala:29:31]
.clock (childClock), // @[LazyModuleImp.scala:155:31]
.reset (childReset), // @[LazyModuleImp.scala:158:31]
.auto_out_sync_0 (intOutClockXingIn_sync_0),
.auto_out_sync_1 (intOutClockXingIn_sync_1),
.auto_out_sync_2 (intOutClockXingIn_sync_2),
.auto_out_sync_3 (intOutClockXingIn_sync_3),
.auto_out_sync_4 (intOutClockXingIn_sync_4)
); // @[Crossing.scala:29:31]
IntSyncCrossingSource_n1x5_1 intsource_6 ( // @[Crossing.scala:29:31]
.clock (childClock), // @[LazyModuleImp.scala:155:31]
.reset (childReset), // @[LazyModuleImp.scala:158:31]
.auto_in_0 (element_reset_domain_auto_element_xbar_anon_out_1_0), // @[ClockDomain.scala:14:9]
.auto_in_1 (element_reset_domain_auto_element_xbar_anon_out_1_1), // @[ClockDomain.scala:14:9]
.auto_in_2 (element_reset_domain_auto_element_xbar_anon_out_1_2), // @[ClockDomain.scala:14:9]
.auto_in_3 (element_reset_domain_auto_element_xbar_anon_out_1_3), // @[ClockDomain.scala:14:9]
.auto_out_sync_0 (intOutClockXingIn_1_sync_0),
.auto_out_sync_1 (intOutClockXingIn_1_sync_1),
.auto_out_sync_2 (intOutClockXingIn_1_sync_2),
.auto_out_sync_3 (intOutClockXingIn_1_sync_3),
.auto_out_sync_4 (intOutClockXingIn_1_sync_4)
); // @[Crossing.scala:29:31]
IntSyncCrossingSource_n1x5_2 intsource_7 ( // @[Crossing.scala:29:31]
.clock (childClock), // @[LazyModuleImp.scala:155:31]
.reset (childReset), // @[LazyModuleImp.scala:158:31]
.auto_out_sync_0 (intOutClockXingIn_2_sync_0),
.auto_out_sync_1 (intOutClockXingIn_2_sync_1),
.auto_out_sync_2 (intOutClockXingIn_2_sync_2),
.auto_out_sync_3 (intOutClockXingIn_2_sync_3),
.auto_out_sync_4 (intOutClockXingIn_2_sync_4)
); // @[Crossing.scala:29:31]
assign auto_element_reset_domain_element_trace_nodes_out_3_insns_0_valid = auto_element_reset_domain_element_trace_nodes_out_3_insns_0_valid_0; // @[ClockDomain.scala:14:9]
assign auto_element_reset_domain_element_trace_nodes_out_3_insns_0_iaddr = auto_element_reset_domain_element_trace_nodes_out_3_insns_0_iaddr_0; // @[ClockDomain.scala:14:9]
assign auto_element_reset_domain_element_trace_nodes_out_3_insns_0_insn = auto_element_reset_domain_element_trace_nodes_out_3_insns_0_insn_0; // @[ClockDomain.scala:14:9]
assign auto_element_reset_domain_element_trace_nodes_out_3_insns_0_priv = auto_element_reset_domain_element_trace_nodes_out_3_insns_0_priv_0; // @[ClockDomain.scala:14:9]
assign auto_element_reset_domain_element_trace_nodes_out_3_insns_0_exception = auto_element_reset_domain_element_trace_nodes_out_3_insns_0_exception_0; // @[ClockDomain.scala:14:9]
assign auto_element_reset_domain_element_trace_nodes_out_3_insns_0_interrupt = auto_element_reset_domain_element_trace_nodes_out_3_insns_0_interrupt_0; // @[ClockDomain.scala:14:9]
assign auto_element_reset_domain_element_trace_nodes_out_3_insns_0_cause = auto_element_reset_domain_element_trace_nodes_out_3_insns_0_cause_0; // @[ClockDomain.scala:14:9]
assign auto_element_reset_domain_element_trace_nodes_out_3_insns_0_tval = auto_element_reset_domain_element_trace_nodes_out_3_insns_0_tval_0; // @[ClockDomain.scala:14:9]
assign auto_element_reset_domain_element_trace_nodes_out_3_time = auto_element_reset_domain_element_trace_nodes_out_3_time_0; // @[ClockDomain.scala:14:9]
assign auto_element_reset_domain_element_trace_nodes_out_2_insns_0_valid = auto_element_reset_domain_element_trace_nodes_out_2_insns_0_valid_0; // @[ClockDomain.scala:14:9]
assign auto_element_reset_domain_element_trace_nodes_out_2_insns_0_iaddr = auto_element_reset_domain_element_trace_nodes_out_2_insns_0_iaddr_0; // @[ClockDomain.scala:14:9]
assign auto_element_reset_domain_element_trace_nodes_out_2_insns_0_insn = auto_element_reset_domain_element_trace_nodes_out_2_insns_0_insn_0; // @[ClockDomain.scala:14:9]
assign auto_element_reset_domain_element_trace_nodes_out_2_insns_0_priv = auto_element_reset_domain_element_trace_nodes_out_2_insns_0_priv_0; // @[ClockDomain.scala:14:9]
assign auto_element_reset_domain_element_trace_nodes_out_2_insns_0_exception = auto_element_reset_domain_element_trace_nodes_out_2_insns_0_exception_0; // @[ClockDomain.scala:14:9]
assign auto_element_reset_domain_element_trace_nodes_out_2_insns_0_interrupt = auto_element_reset_domain_element_trace_nodes_out_2_insns_0_interrupt_0; // @[ClockDomain.scala:14:9]
assign auto_element_reset_domain_element_trace_nodes_out_2_insns_0_cause = auto_element_reset_domain_element_trace_nodes_out_2_insns_0_cause_0; // @[ClockDomain.scala:14:9]
assign auto_element_reset_domain_element_trace_nodes_out_2_insns_0_tval = auto_element_reset_domain_element_trace_nodes_out_2_insns_0_tval_0; // @[ClockDomain.scala:14:9]
assign auto_element_reset_domain_element_trace_nodes_out_2_time = auto_element_reset_domain_element_trace_nodes_out_2_time_0; // @[ClockDomain.scala:14:9]
assign auto_element_reset_domain_element_trace_nodes_out_1_insns_0_valid = auto_element_reset_domain_element_trace_nodes_out_1_insns_0_valid_0; // @[ClockDomain.scala:14:9]
assign auto_element_reset_domain_element_trace_nodes_out_1_insns_0_iaddr = auto_element_reset_domain_element_trace_nodes_out_1_insns_0_iaddr_0; // @[ClockDomain.scala:14:9]
assign auto_element_reset_domain_element_trace_nodes_out_1_insns_0_insn = auto_element_reset_domain_element_trace_nodes_out_1_insns_0_insn_0; // @[ClockDomain.scala:14:9]
assign auto_element_reset_domain_element_trace_nodes_out_1_insns_0_priv = auto_element_reset_domain_element_trace_nodes_out_1_insns_0_priv_0; // @[ClockDomain.scala:14:9]
assign auto_element_reset_domain_element_trace_nodes_out_1_insns_0_exception = auto_element_reset_domain_element_trace_nodes_out_1_insns_0_exception_0; // @[ClockDomain.scala:14:9]
assign auto_element_reset_domain_element_trace_nodes_out_1_insns_0_interrupt = auto_element_reset_domain_element_trace_nodes_out_1_insns_0_interrupt_0; // @[ClockDomain.scala:14:9]
assign auto_element_reset_domain_element_trace_nodes_out_1_insns_0_cause = auto_element_reset_domain_element_trace_nodes_out_1_insns_0_cause_0; // @[ClockDomain.scala:14:9]
assign auto_element_reset_domain_element_trace_nodes_out_1_insns_0_tval = auto_element_reset_domain_element_trace_nodes_out_1_insns_0_tval_0; // @[ClockDomain.scala:14:9]
assign auto_element_reset_domain_element_trace_nodes_out_1_time = auto_element_reset_domain_element_trace_nodes_out_1_time_0; // @[ClockDomain.scala:14:9]
assign auto_element_reset_domain_element_trace_nodes_out_0_insns_0_valid = auto_element_reset_domain_element_trace_nodes_out_0_insns_0_valid_0; // @[ClockDomain.scala:14:9]
assign auto_element_reset_domain_element_trace_nodes_out_0_insns_0_iaddr = auto_element_reset_domain_element_trace_nodes_out_0_insns_0_iaddr_0; // @[ClockDomain.scala:14:9]
assign auto_element_reset_domain_element_trace_nodes_out_0_insns_0_insn = auto_element_reset_domain_element_trace_nodes_out_0_insns_0_insn_0; // @[ClockDomain.scala:14:9]
assign auto_element_reset_domain_element_trace_nodes_out_0_insns_0_priv = auto_element_reset_domain_element_trace_nodes_out_0_insns_0_priv_0; // @[ClockDomain.scala:14:9]
assign auto_element_reset_domain_element_trace_nodes_out_0_insns_0_exception = auto_element_reset_domain_element_trace_nodes_out_0_insns_0_exception_0; // @[ClockDomain.scala:14:9]
assign auto_element_reset_domain_element_trace_nodes_out_0_insns_0_interrupt = auto_element_reset_domain_element_trace_nodes_out_0_insns_0_interrupt_0; // @[ClockDomain.scala:14:9]
assign auto_element_reset_domain_element_trace_nodes_out_0_insns_0_cause = auto_element_reset_domain_element_trace_nodes_out_0_insns_0_cause_0; // @[ClockDomain.scala:14:9]
assign auto_element_reset_domain_element_trace_nodes_out_0_insns_0_tval = auto_element_reset_domain_element_trace_nodes_out_0_insns_0_tval_0; // @[ClockDomain.scala:14:9]
assign auto_element_reset_domain_element_trace_nodes_out_0_time = auto_element_reset_domain_element_trace_nodes_out_0_time_0; // @[ClockDomain.scala:14:9]
assign auto_int_out_clock_xing_out_2_sync_0 = auto_int_out_clock_xing_out_2_sync_0_0; // @[ClockDomain.scala:14:9]
assign auto_int_out_clock_xing_out_2_sync_1 = auto_int_out_clock_xing_out_2_sync_1_0; // @[ClockDomain.scala:14:9]
assign auto_int_out_clock_xing_out_2_sync_2 = auto_int_out_clock_xing_out_2_sync_2_0; // @[ClockDomain.scala:14:9]
assign auto_int_out_clock_xing_out_2_sync_3 = auto_int_out_clock_xing_out_2_sync_3_0; // @[ClockDomain.scala:14:9]
assign auto_int_out_clock_xing_out_2_sync_4 = auto_int_out_clock_xing_out_2_sync_4_0; // @[ClockDomain.scala:14:9]
assign auto_int_out_clock_xing_out_1_sync_0 = auto_int_out_clock_xing_out_1_sync_0_0; // @[ClockDomain.scala:14:9]
assign auto_int_out_clock_xing_out_1_sync_1 = auto_int_out_clock_xing_out_1_sync_1_0; // @[ClockDomain.scala:14:9]
assign auto_int_out_clock_xing_out_1_sync_2 = auto_int_out_clock_xing_out_1_sync_2_0; // @[ClockDomain.scala:14:9]
assign auto_int_out_clock_xing_out_1_sync_3 = auto_int_out_clock_xing_out_1_sync_3_0; // @[ClockDomain.scala:14:9]
assign auto_int_out_clock_xing_out_1_sync_4 = auto_int_out_clock_xing_out_1_sync_4_0; // @[ClockDomain.scala:14:9]
assign auto_int_out_clock_xing_out_0_sync_0 = auto_int_out_clock_xing_out_0_sync_0_0; // @[ClockDomain.scala:14:9]
assign auto_int_out_clock_xing_out_0_sync_1 = auto_int_out_clock_xing_out_0_sync_1_0; // @[ClockDomain.scala:14:9]
assign auto_int_out_clock_xing_out_0_sync_2 = auto_int_out_clock_xing_out_0_sync_2_0; // @[ClockDomain.scala:14:9]
assign auto_int_out_clock_xing_out_0_sync_3 = auto_int_out_clock_xing_out_0_sync_3_0; // @[ClockDomain.scala:14:9]
assign auto_int_out_clock_xing_out_0_sync_4 = auto_int_out_clock_xing_out_0_sync_4_0; // @[ClockDomain.scala:14:9]
assign auto_tl_master_clock_xing_out_a_valid = auto_tl_master_clock_xing_out_a_valid_0; // @[ClockDomain.scala:14:9]
assign auto_tl_master_clock_xing_out_a_bits_opcode = auto_tl_master_clock_xing_out_a_bits_opcode_0; // @[ClockDomain.scala:14:9]
assign auto_tl_master_clock_xing_out_a_bits_param = auto_tl_master_clock_xing_out_a_bits_param_0; // @[ClockDomain.scala:14:9]
assign auto_tl_master_clock_xing_out_a_bits_size = auto_tl_master_clock_xing_out_a_bits_size_0; // @[ClockDomain.scala:14:9]
assign auto_tl_master_clock_xing_out_a_bits_source = auto_tl_master_clock_xing_out_a_bits_source_0; // @[ClockDomain.scala:14:9]
assign auto_tl_master_clock_xing_out_a_bits_address = auto_tl_master_clock_xing_out_a_bits_address_0; // @[ClockDomain.scala:14:9]
assign auto_tl_master_clock_xing_out_a_bits_mask = auto_tl_master_clock_xing_out_a_bits_mask_0; // @[ClockDomain.scala:14:9]
assign auto_tl_master_clock_xing_out_a_bits_data = auto_tl_master_clock_xing_out_a_bits_data_0; // @[ClockDomain.scala:14:9]
assign auto_tl_master_clock_xing_out_a_bits_corrupt = auto_tl_master_clock_xing_out_a_bits_corrupt_0; // @[ClockDomain.scala:14:9]
assign auto_tl_master_clock_xing_out_b_ready = auto_tl_master_clock_xing_out_b_ready_0; // @[ClockDomain.scala:14:9]
assign auto_tl_master_clock_xing_out_c_valid = auto_tl_master_clock_xing_out_c_valid_0; // @[ClockDomain.scala:14:9]
assign auto_tl_master_clock_xing_out_c_bits_opcode = auto_tl_master_clock_xing_out_c_bits_opcode_0; // @[ClockDomain.scala:14:9]
assign auto_tl_master_clock_xing_out_c_bits_param = auto_tl_master_clock_xing_out_c_bits_param_0; // @[ClockDomain.scala:14:9]
assign auto_tl_master_clock_xing_out_c_bits_size = auto_tl_master_clock_xing_out_c_bits_size_0; // @[ClockDomain.scala:14:9]
assign auto_tl_master_clock_xing_out_c_bits_source = auto_tl_master_clock_xing_out_c_bits_source_0; // @[ClockDomain.scala:14:9]
assign auto_tl_master_clock_xing_out_c_bits_address = auto_tl_master_clock_xing_out_c_bits_address_0; // @[ClockDomain.scala:14:9]
assign auto_tl_master_clock_xing_out_c_bits_data = auto_tl_master_clock_xing_out_c_bits_data_0; // @[ClockDomain.scala:14:9]
assign auto_tl_master_clock_xing_out_c_bits_corrupt = auto_tl_master_clock_xing_out_c_bits_corrupt_0; // @[ClockDomain.scala:14:9]
assign auto_tl_master_clock_xing_out_d_ready = auto_tl_master_clock_xing_out_d_ready_0; // @[ClockDomain.scala:14:9]
assign auto_tl_master_clock_xing_out_e_valid = auto_tl_master_clock_xing_out_e_valid_0; // @[ClockDomain.scala:14:9]
assign auto_tl_master_clock_xing_out_e_bits_sink = auto_tl_master_clock_xing_out_e_bits_sink_0; // @[ClockDomain.scala:14:9]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File ShiftReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
// Similar to the Chisel ShiftRegister but allows the user to suggest a
// name to the registers that get instantiated, and
// to provide a reset value.
object ShiftRegInit {
def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T =
(0 until n).foldRight(in) {
case (i, next) => {
val r = RegNext(next, init)
name.foreach { na => r.suggestName(s"${na}_${i}") }
r
}
}
}
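// Illustrative usage sketch (hedged; `sig` and the suggested name are hypothetical, not
// part of the original file): a 2-deep, false-initialized chain of named registers is
//   val sigDly = ShiftRegInit(in = sig, n = 2, init = false.B, name = Some("sig_dly"))
// The foldRight above places "sig_dly_1" next to the input and returns "sig_dly_0" as the output.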
/** These wrap behavioral
* shift registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
* The different types vary in their reset behavior:
* AsyncResetShiftReg -- Asynchronously reset register array
* A W(width) x D(depth) sized array is constructed from D instantiations of a
 * W-wide register vector. Functionally identical to AsyncResetSynchronizerShiftReg,
* but only used for timing applications
*/
abstract class AbstractPipelineReg(w: Int = 1) extends Module {
val io = IO(new Bundle {
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
}
)
}
object AbstractPipelineReg {
def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = {
val chain = Module(gen)
name.foreach{ chain.suggestName(_) }
chain.io.d := in.asUInt
chain.io.q.asTypeOf(in)
}
}
class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) {
require(depth > 0, "Depth must be greater than 0.")
override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}"
val chain = List.tabulate(depth) { i =>
Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}")
}
chain.last.io.d := io.d
chain.last.io.en := true.B
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink.io.d := source.io.q
sink.io.en := true.B
}
io.q := chain.head.io.q
}
object AsyncResetShiftReg {
def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name)
def apply [T <: Data](in: T, depth: Int, name: Option[String]): T =
apply(in, depth, 0, name)
def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T =
apply(in, depth, init.litValue.toInt, name)
def apply [T <: Data](in: T, depth: Int, init: T): T =
apply (in, depth, init.litValue.toInt, None)
}
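// Illustrative usage sketch (hedged; names are hypothetical): the companion object wraps
// the whole depth-D register array in its own module, e.g.
//   val piped = AsyncResetShiftReg(in = someUInt, depth = 3, init = 0, name = Some("pipe"))
// instantiates a module named AsyncResetShiftReg_w<W>_d3_i0 and returns its q output
// cast back to the type of `someUInt`.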
File SynchronizerReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util.{RegEnable, Cat}
/** These wrap behavioral
* shift and next registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
*
* These are built up of *ResetSynchronizerPrimitiveShiftReg,
* intended to be replaced by the integrator's metastable flops chains or replaced
* at this level if they have a multi-bit wide synchronizer primitive.
* The different types vary in their reset behavior:
* NonSyncResetSynchronizerShiftReg -- Register array which does not have a reset pin
* AsyncResetSynchronizerShiftReg -- Asynchronously reset register array, constructed from W instantiations of D deep
* 1-bit-wide shift registers.
* SyncResetSynchronizerShiftReg -- Synchronously reset register array, constructed similarly to AsyncResetSynchronizerShiftReg
*
* [Inferred]ResetSynchronizerShiftReg -- TBD reset type by chisel3 reset inference.
*
* ClockCrossingReg -- Not made up of SynchronizerPrimitiveShiftReg. This is for single-deep flops which cross
* Clock Domains.
*/
object SynchronizerResetType extends Enumeration {
val NonSync, Inferred, Sync, Async = Value
}
// Note: this should not be used directly.
// Use the companion object to generate this with the correct reset type mixin.
private class SynchronizerPrimitiveShiftReg(
sync: Int,
init: Boolean,
resetType: SynchronizerResetType.Value)
extends AbstractPipelineReg(1) {
val initInt = if (init) 1 else 0
val initPostfix = resetType match {
case SynchronizerResetType.NonSync => ""
case _ => s"_i${initInt}"
}
override def desiredName = s"${resetType.toString}ResetSynchronizerPrimitiveShiftReg_d${sync}${initPostfix}"
val chain = List.tabulate(sync) { i =>
val reg = if (resetType == SynchronizerResetType.NonSync) Reg(Bool()) else RegInit(init.B)
reg.suggestName(s"sync_$i")
}
chain.last := io.d.asBool
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink := source
}
io.q := chain.head.asUInt
}
private object SynchronizerPrimitiveShiftReg {
def apply (in: Bool, sync: Int, init: Boolean, resetType: SynchronizerResetType.Value): Bool = {
val gen: () => SynchronizerPrimitiveShiftReg = resetType match {
case SynchronizerResetType.NonSync =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType)
case SynchronizerResetType.Async =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireAsyncReset
case SynchronizerResetType.Sync =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireSyncReset
case SynchronizerResetType.Inferred =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType)
}
AbstractPipelineReg(gen(), in)
}
}
// Note: This module may end up with a non-AsyncReset type reset.
// But the Primitives within will always have AsyncReset type.
class AsyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int)
extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"AsyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
withReset(reset.asAsyncReset){
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Async)
}
}
io.q := Cat(output.reverse)
}
object AsyncResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
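// Illustrative usage sketch (hedged; `asyncLevel` is hypothetical): a conventional
// three-flop CDC synchronizer for a level signal from another clock domain is
//   val levelSync = AsyncResetSynchronizerShiftReg(asyncLevel, sync = 3, init = 0)
// which elaborates one AsyncResetSynchronizerPrimitiveShiftReg_d3_i0 chain per input
// bit, the primitive whose generated Verilog appears below.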
// Note: This module may end up with a non-Bool type reset.
// But the Primitives within will always have Bool reset type.
@deprecated("SyncResetSynchronizerShiftReg is unecessary with Chisel3 inferred resets. Use ResetSynchronizerShiftReg which will use the inferred reset type.", "rocket-chip 1.2")
class SyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"SyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
withReset(reset.asBool){
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Sync)
}
}
io.q := Cat(output.reverse)
}
object SyncResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
if (sync == 0) in else AbstractPipelineReg(new SyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
class ResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"ResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Inferred)
}
io.q := Cat(output.reverse)
}
object ResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
AbstractPipelineReg(new ResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
class SynchronizerShiftReg(w: Int = 1, sync: Int = 3) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"SynchronizerShiftReg_w${w}_d${sync}"
val output = Seq.tabulate(w) { i =>
SynchronizerPrimitiveShiftReg(io.d(i), sync, false, SynchronizerResetType.NonSync)
}
io.q := Cat(output.reverse)
}
object SynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, name: Option[String] = None): T =
if (sync == 0) in else AbstractPipelineReg(new SynchronizerShiftReg(in.getWidth, sync), in, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, None)
def apply [T <: Data](in: T): T =
apply (in, 3, None)
}
class ClockCrossingReg(w: Int = 1, doInit: Boolean) extends Module {
override def desiredName = s"ClockCrossingReg_w${w}"
val io = IO(new Bundle{
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
val en = Input(Bool())
})
val cdc_reg = if (doInit) RegEnable(io.d, 0.U(w.W), io.en) else RegEnable(io.d, io.en)
io.q := cdc_reg
}
object ClockCrossingReg {
def apply [T <: Data](in: T, en: Bool, doInit: Boolean, name: Option[String] = None): T = {
val cdc_reg = Module(new ClockCrossingReg(in.getWidth, doInit))
name.foreach{ cdc_reg.suggestName(_) }
cdc_reg.io.d := in.asUInt
cdc_reg.io.en := en
cdc_reg.io.q.asTypeOf(in)
}
}
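// Illustrative usage sketch (hedged; names are hypothetical): unlike the synchronizers
// above, ClockCrossingReg is a single enabled register for payload bits, e.g.
//   val held = ClockCrossingReg(in = payload, en = fire, doInit = true, name = Some("cdc_hold"))
// which instantiates ClockCrossingReg_w<W> and returns its q output as the input type.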
| module AsyncResetSynchronizerPrimitiveShiftReg_d3_i0_254( // @[SynchronizerReg.scala:68:19]
input clock, // @[SynchronizerReg.scala:68:19]
input reset, // @[SynchronizerReg.scala:68:19]
input io_d, // @[ShiftReg.scala:36:14]
output io_q // @[ShiftReg.scala:36:14]
);
wire io_d_0 = io_d; // @[SynchronizerReg.scala:68:19]
wire _sync_2_T = io_d_0; // @[SynchronizerReg.scala:54:22, :68:19]
wire io_q_0; // @[SynchronizerReg.scala:68:19]
reg sync_0; // @[SynchronizerReg.scala:51:87]
assign io_q_0 = sync_0; // @[SynchronizerReg.scala:51:87, :68:19]
reg sync_1; // @[SynchronizerReg.scala:51:87]
reg sync_2; // @[SynchronizerReg.scala:51:87]
always @(posedge clock or posedge reset) begin // @[SynchronizerReg.scala:68:19]
if (reset) begin // @[SynchronizerReg.scala:68:19]
sync_0 <= 1'h0; // @[SynchronizerReg.scala:51:87]
sync_1 <= 1'h0; // @[SynchronizerReg.scala:51:87]
sync_2 <= 1'h0; // @[SynchronizerReg.scala:51:87]
end
else begin // @[SynchronizerReg.scala:68:19]
sync_0 <= sync_1; // @[SynchronizerReg.scala:51:87]
sync_1 <= sync_2; // @[SynchronizerReg.scala:51:87]
sync_2 <= _sync_2_T; // @[SynchronizerReg.scala:51:87, :54:22]
end
  end // always @(posedge, posedge)
  assign io_q = io_q_0; // @[SynchronizerReg.scala:68:19]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File HellaCache.scala:
// See LICENSE.SiFive for license details.
// See LICENSE.Berkeley for license details.
package freechips.rocketchip.rocket
import chisel3.{dontTouch, _}
import chisel3.util._
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy.bundlebridge._
import org.chipsalliance.diplomacy.lazymodule._
import freechips.rocketchip.amba.AMBAProtField
import freechips.rocketchip.diplomacy.{IdRange, TransferSizes, RegionType}
import freechips.rocketchip.tile.{L1CacheParams, HasL1CacheParameters, HasCoreParameters, CoreBundle, HasNonDiplomaticTileParameters, BaseTile, HasTileParameters}
import freechips.rocketchip.tilelink.{TLMasterParameters, TLClientNode, TLMasterPortParameters, TLEdgeOut, TLWidthWidget, TLFIFOFixer, ClientMetadata}
import freechips.rocketchip.util.{Code, RandomReplacement, ParameterizedBundle}
import freechips.rocketchip.util.{BooleanToAugmentedBoolean, IntToAugmentedInt}
import scala.collection.mutable.ListBuffer
case class DCacheParams(
nSets: Int = 64,
nWays: Int = 4,
rowBits: Int = 64,
subWordBits: Option[Int] = None,
replacementPolicy: String = "random",
nTLBSets: Int = 1,
nTLBWays: Int = 32,
nTLBBasePageSectors: Int = 4,
nTLBSuperpages: Int = 4,
tagECC: Option[String] = None,
dataECC: Option[String] = None,
dataECCBytes: Int = 1,
nMSHRs: Int = 1,
nSDQ: Int = 17,
nRPQ: Int = 16,
nMMIOs: Int = 1,
blockBytes: Int = 64,
separateUncachedResp: Boolean = false,
acquireBeforeRelease: Boolean = false,
pipelineWayMux: Boolean = false,
clockGate: Boolean = false,
scratch: Option[BigInt] = None) extends L1CacheParams {
def tagCode: Code = Code.fromString(tagECC)
def dataCode: Code = Code.fromString(dataECC)
def dataScratchpadBytes: Int = scratch.map(_ => nSets*blockBytes).getOrElse(0)
def replacement = new RandomReplacement(nWays)
def silentDrop: Boolean = !acquireBeforeRelease
require((!scratch.isDefined || nWays == 1),
"Scratchpad only allowed in direct-mapped cache.")
require((!scratch.isDefined || nMSHRs == 0),
"Scratchpad only allowed in blocking cache.")
if (scratch.isEmpty)
require(isPow2(nSets), s"nSets($nSets) must be pow2")
}
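// Illustrative configuration sketch (hedged; the scratchpad base address is hypothetical):
// the defaults describe a 64-set, 4-way cache with one MSHR; a scratchpad variant must be
// direct-mapped with no MSHRs per the requires above, e.g.
//   DCacheParams(nSets = 64, nWays = 1, nMSHRs = 0, scratch = Some(BigInt(0x80000000L)))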
trait HasL1HellaCacheParameters extends HasL1CacheParameters with HasCoreParameters {
val cacheParams = tileParams.dcache.get
val cfg = cacheParams
def wordBits = coreDataBits
def wordBytes = coreDataBytes
def subWordBits = cacheParams.subWordBits.getOrElse(wordBits)
def subWordBytes = subWordBits / 8
def wordOffBits = log2Up(wordBytes)
def beatBytes = cacheBlockBytes / cacheDataBeats
def beatWords = beatBytes / wordBytes
def beatOffBits = log2Up(beatBytes)
def idxMSB = untagBits-1
def idxLSB = blockOffBits
def offsetmsb = idxLSB-1
def offsetlsb = wordOffBits
def rowWords = rowBits/wordBits
def doNarrowRead = coreDataBits * nWays % rowBits == 0
def eccBytes = cacheParams.dataECCBytes
val eccBits = cacheParams.dataECCBytes * 8
val encBits = cacheParams.dataCode.width(eccBits)
val encWordBits = encBits * (wordBits / eccBits)
def encDataBits = cacheParams.dataCode.width(coreDataBits) // NBDCache only
def encRowBits = encDataBits*rowWords
def lrscCycles = coreParams.lrscCycles // ISA requires 16-insn LRSC sequences to succeed
def lrscBackoff = 3 // disallow LRSC reacquisition briefly
def blockProbeAfterGrantCycles = 8 // give the processor some time to issue a request after a grant
def nIOMSHRs = cacheParams.nMMIOs
def maxUncachedInFlight = cacheParams.nMMIOs
def dataScratchpadSize = cacheParams.dataScratchpadBytes
require(rowBits >= coreDataBits, s"rowBits($rowBits) < coreDataBits($coreDataBits)")
if (!usingDataScratchpad)
require(rowBits == cacheDataBits, s"rowBits($rowBits) != cacheDataBits($cacheDataBits)")
// would need offset addr for puts if data width < xlen
require(xLen <= cacheDataBits, s"xLen($xLen) > cacheDataBits($cacheDataBits)")
}
abstract class L1HellaCacheModule(implicit val p: Parameters) extends Module
with HasL1HellaCacheParameters
abstract class L1HellaCacheBundle(implicit val p: Parameters) extends ParameterizedBundle()(p)
with HasL1HellaCacheParameters
/** Bundle definitions for HellaCache interfaces */
trait HasCoreMemOp extends HasL1HellaCacheParameters {
val addr = UInt(coreMaxAddrBits.W)
val idx = (usingVM && untagBits > pgIdxBits).option(UInt(coreMaxAddrBits.W))
val tag = UInt((coreParams.dcacheReqTagBits + log2Ceil(dcacheArbPorts)).W)
val cmd = UInt(M_SZ.W)
val size = UInt(log2Ceil(coreDataBytes.log2 + 1).W)
val signed = Bool()
val dprv = UInt(PRV.SZ.W)
val dv = Bool()
}
trait HasCoreData extends HasCoreParameters {
val data = UInt(coreDataBits.W)
val mask = UInt(coreDataBytes.W)
}
class HellaCacheReqInternal(implicit p: Parameters) extends CoreBundle()(p) with HasCoreMemOp {
val phys = Bool()
val no_resp = Bool() // The dcache may omit generating a response for this request
val no_alloc = Bool()
val no_xcpt = Bool()
}
class HellaCacheReq(implicit p: Parameters) extends HellaCacheReqInternal()(p) with HasCoreData
class HellaCacheResp(implicit p: Parameters) extends CoreBundle()(p)
with HasCoreMemOp
with HasCoreData {
val replay = Bool()
val has_data = Bool()
val data_word_bypass = UInt(coreDataBits.W)
val data_raw = UInt(coreDataBits.W)
val store_data = UInt(coreDataBits.W)
}
class AlignmentExceptions extends Bundle {
val ld = Bool()
val st = Bool()
}
class HellaCacheExceptions extends Bundle {
val ma = new AlignmentExceptions
val pf = new AlignmentExceptions
val gf = new AlignmentExceptions
val ae = new AlignmentExceptions
}
class HellaCacheWriteData(implicit p: Parameters) extends CoreBundle()(p) with HasCoreData
class HellaCachePerfEvents extends Bundle {
val acquire = Bool()
val release = Bool()
val grant = Bool()
val tlbMiss = Bool()
val blocked = Bool()
val canAcceptStoreThenLoad = Bool()
val canAcceptStoreThenRMW = Bool()
val canAcceptLoadThenLoad = Bool()
val storeBufferEmptyAfterLoad = Bool()
val storeBufferEmptyAfterStore = Bool()
}
// interface between D$ and processor/DTLB
class HellaCacheIO(implicit p: Parameters) extends CoreBundle()(p) {
val req = Decoupled(new HellaCacheReq)
val s1_kill = Output(Bool()) // kill previous cycle's req
val s1_data = Output(new HellaCacheWriteData()) // data for previous cycle's req
val s2_nack = Input(Bool()) // req from two cycles ago is rejected
val s2_nack_cause_raw = Input(Bool()) // reason for nack is store-load RAW hazard (performance hint)
val s2_kill = Output(Bool()) // kill req from two cycles ago
val s2_uncached = Input(Bool()) // advisory signal that the access is MMIO
val s2_paddr = Input(UInt(paddrBits.W)) // translated address
val resp = Flipped(Valid(new HellaCacheResp))
val replay_next = Input(Bool())
val s2_xcpt = Input(new HellaCacheExceptions)
val s2_gpa = Input(UInt(vaddrBitsExtended.W))
val s2_gpa_is_pte = Input(Bool())
val uncached_resp = tileParams.dcache.get.separateUncachedResp.option(Flipped(Decoupled(new HellaCacheResp)))
val ordered = Input(Bool())
val store_pending = Input(Bool()) // there is a store in a store buffer somewhere
val perf = Input(new HellaCachePerfEvents())
val keep_clock_enabled = Output(Bool()) // should D$ avoid clock-gating itself?
val clock_enabled = Input(Bool()) // is D$ currently being clocked?
}
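// Illustrative usage sketch (hedged; right-hand-side names are hypothetical): a core-side
// load is issued roughly as
//   io.req.valid := wantLoad
//   io.req.bits.addr := loadAddr
//   io.req.bits.cmd := M_XRD
//   io.req.bits.size := 3.U // 8 bytes on a 64-bit core
// and may later be squashed via s1_kill/s2_kill; the reply arrives on io.resp unless s2_nack.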
/** Base classes for Diplomatic TL2 HellaCaches */
abstract class HellaCache(tileId: Int)(implicit p: Parameters) extends LazyModule
with HasNonDiplomaticTileParameters {
protected val cfg = tileParams.dcache.get
protected def cacheClientParameters = cfg.scratch.map(x => Seq()).getOrElse(Seq(TLMasterParameters.v1(
name = s"Core ${tileId} DCache",
sourceId = IdRange(0, 1 max cfg.nMSHRs),
supportsProbe = TransferSizes(cfg.blockBytes, cfg.blockBytes))))
protected def mmioClientParameters = Seq(TLMasterParameters.v1(
name = s"Core ${tileId} DCache MMIO",
sourceId = IdRange(firstMMIO, firstMMIO + cfg.nMMIOs),
requestFifo = true))
def firstMMIO = (cacheClientParameters.map(_.sourceId.end) :+ 0).max
val node = TLClientNode(Seq(TLMasterPortParameters.v1(
clients = cacheClientParameters ++ mmioClientParameters,
minLatency = 1,
requestFields = tileParams.core.useVM.option(Seq()).getOrElse(Seq(AMBAProtField())))))
val hartIdSinkNodeOpt = cfg.scratch.map(_ => BundleBridgeSink[UInt]())
val mmioAddressPrefixSinkNodeOpt = cfg.scratch.map(_ => BundleBridgeSink[UInt]())
val module: HellaCacheModule
def flushOnFenceI = cfg.scratch.isEmpty && !node.edges.out(0).manager.managers.forall(m => !m.supportsAcquireB || !m.executable || m.regionType >= RegionType.TRACKED || m.regionType <= RegionType.IDEMPOTENT)
def canSupportCFlushLine = !usingVM || cfg.blockBytes * cfg.nSets <= (1 << pgIdxBits)
require(!tileParams.core.haveCFlush || cfg.scratch.isEmpty, "CFLUSH_D_L1 instruction requires a D$")
}
class HellaCacheBundle(implicit p: Parameters) extends CoreBundle()(p) {
val cpu = Flipped(new HellaCacheIO)
val ptw = new TLBPTWIO()
val errors = new DCacheErrors
val tlb_port = new DCacheTLBPort
}
class HellaCacheModule(outer: HellaCache) extends LazyModuleImp(outer)
with HasL1HellaCacheParameters {
implicit val edge: TLEdgeOut = outer.node.edges.out(0)
val (tl_out, _) = outer.node.out(0)
val io = IO(new HellaCacheBundle)
val io_hartid = outer.hartIdSinkNodeOpt.map(_.bundle)
val io_mmio_address_prefix = outer.mmioAddressPrefixSinkNodeOpt.map(_.bundle)
dontTouch(io.cpu.resp) // Users like to monitor these fields even if the core ignores some signals
dontTouch(io.cpu.s1_data)
require(rowBits == edge.bundle.dataBits)
private val fifoManagers = edge.manager.managers.filter(TLFIFOFixer.allVolatile)
fifoManagers.foreach { m =>
require (m.fifoId == fifoManagers.head.fifoId,
s"IOMSHRs must be FIFO for all regions with effects, but HellaCache sees\n"+
s"${m.nodePath.map(_.name)}\nversus\n${fifoManagers.head.nodePath.map(_.name)}")
}
}
/** Support overriding which HellaCache is instantiated */
case object BuildHellaCache extends Field[BaseTile => Parameters => HellaCache](HellaCacheFactory.apply)
object HellaCacheFactory {
def apply(tile: BaseTile)(p: Parameters): HellaCache = {
if (tile.tileParams.dcache.get.nMSHRs == 0)
new DCache(tile.tileId, tile.crossing)(p)
else
new NonBlockingDCache(tile.tileId)(p)
}
}
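// Illustrative note (hedged): with the default DCacheParams above (nMSHRs = 1) this factory
// elaborates a NonBlockingDCache; configurations with nMSHRs = 0 (as a scratchpad requires)
// get the blocking DCache instead.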
/** Mix-ins for constructing tiles that have a HellaCache */
trait HasHellaCache { this: BaseTile =>
val module: HasHellaCacheModule
implicit val p: Parameters
var nDCachePorts = 0
lazy val dcache: HellaCache = LazyModule(p(BuildHellaCache)(this)(p))
tlMasterXbar.node := TLWidthWidget(tileParams.dcache.get.rowBits/8) := dcache.node
dcache.hartIdSinkNodeOpt.map { _ := hartIdNexusNode }
dcache.mmioAddressPrefixSinkNodeOpt.map { _ := mmioAddressPrefixNexusNode }
InModuleBody {
dcache.module.io.tlb_port := DontCare
}
}
trait HasHellaCacheModule {
val outer: HasHellaCache with HasTileParameters
implicit val p: Parameters
val dcachePorts = ListBuffer[HellaCacheIO]()
val dcacheArb = Module(new HellaCacheArbiter(outer.nDCachePorts)(outer.p))
outer.dcache.module.io.cpu <> dcacheArb.io.mem
}
/** Metadata array used for all HellaCaches */
class L1Metadata(implicit p: Parameters) extends L1HellaCacheBundle()(p) {
val coh = new ClientMetadata
val tag = UInt(tagBits.W)
}
object L1Metadata {
def apply(tag: Bits, coh: ClientMetadata)(implicit p: Parameters) = {
val meta = Wire(new L1Metadata)
meta.tag := tag
meta.coh := coh
meta
}
}
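// Added usage sketch (commentary, not part of the original source): a cache can build the
// metadata write value with something like `L1Metadata(write_tag, ClientMetadata.onReset)`,
// where `write_tag` is a hypothetical signal name; apply() simply bundles the tag together
// with the coherence state.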
class L1MetaReadReq(implicit p: Parameters) extends L1HellaCacheBundle()(p) {
val idx = UInt(idxBits.W)
val way_en = UInt(nWays.W)
val tag = UInt(tagBits.W)
}
class L1MetaWriteReq(implicit p: Parameters) extends L1MetaReadReq()(p) {
val data = new L1Metadata
}
class L1MetadataArray[T <: L1Metadata](onReset: () => T)(implicit p: Parameters) extends L1HellaCacheModule()(p) {
val rstVal = onReset()
val io = IO(new Bundle {
val read = Flipped(Decoupled(new L1MetaReadReq))
val write = Flipped(Decoupled(new L1MetaWriteReq))
val resp = Output(Vec(nWays, rstVal.cloneType))
})
val rst_cnt = RegInit(0.U(log2Up(nSets+1).W))
val rst = rst_cnt < nSets.U
val waddr = Mux(rst, rst_cnt, io.write.bits.idx)
val wdata = Mux(rst, rstVal, io.write.bits.data).asUInt
val wmask = Mux(rst || (nWays == 1).B, (-1).S, io.write.bits.way_en.asSInt).asBools
val rmask = Mux(rst || (nWays == 1).B, (-1).S, io.read.bits.way_en.asSInt).asBools
when (rst) { rst_cnt := rst_cnt+1.U }
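  // Added note (not in the original source): on reset, rst_cnt sweeps every set exactly once,
  // writing rstVal to all ways (wmask is all-ones while rst is high); external writes are held
  // off until the sweep completes, since io.write.ready := !rst below.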
val metabits = rstVal.getWidth
val tag_array = SyncReadMem(nSets, Vec(nWays, UInt(metabits.W)))
val wen = rst || io.write.valid
when (wen) {
tag_array.write(waddr, VecInit.fill(nWays)(wdata), wmask)
}
io.resp := tag_array.read(io.read.bits.idx, io.read.fire).map(_.asTypeOf(chiselTypeOf(rstVal)))
io.read.ready := !wen // so really this could be a 6T RAM
io.write.ready := !rst
}
| module tag_array_1( // @[HellaCache.scala:339:30]
input [5:0] RW0_addr,
input RW0_en,
input RW0_clk,
input RW0_wmode,
input [87:0] RW0_wdata,
output [87:0] RW0_rdata,
input [3:0] RW0_wmask
);
tag_array_ext tag_array_ext ( // @[HellaCache.scala:339:30]
.RW0_addr (RW0_addr),
.RW0_en (RW0_en),
.RW0_clk (RW0_clk),
.RW0_wmode (RW0_wmode),
.RW0_wdata (RW0_wdata),
.RW0_rdata (RW0_rdata),
.RW0_wmask (RW0_wmask)
); // @[HellaCache.scala:339:30]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File RecFNToRecFN.scala:
/*============================================================================
This Chisel source file is part of a pre-release version of the HardFloat IEEE
Floating-Point Arithmetic Package, by John R. Hauser (with some contributions
from Yunsup Lee and Andrew Waterman, mainly concerning testing).
Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the
University of California. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions, and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions, and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the University nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
=============================================================================*/
package hardfloat
import chisel3._
import consts._
class
RecFNToRecFN(
inExpWidth: Int, inSigWidth: Int, outExpWidth: Int, outSigWidth: Int)
extends chisel3.RawModule
{
val io = IO(new Bundle {
val in = Input(Bits((inExpWidth + inSigWidth + 1).W))
val roundingMode = Input(UInt(3.W))
val detectTininess = Input(UInt(1.W))
val out = Output(Bits((outExpWidth + outSigWidth + 1).W))
val exceptionFlags = Output(Bits(5.W))
})
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val rawIn = rawFloatFromRecFN(inExpWidth, inSigWidth, io.in);
if ((inExpWidth == outExpWidth) && (inSigWidth <= outSigWidth)) {
//--------------------------------------------------------------------
//--------------------------------------------------------------------
io.out := io.in<<(outSigWidth - inSigWidth)
io.exceptionFlags := isSigNaNRawFloat(rawIn) ## 0.U(4.W)
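        // Added note (not in the original source): with the same exponent width and a
        // significand at least as wide, the recoded value converts without rounding, so the
        // significand is simply left-aligned; the only exception that can be raised here is
        // invalid, for a signaling-NaN input.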
} else {
//--------------------------------------------------------------------
//--------------------------------------------------------------------
val roundAnyRawFNToRecFN =
Module(
new RoundAnyRawFNToRecFN(
inExpWidth,
inSigWidth,
outExpWidth,
outSigWidth,
flRoundOpt_sigMSBitAlwaysZero
))
roundAnyRawFNToRecFN.io.invalidExc := isSigNaNRawFloat(rawIn)
roundAnyRawFNToRecFN.io.infiniteExc := false.B
roundAnyRawFNToRecFN.io.in := rawIn
roundAnyRawFNToRecFN.io.roundingMode := io.roundingMode
roundAnyRawFNToRecFN.io.detectTininess := io.detectTininess
io.out := roundAnyRawFNToRecFN.io.out
io.exceptionFlags := roundAnyRawFNToRecFN.io.exceptionFlags
}
}
File rawFloatFromRecFN.scala:
/*============================================================================
This Chisel source file is part of a pre-release version of the HardFloat IEEE
Floating-Point Arithmetic Package, by John R. Hauser (with some contributions
from Yunsup Lee and Andrew Waterman, mainly concerning testing).
Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the
University of California. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions, and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions, and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the University nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
=============================================================================*/
package hardfloat
import chisel3._
import chisel3.util._
/*----------------------------------------------------------------------------
| In the result, no more than one of 'isNaN', 'isInf', and 'isZero' will be
| set.
*----------------------------------------------------------------------------*/
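/*----------------------------------------------------------------------------
| Added commentary (inferred from the code below, not part of the original
| source): the three MSBs of the recoded exponent classify the value. All
| zeros means zero; the top two bits both set means a special value, with the
| next bit selecting NaN (1) or infinity (0). Finite nonzero numbers fall in
| between, which is why at most one of the flags above can be set.
*----------------------------------------------------------------------------*/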
object rawFloatFromRecFN
{
def apply(expWidth: Int, sigWidth: Int, in: Bits): RawFloat =
{
val exp = in(expWidth + sigWidth - 1, sigWidth - 1)
val isZero = exp(expWidth, expWidth - 2) === 0.U
val isSpecial = exp(expWidth, expWidth - 1) === 3.U
val out = Wire(new RawFloat(expWidth, sigWidth))
out.isNaN := isSpecial && exp(expWidth - 2)
out.isInf := isSpecial && ! exp(expWidth - 2)
out.isZero := isZero
out.sign := in(expWidth + sigWidth)
out.sExp := exp.zext
out.sig := 0.U(1.W) ## ! isZero ## in(sigWidth - 2, 0)
out
}
}
File common.scala:
/*============================================================================
This Chisel source file is part of a pre-release version of the HardFloat IEEE
Floating-Point Arithmetic Package, by John R. Hauser (with some contributions
from Yunsup Lee and Andrew Waterman, mainly concerning testing).
Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018 The Regents of
the University of California. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions, and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions, and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the University nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
=============================================================================*/
package hardfloat
import chisel3._
object consts {
/*------------------------------------------------------------------------
| For rounding to integer values, rounding mode 'odd' rounds to minimum
| magnitude instead, same as 'minMag'.
*------------------------------------------------------------------------*/
def round_near_even = "b000".U(3.W)
def round_minMag = "b001".U(3.W)
def round_min = "b010".U(3.W)
def round_max = "b011".U(3.W)
def round_near_maxMag = "b100".U(3.W)
def round_odd = "b110".U(3.W)
/*------------------------------------------------------------------------
*------------------------------------------------------------------------*/
def tininess_beforeRounding = 0.U
def tininess_afterRounding = 1.U
/*------------------------------------------------------------------------
*------------------------------------------------------------------------*/
def flRoundOpt_sigMSBitAlwaysZero = 1
def flRoundOpt_subnormsAlwaysExact = 2
def flRoundOpt_neverUnderflows = 4
def flRoundOpt_neverOverflows = 8
/*------------------------------------------------------------------------
*------------------------------------------------------------------------*/
def divSqrtOpt_twoBitsPerCycle = 16
}
class RawFloat(val expWidth: Int, val sigWidth: Int) extends Bundle
{
val isNaN: Bool = Bool() // overrides all other fields
val isInf: Bool = Bool() // overrides 'isZero', 'sExp', and 'sig'
val isZero: Bool = Bool() // overrides 'sExp' and 'sig'
val sign: Bool = Bool()
val sExp: SInt = SInt((expWidth + 2).W)
val sig: UInt = UInt((sigWidth + 1).W) // 2 m.s. bits cannot both be 0
}
//*** CHANGE THIS INTO A '.isSigNaN' METHOD OF THE 'RawFloat' CLASS:
object isSigNaNRawFloat
{
def apply(in: RawFloat): Bool = in.isNaN && !in.sig(in.sigWidth - 2)
}
| module RecFNToRecFN_8( // @[RecFNToRecFN.scala:44:5]
input [64:0] io_in, // @[RecFNToRecFN.scala:48:16]
input [2:0] io_roundingMode, // @[RecFNToRecFN.scala:48:16]
output [16:0] io_out, // @[RecFNToRecFN.scala:48:16]
output [4:0] io_exceptionFlags // @[RecFNToRecFN.scala:48:16]
);
wire [64:0] io_in_0 = io_in; // @[RecFNToRecFN.scala:44:5]
wire [2:0] io_roundingMode_0 = io_roundingMode; // @[RecFNToRecFN.scala:44:5]
wire io_detectTininess = 1'h1; // @[RecFNToRecFN.scala:44:5, :48:16, :72:19]
wire [16:0] io_out_0; // @[RecFNToRecFN.scala:44:5]
wire [4:0] io_exceptionFlags_0; // @[RecFNToRecFN.scala:44:5]
wire [11:0] rawIn_exp = io_in_0[63:52]; // @[rawFloatFromRecFN.scala:51:21]
wire [2:0] _rawIn_isZero_T = rawIn_exp[11:9]; // @[rawFloatFromRecFN.scala:51:21, :52:28]
wire rawIn_isZero = _rawIn_isZero_T == 3'h0; // @[rawFloatFromRecFN.scala:52:{28,53}]
wire rawIn_isZero_0 = rawIn_isZero; // @[rawFloatFromRecFN.scala:52:53, :55:23]
wire [1:0] _rawIn_isSpecial_T = rawIn_exp[11:10]; // @[rawFloatFromRecFN.scala:51:21, :53:28]
wire rawIn_isSpecial = &_rawIn_isSpecial_T; // @[rawFloatFromRecFN.scala:53:{28,53}]
wire _rawIn_out_isNaN_T_1; // @[rawFloatFromRecFN.scala:56:33]
wire _rawIn_out_isInf_T_2; // @[rawFloatFromRecFN.scala:57:33]
wire _rawIn_out_sign_T; // @[rawFloatFromRecFN.scala:59:25]
wire [12:0] _rawIn_out_sExp_T; // @[rawFloatFromRecFN.scala:60:27]
wire [53:0] _rawIn_out_sig_T_3; // @[rawFloatFromRecFN.scala:61:44]
wire rawIn_isNaN; // @[rawFloatFromRecFN.scala:55:23]
wire rawIn_isInf; // @[rawFloatFromRecFN.scala:55:23]
wire rawIn_sign; // @[rawFloatFromRecFN.scala:55:23]
wire [12:0] rawIn_sExp; // @[rawFloatFromRecFN.scala:55:23]
wire [53:0] rawIn_sig; // @[rawFloatFromRecFN.scala:55:23]
wire _rawIn_out_isNaN_T = rawIn_exp[9]; // @[rawFloatFromRecFN.scala:51:21, :56:41]
wire _rawIn_out_isInf_T = rawIn_exp[9]; // @[rawFloatFromRecFN.scala:51:21, :56:41, :57:41]
assign _rawIn_out_isNaN_T_1 = rawIn_isSpecial & _rawIn_out_isNaN_T; // @[rawFloatFromRecFN.scala:53:53, :56:{33,41}]
assign rawIn_isNaN = _rawIn_out_isNaN_T_1; // @[rawFloatFromRecFN.scala:55:23, :56:33]
wire _rawIn_out_isInf_T_1 = ~_rawIn_out_isInf_T; // @[rawFloatFromRecFN.scala:57:{36,41}]
assign _rawIn_out_isInf_T_2 = rawIn_isSpecial & _rawIn_out_isInf_T_1; // @[rawFloatFromRecFN.scala:53:53, :57:{33,36}]
assign rawIn_isInf = _rawIn_out_isInf_T_2; // @[rawFloatFromRecFN.scala:55:23, :57:33]
assign _rawIn_out_sign_T = io_in_0[64]; // @[rawFloatFromRecFN.scala:59:25]
assign rawIn_sign = _rawIn_out_sign_T; // @[rawFloatFromRecFN.scala:55:23, :59:25]
assign _rawIn_out_sExp_T = {1'h0, rawIn_exp}; // @[rawFloatFromRecFN.scala:51:21, :60:27]
assign rawIn_sExp = _rawIn_out_sExp_T; // @[rawFloatFromRecFN.scala:55:23, :60:27]
wire _rawIn_out_sig_T = ~rawIn_isZero; // @[rawFloatFromRecFN.scala:52:53, :61:35]
wire [1:0] _rawIn_out_sig_T_1 = {1'h0, _rawIn_out_sig_T}; // @[rawFloatFromRecFN.scala:61:{32,35}]
wire [51:0] _rawIn_out_sig_T_2 = io_in_0[51:0]; // @[rawFloatFromRecFN.scala:61:49]
assign _rawIn_out_sig_T_3 = {_rawIn_out_sig_T_1, _rawIn_out_sig_T_2}; // @[rawFloatFromRecFN.scala:61:{32,44,49}]
assign rawIn_sig = _rawIn_out_sig_T_3; // @[rawFloatFromRecFN.scala:55:23, :61:44]
wire _roundAnyRawFNToRecFN_io_invalidExc_T = rawIn_sig[51]; // @[rawFloatFromRecFN.scala:55:23]
wire _roundAnyRawFNToRecFN_io_invalidExc_T_1 = ~_roundAnyRawFNToRecFN_io_invalidExc_T; // @[common.scala:82:{49,56}]
wire _roundAnyRawFNToRecFN_io_invalidExc_T_2 = rawIn_isNaN & _roundAnyRawFNToRecFN_io_invalidExc_T_1; // @[rawFloatFromRecFN.scala:55:23]
RoundAnyRawFNToRecFN_ie11_is53_oe5_os11_4 roundAnyRawFNToRecFN ( // @[RecFNToRecFN.scala:72:19]
.io_invalidExc (_roundAnyRawFNToRecFN_io_invalidExc_T_2), // @[common.scala:82:46]
.io_in_isNaN (rawIn_isNaN), // @[rawFloatFromRecFN.scala:55:23]
.io_in_isInf (rawIn_isInf), // @[rawFloatFromRecFN.scala:55:23]
.io_in_isZero (rawIn_isZero_0), // @[rawFloatFromRecFN.scala:55:23]
.io_in_sign (rawIn_sign), // @[rawFloatFromRecFN.scala:55:23]
.io_in_sExp (rawIn_sExp), // @[rawFloatFromRecFN.scala:55:23]
.io_in_sig (rawIn_sig), // @[rawFloatFromRecFN.scala:55:23]
.io_roundingMode (io_roundingMode_0), // @[RecFNToRecFN.scala:44:5]
.io_out (io_out_0),
.io_exceptionFlags (io_exceptionFlags_0)
); // @[RecFNToRecFN.scala:72:19]
assign io_out = io_out_0; // @[RecFNToRecFN.scala:44:5]
assign io_exceptionFlags = io_exceptionFlags_0; // @[RecFNToRecFN.scala:44:5]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File InputUnit.scala:
package constellation.router
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.{Field, Parameters}
import freechips.rocketchip.util._
import constellation.channel._
import constellation.routing.{FlowRoutingBundle}
import constellation.noc.{HasNoCParams}
class AbstractInputUnitIO(
val cParam: BaseChannelParams,
val outParams: Seq[ChannelParams],
val egressParams: Seq[EgressChannelParams],
)(implicit val p: Parameters) extends Bundle with HasRouterOutputParams {
val nodeId = cParam.destId
val router_req = Decoupled(new RouteComputerReq)
val router_resp = Input(new RouteComputerResp(outParams, egressParams))
val vcalloc_req = Decoupled(new VCAllocReq(cParam, outParams, egressParams))
val vcalloc_resp = Input(new VCAllocResp(outParams, egressParams))
val out_credit_available = Input(MixedVec(allOutParams.map { u => Vec(u.nVirtualChannels, Bool()) }))
val salloc_req = Vec(cParam.destSpeedup, Decoupled(new SwitchAllocReq(outParams, egressParams)))
val out = Vec(cParam.destSpeedup, Valid(new SwitchBundle(outParams, egressParams)))
val debug = Output(new Bundle {
val va_stall = UInt(log2Ceil(cParam.nVirtualChannels).W)
val sa_stall = UInt(log2Ceil(cParam.nVirtualChannels).W)
})
val block = Input(Bool())
}
abstract class AbstractInputUnit(
val cParam: BaseChannelParams,
val outParams: Seq[ChannelParams],
val egressParams: Seq[EgressChannelParams]
)(implicit val p: Parameters) extends Module with HasRouterOutputParams with HasNoCParams {
val nodeId = cParam.destId
def io: AbstractInputUnitIO
}
class InputBuffer(cParam: ChannelParams)(implicit p: Parameters) extends Module {
val nVirtualChannels = cParam.nVirtualChannels
val io = IO(new Bundle {
val enq = Flipped(Vec(cParam.srcSpeedup, Valid(new Flit(cParam.payloadBits))))
val deq = Vec(cParam.nVirtualChannels, Decoupled(new BaseFlit(cParam.payloadBits)))
})
val useOutputQueues = cParam.useOutputQueues
val delims = if (useOutputQueues) {
cParam.virtualChannelParams.map(u => if (u.traversable) u.bufferSize else 0).scanLeft(0)(_+_)
} else {
// If no queuing, have to add an additional slot since head == tail implies empty
// TODO this should be fixed, should use all slots available
cParam.virtualChannelParams.map(u => if (u.traversable) u.bufferSize + 1 else 0).scanLeft(0)(_+_)
}
val starts = delims.dropRight(1).zipWithIndex.map { case (s,i) =>
if (cParam.virtualChannelParams(i).traversable) s else 0
}
val ends = delims.tail.zipWithIndex.map { case (s,i) =>
if (cParam.virtualChannelParams(i).traversable) s else 0
}
val fullSize = delims.last
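  // Illustrative example (added commentary, assumed parameters): with output queues and three
  // VCs of buffer sizes 2, 1, 3 where VC1 is not traversable, the per-VC contributions are
  // 2, 0, 3, so delims = Seq(0, 2, 2, 5), starts = Seq(0, 0, 2), ends = Seq(2, 0, 5) and
  // fullSize = 5: VC0 owns slots [0,2), VC2 owns [2,5), and VC1 gets none. Without output
  // queues, each traversable VC would get one extra slot.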
// Ugly case. Use multiple queues
if ((cParam.srcSpeedup > 1 || cParam.destSpeedup > 1 || fullSize <= 1) || !cParam.unifiedBuffer) {
require(useOutputQueues)
val qs = cParam.virtualChannelParams.map(v => Module(new Queue(new BaseFlit(cParam.payloadBits), v.bufferSize)))
qs.zipWithIndex.foreach { case (q,i) =>
val sel = io.enq.map(f => f.valid && f.bits.virt_channel_id === i.U)
q.io.enq.valid := sel.orR
q.io.enq.bits.head := Mux1H(sel, io.enq.map(_.bits.head))
q.io.enq.bits.tail := Mux1H(sel, io.enq.map(_.bits.tail))
q.io.enq.bits.payload := Mux1H(sel, io.enq.map(_.bits.payload))
io.deq(i) <> q.io.deq
}
} else {
val mem = Mem(fullSize, new BaseFlit(cParam.payloadBits))
val heads = RegInit(VecInit(starts.map(_.U(log2Ceil(fullSize).W))))
val tails = RegInit(VecInit(starts.map(_.U(log2Ceil(fullSize).W))))
val empty = (heads zip tails).map(t => t._1 === t._2)
val qs = Seq.fill(nVirtualChannels) { Module(new Queue(new BaseFlit(cParam.payloadBits), 1, pipe=true)) }
qs.foreach(_.io.enq.valid := false.B)
qs.foreach(_.io.enq.bits := DontCare)
val vc_sel = UIntToOH(io.enq(0).bits.virt_channel_id)
val flit = Wire(new BaseFlit(cParam.payloadBits))
val direct_to_q = (Mux1H(vc_sel, qs.map(_.io.enq.ready)) && Mux1H(vc_sel, empty)) && useOutputQueues.B
flit.head := io.enq(0).bits.head
flit.tail := io.enq(0).bits.tail
flit.payload := io.enq(0).bits.payload
when (io.enq(0).valid && !direct_to_q) {
val tail = tails(io.enq(0).bits.virt_channel_id)
mem.write(tail, flit)
tails(io.enq(0).bits.virt_channel_id) := Mux(
tail === Mux1H(vc_sel, ends.map(_ - 1).map(_ max 0).map(_.U)),
Mux1H(vc_sel, starts.map(_.U)),
tail + 1.U)
} .elsewhen (io.enq(0).valid && direct_to_q) {
for (i <- 0 until nVirtualChannels) {
when (io.enq(0).bits.virt_channel_id === i.U) {
qs(i).io.enq.valid := true.B
qs(i).io.enq.bits := flit
}
}
}
if (useOutputQueues) {
val can_to_q = (0 until nVirtualChannels).map { i => !empty(i) && qs(i).io.enq.ready }
val to_q_oh = PriorityEncoderOH(can_to_q)
val to_q = OHToUInt(to_q_oh)
when (can_to_q.orR) {
val head = Mux1H(to_q_oh, heads)
heads(to_q) := Mux(
head === Mux1H(to_q_oh, ends.map(_ - 1).map(_ max 0).map(_.U)),
Mux1H(to_q_oh, starts.map(_.U)),
head + 1.U)
for (i <- 0 until nVirtualChannels) {
when (to_q_oh(i)) {
qs(i).io.enq.valid := true.B
qs(i).io.enq.bits := mem.read(head)
}
}
}
for (i <- 0 until nVirtualChannels) {
io.deq(i) <> qs(i).io.deq
}
} else {
qs.map(_.io.deq.ready := false.B)
val ready_sel = io.deq.map(_.ready)
val fire = io.deq.map(_.fire)
assert(PopCount(fire) <= 1.U)
val head = Mux1H(fire, heads)
when (fire.orR) {
val fire_idx = OHToUInt(fire)
heads(fire_idx) := Mux(
head === Mux1H(fire, ends.map(_ - 1).map(_ max 0).map(_.U)),
Mux1H(fire, starts.map(_.U)),
head + 1.U)
}
val read_flit = mem.read(head)
for (i <- 0 until nVirtualChannels) {
io.deq(i).valid := !empty(i)
io.deq(i).bits := read_flit
}
}
}
}
class InputUnit(cParam: ChannelParams, outParams: Seq[ChannelParams],
egressParams: Seq[EgressChannelParams],
combineRCVA: Boolean, combineSAST: Boolean
)
(implicit p: Parameters) extends AbstractInputUnit(cParam, outParams, egressParams)(p) {
val nVirtualChannels = cParam.nVirtualChannels
val virtualChannelParams = cParam.virtualChannelParams
class InputUnitIO extends AbstractInputUnitIO(cParam, outParams, egressParams) {
val in = Flipped(new Channel(cParam.asInstanceOf[ChannelParams]))
}
val io = IO(new InputUnitIO)
val g_i :: g_r :: g_v :: g_a :: g_c :: Nil = Enum(5)
class InputState extends Bundle {
val g = UInt(3.W)
val vc_sel = MixedVec(allOutParams.map { u => Vec(u.nVirtualChannels, Bool()) })
val flow = new FlowRoutingBundle
val fifo_deps = UInt(nVirtualChannels.W)
}
val input_buffer = Module(new InputBuffer(cParam))
for (i <- 0 until cParam.srcSpeedup) {
input_buffer.io.enq(i) := io.in.flit(i)
}
input_buffer.io.deq.foreach(_.ready := false.B)
val route_arbiter = Module(new Arbiter(
new RouteComputerReq, nVirtualChannels
))
io.router_req <> route_arbiter.io.out
val states = Reg(Vec(nVirtualChannels, new InputState))
val anyFifo = cParam.possibleFlows.map(_.fifo).reduce(_||_)
val allFifo = cParam.possibleFlows.map(_.fifo).reduce(_&&_)
if (anyFifo) {
val idle_mask = VecInit(states.map(_.g === g_i)).asUInt
for (s <- states)
for (i <- 0 until nVirtualChannels)
s.fifo_deps := s.fifo_deps & ~idle_mask
}
for (i <- 0 until cParam.srcSpeedup) {
when (io.in.flit(i).fire && io.in.flit(i).bits.head) {
val id = io.in.flit(i).bits.virt_channel_id
assert(id < nVirtualChannels.U)
assert(states(id).g === g_i)
val at_dest = io.in.flit(i).bits.flow.egress_node === nodeId.U
states(id).g := Mux(at_dest, g_v, g_r)
states(id).vc_sel.foreach(_.foreach(_ := false.B))
for (o <- 0 until nEgress) {
when (o.U === io.in.flit(i).bits.flow.egress_node_id) {
states(id).vc_sel(o+nOutputs)(0) := true.B
}
}
states(id).flow := io.in.flit(i).bits.flow
if (anyFifo) {
val fifo = cParam.possibleFlows.filter(_.fifo).map(_.isFlow(io.in.flit(i).bits.flow)).toSeq.orR
states(id).fifo_deps := VecInit(states.zipWithIndex.map { case (s, j) =>
s.g =/= g_i && s.flow.asUInt === io.in.flit(i).bits.flow.asUInt && j.U =/= id
}).asUInt
}
}
}
(route_arbiter.io.in zip states).zipWithIndex.map { case ((i,s),idx) =>
if (virtualChannelParams(idx).traversable) {
i.valid := s.g === g_r
i.bits.flow := s.flow
i.bits.src_virt_id := idx.U
when (i.fire) { s.g := g_v }
} else {
i.valid := false.B
i.bits := DontCare
}
}
when (io.router_req.fire) {
val id = io.router_req.bits.src_virt_id
assert(states(id).g === g_r)
states(id).g := g_v
for (i <- 0 until nVirtualChannels) {
when (i.U === id) {
states(i).vc_sel := io.router_resp.vc_sel
}
}
}
val mask = RegInit(0.U(nVirtualChannels.W))
val vcalloc_reqs = Wire(Vec(nVirtualChannels, new VCAllocReq(cParam, outParams, egressParams)))
val vcalloc_vals = Wire(Vec(nVirtualChannels, Bool()))
val vcalloc_filter = PriorityEncoderOH(Cat(vcalloc_vals.asUInt, vcalloc_vals.asUInt & ~mask))
val vcalloc_sel = vcalloc_filter(nVirtualChannels-1,0) | (vcalloc_filter >> nVirtualChannels)
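  // Added note (not in the original source): concatenating (vals, vals & ~mask) and taking a
  // one-hot priority encode implements a round-robin pick. The lowest-indexed requester not
  // covered by `mask` (the low half) wins first; if there is none, the pick wraps around to
  // the lowest-indexed requester overall (the high half). Folding the two halves back
  // together yields the single one-hot grant in vcalloc_sel.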
  // Prioritize incoming packets
when (io.router_req.fire) {
mask := (1.U << io.router_req.bits.src_virt_id) - 1.U
} .elsewhen (vcalloc_vals.orR) {
mask := Mux1H(vcalloc_sel, (0 until nVirtualChannels).map { w => ~(0.U((w+1).W)) })
}
io.vcalloc_req.valid := vcalloc_vals.orR
io.vcalloc_req.bits := Mux1H(vcalloc_sel, vcalloc_reqs)
states.zipWithIndex.map { case (s,idx) =>
if (virtualChannelParams(idx).traversable) {
vcalloc_vals(idx) := s.g === g_v && s.fifo_deps === 0.U
vcalloc_reqs(idx).in_vc := idx.U
vcalloc_reqs(idx).vc_sel := s.vc_sel
vcalloc_reqs(idx).flow := s.flow
when (vcalloc_vals(idx) && vcalloc_sel(idx) && io.vcalloc_req.ready) { s.g := g_a }
if (combineRCVA) {
when (route_arbiter.io.in(idx).fire) {
vcalloc_vals(idx) := true.B
vcalloc_reqs(idx).vc_sel := io.router_resp.vc_sel
}
}
} else {
vcalloc_vals(idx) := false.B
vcalloc_reqs(idx) := DontCare
}
}
io.debug.va_stall := PopCount(vcalloc_vals) - io.vcalloc_req.ready
when (io.vcalloc_req.fire) {
for (i <- 0 until nVirtualChannels) {
when (vcalloc_sel(i)) {
states(i).vc_sel := io.vcalloc_resp.vc_sel
states(i).g := g_a
if (!combineRCVA) {
assert(states(i).g === g_v)
}
}
}
}
val salloc_arb = Module(new SwitchArbiter(
nVirtualChannels,
cParam.destSpeedup,
outParams, egressParams
))
(states zip salloc_arb.io.in).zipWithIndex.map { case ((s,r),i) =>
if (virtualChannelParams(i).traversable) {
val credit_available = (s.vc_sel.asUInt & io.out_credit_available.asUInt) =/= 0.U
r.valid := s.g === g_a && credit_available && input_buffer.io.deq(i).valid
r.bits.vc_sel := s.vc_sel
val deq_tail = input_buffer.io.deq(i).bits.tail
r.bits.tail := deq_tail
when (r.fire && deq_tail) {
s.g := g_i
}
input_buffer.io.deq(i).ready := r.ready
} else {
r.valid := false.B
r.bits := DontCare
}
}
io.debug.sa_stall := PopCount(salloc_arb.io.in.map(r => r.valid && !r.ready))
io.salloc_req <> salloc_arb.io.out
when (io.block) {
salloc_arb.io.out.foreach(_.ready := false.B)
io.salloc_req.foreach(_.valid := false.B)
}
class OutBundle extends Bundle {
val valid = Bool()
val vid = UInt(virtualChannelBits.W)
val out_vid = UInt(log2Up(allOutParams.map(_.nVirtualChannels).max).W)
val flit = new Flit(cParam.payloadBits)
}
val salloc_outs = if (combineSAST) {
Wire(Vec(cParam.destSpeedup, new OutBundle))
} else {
Reg(Vec(cParam.destSpeedup, new OutBundle))
}
io.in.credit_return := salloc_arb.io.out.zipWithIndex.map { case (o, i) =>
Mux(o.fire, salloc_arb.io.chosen_oh(i), 0.U)
}.reduce(_|_)
io.in.vc_free := salloc_arb.io.out.zipWithIndex.map { case (o, i) =>
Mux(o.fire && Mux1H(salloc_arb.io.chosen_oh(i), input_buffer.io.deq.map(_.bits.tail)),
salloc_arb.io.chosen_oh(i), 0.U)
}.reduce(_|_)
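  // Added note (not in the original source): a credit is returned to the upstream channel for
  // every flit that wins switch allocation, while the VC itself (vc_free) is only released
  // when the winning flit is a tail flit.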
for (i <- 0 until cParam.destSpeedup) {
val salloc_out = salloc_outs(i)
salloc_out.valid := salloc_arb.io.out(i).fire
salloc_out.vid := OHToUInt(salloc_arb.io.chosen_oh(i))
val vc_sel = Mux1H(salloc_arb.io.chosen_oh(i), states.map(_.vc_sel))
val channel_oh = vc_sel.map(_.reduce(_||_)).toSeq
val virt_channel = Mux1H(channel_oh, vc_sel.map(v => OHToUInt(v)).toSeq)
when (salloc_arb.io.out(i).fire) {
salloc_out.out_vid := virt_channel
salloc_out.flit.payload := Mux1H(salloc_arb.io.chosen_oh(i), input_buffer.io.deq.map(_.bits.payload))
salloc_out.flit.head := Mux1H(salloc_arb.io.chosen_oh(i), input_buffer.io.deq.map(_.bits.head))
salloc_out.flit.tail := Mux1H(salloc_arb.io.chosen_oh(i), input_buffer.io.deq.map(_.bits.tail))
salloc_out.flit.flow := Mux1H(salloc_arb.io.chosen_oh(i), states.map(_.flow))
} .otherwise {
salloc_out.out_vid := DontCare
salloc_out.flit := DontCare
}
salloc_out.flit.virt_channel_id := DontCare // this gets set in the switch
io.out(i).valid := salloc_out.valid
io.out(i).bits.flit := salloc_out.flit
io.out(i).bits.out_virt_channel := salloc_out.out_vid
}
def filterVCSel(sel: MixedVec[Vec[Bool]], srcV: Int) = {
if (virtualChannelParams(srcV).traversable) {
outParams.zipWithIndex.map { case (oP, oI) =>
(0 until oP.nVirtualChannels).map { oV =>
var allow = false
virtualChannelParams(srcV).possibleFlows.foreach { pI =>
allow = allow || routingRelation(
cParam.channelRoutingInfos(srcV),
oP.channelRoutingInfos(oV),
pI
)
}
if (!allow)
sel(oI)(oV) := false.B
}
}
}
}
(0 until nVirtualChannels).map { i =>
if (!virtualChannelParams(i).traversable) states(i) := DontCare
filterVCSel(states(i).vc_sel, i)
}
when (reset.asBool) {
states.foreach(_.g := g_i)
}
}
| module InputUnit_14( // @[InputUnit.scala:158:7]
input clock, // @[InputUnit.scala:158:7]
input reset, // @[InputUnit.scala:158:7]
output [2:0] io_router_req_bits_src_virt_id, // @[InputUnit.scala:170:14]
output [2:0] io_router_req_bits_flow_vnet_id, // @[InputUnit.scala:170:14]
output [4:0] io_router_req_bits_flow_ingress_node, // @[InputUnit.scala:170:14]
output [1:0] io_router_req_bits_flow_ingress_node_id, // @[InputUnit.scala:170:14]
output [4:0] io_router_req_bits_flow_egress_node, // @[InputUnit.scala:170:14]
output [1:0] io_router_req_bits_flow_egress_node_id, // @[InputUnit.scala:170:14]
input io_router_resp_vc_sel_2_5, // @[InputUnit.scala:170:14]
input io_router_resp_vc_sel_2_6, // @[InputUnit.scala:170:14]
input io_router_resp_vc_sel_2_7, // @[InputUnit.scala:170:14]
input io_router_resp_vc_sel_1_4, // @[InputUnit.scala:170:14]
input io_router_resp_vc_sel_1_5, // @[InputUnit.scala:170:14]
input io_router_resp_vc_sel_1_6, // @[InputUnit.scala:170:14]
input io_router_resp_vc_sel_1_7, // @[InputUnit.scala:170:14]
input io_vcalloc_req_ready, // @[InputUnit.scala:170:14]
output io_vcalloc_req_valid, // @[InputUnit.scala:170:14]
output io_vcalloc_req_bits_vc_sel_2_5, // @[InputUnit.scala:170:14]
output io_vcalloc_req_bits_vc_sel_2_6, // @[InputUnit.scala:170:14]
output io_vcalloc_req_bits_vc_sel_2_7, // @[InputUnit.scala:170:14]
output io_vcalloc_req_bits_vc_sel_1_4, // @[InputUnit.scala:170:14]
output io_vcalloc_req_bits_vc_sel_1_5, // @[InputUnit.scala:170:14]
output io_vcalloc_req_bits_vc_sel_1_6, // @[InputUnit.scala:170:14]
output io_vcalloc_req_bits_vc_sel_1_7, // @[InputUnit.scala:170:14]
input io_vcalloc_resp_vc_sel_2_5, // @[InputUnit.scala:170:14]
input io_vcalloc_resp_vc_sel_2_6, // @[InputUnit.scala:170:14]
input io_vcalloc_resp_vc_sel_2_7, // @[InputUnit.scala:170:14]
input io_vcalloc_resp_vc_sel_1_4, // @[InputUnit.scala:170:14]
input io_vcalloc_resp_vc_sel_1_5, // @[InputUnit.scala:170:14]
input io_vcalloc_resp_vc_sel_1_6, // @[InputUnit.scala:170:14]
input io_vcalloc_resp_vc_sel_1_7, // @[InputUnit.scala:170:14]
input io_out_credit_available_2_1, // @[InputUnit.scala:170:14]
input io_out_credit_available_2_2, // @[InputUnit.scala:170:14]
input io_out_credit_available_2_3, // @[InputUnit.scala:170:14]
input io_out_credit_available_2_4, // @[InputUnit.scala:170:14]
input io_out_credit_available_2_5, // @[InputUnit.scala:170:14]
input io_out_credit_available_2_6, // @[InputUnit.scala:170:14]
input io_out_credit_available_2_7, // @[InputUnit.scala:170:14]
input io_out_credit_available_1_4, // @[InputUnit.scala:170:14]
input io_out_credit_available_1_5, // @[InputUnit.scala:170:14]
input io_out_credit_available_1_6, // @[InputUnit.scala:170:14]
input io_out_credit_available_1_7, // @[InputUnit.scala:170:14]
input io_out_credit_available_0_1, // @[InputUnit.scala:170:14]
input io_out_credit_available_0_2, // @[InputUnit.scala:170:14]
input io_out_credit_available_0_3, // @[InputUnit.scala:170:14]
input io_out_credit_available_0_4, // @[InputUnit.scala:170:14]
input io_out_credit_available_0_5, // @[InputUnit.scala:170:14]
input io_out_credit_available_0_6, // @[InputUnit.scala:170:14]
input io_out_credit_available_0_7, // @[InputUnit.scala:170:14]
input io_salloc_req_0_ready, // @[InputUnit.scala:170:14]
output io_salloc_req_0_valid, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_2_0, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_2_1, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_2_2, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_2_3, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_2_4, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_2_5, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_2_6, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_2_7, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_1_0, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_1_1, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_1_2, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_1_3, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_1_4, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_1_5, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_1_6, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_1_7, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_0_1, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_0_2, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_0_3, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_0_4, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_0_5, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_0_6, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_0_7, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_tail, // @[InputUnit.scala:170:14]
output io_out_0_valid, // @[InputUnit.scala:170:14]
output io_out_0_bits_flit_head, // @[InputUnit.scala:170:14]
output io_out_0_bits_flit_tail, // @[InputUnit.scala:170:14]
output [72:0] io_out_0_bits_flit_payload, // @[InputUnit.scala:170:14]
output [2:0] io_out_0_bits_flit_flow_vnet_id, // @[InputUnit.scala:170:14]
output [4:0] io_out_0_bits_flit_flow_ingress_node, // @[InputUnit.scala:170:14]
output [1:0] io_out_0_bits_flit_flow_ingress_node_id, // @[InputUnit.scala:170:14]
output [4:0] io_out_0_bits_flit_flow_egress_node, // @[InputUnit.scala:170:14]
output [1:0] io_out_0_bits_flit_flow_egress_node_id, // @[InputUnit.scala:170:14]
output [2:0] io_out_0_bits_out_virt_channel, // @[InputUnit.scala:170:14]
output [2:0] io_debug_va_stall, // @[InputUnit.scala:170:14]
output [2:0] io_debug_sa_stall, // @[InputUnit.scala:170:14]
input io_in_flit_0_valid, // @[InputUnit.scala:170:14]
input io_in_flit_0_bits_head, // @[InputUnit.scala:170:14]
input io_in_flit_0_bits_tail, // @[InputUnit.scala:170:14]
input [72:0] io_in_flit_0_bits_payload, // @[InputUnit.scala:170:14]
input [2:0] io_in_flit_0_bits_flow_vnet_id, // @[InputUnit.scala:170:14]
input [4:0] io_in_flit_0_bits_flow_ingress_node, // @[InputUnit.scala:170:14]
input [1:0] io_in_flit_0_bits_flow_ingress_node_id, // @[InputUnit.scala:170:14]
input [4:0] io_in_flit_0_bits_flow_egress_node, // @[InputUnit.scala:170:14]
input [1:0] io_in_flit_0_bits_flow_egress_node_id, // @[InputUnit.scala:170:14]
input [2:0] io_in_flit_0_bits_virt_channel_id, // @[InputUnit.scala:170:14]
output [7:0] io_in_credit_return, // @[InputUnit.scala:170:14]
output [7:0] io_in_vc_free // @[InputUnit.scala:170:14]
);
wire vcalloc_vals_7; // @[InputUnit.scala:266:32]
wire vcalloc_vals_6; // @[InputUnit.scala:266:32]
wire vcalloc_vals_5; // @[InputUnit.scala:266:32]
wire vcalloc_vals_4; // @[InputUnit.scala:266:32]
wire _salloc_arb_io_in_4_ready; // @[InputUnit.scala:296:26]
wire _salloc_arb_io_in_5_ready; // @[InputUnit.scala:296:26]
wire _salloc_arb_io_in_6_ready; // @[InputUnit.scala:296:26]
wire _salloc_arb_io_in_7_ready; // @[InputUnit.scala:296:26]
wire _salloc_arb_io_out_0_valid; // @[InputUnit.scala:296:26]
wire [7:0] _salloc_arb_io_chosen_oh_0; // @[InputUnit.scala:296:26]
wire _route_arbiter_io_in_4_ready; // @[InputUnit.scala:187:29]
wire _route_arbiter_io_in_5_ready; // @[InputUnit.scala:187:29]
wire _route_arbiter_io_in_6_ready; // @[InputUnit.scala:187:29]
wire _route_arbiter_io_in_7_ready; // @[InputUnit.scala:187:29]
wire _route_arbiter_io_out_valid; // @[InputUnit.scala:187:29]
wire [2:0] _route_arbiter_io_out_bits_src_virt_id; // @[InputUnit.scala:187:29]
wire _input_buffer_io_deq_0_bits_head; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_0_bits_tail; // @[InputUnit.scala:181:28]
wire [72:0] _input_buffer_io_deq_0_bits_payload; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_1_bits_head; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_1_bits_tail; // @[InputUnit.scala:181:28]
wire [72:0] _input_buffer_io_deq_1_bits_payload; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_2_bits_head; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_2_bits_tail; // @[InputUnit.scala:181:28]
wire [72:0] _input_buffer_io_deq_2_bits_payload; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_3_bits_head; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_3_bits_tail; // @[InputUnit.scala:181:28]
wire [72:0] _input_buffer_io_deq_3_bits_payload; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_4_valid; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_4_bits_head; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_4_bits_tail; // @[InputUnit.scala:181:28]
wire [72:0] _input_buffer_io_deq_4_bits_payload; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_5_valid; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_5_bits_head; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_5_bits_tail; // @[InputUnit.scala:181:28]
wire [72:0] _input_buffer_io_deq_5_bits_payload; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_6_valid; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_6_bits_head; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_6_bits_tail; // @[InputUnit.scala:181:28]
wire [72:0] _input_buffer_io_deq_6_bits_payload; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_7_valid; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_7_bits_head; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_7_bits_tail; // @[InputUnit.scala:181:28]
wire [72:0] _input_buffer_io_deq_7_bits_payload; // @[InputUnit.scala:181:28]
reg [2:0] states_4_g; // @[InputUnit.scala:192:19]
reg states_4_vc_sel_2_5; // @[InputUnit.scala:192:19]
reg states_4_vc_sel_2_6; // @[InputUnit.scala:192:19]
reg states_4_vc_sel_2_7; // @[InputUnit.scala:192:19]
reg states_4_vc_sel_1_4; // @[InputUnit.scala:192:19]
reg states_4_vc_sel_1_5; // @[InputUnit.scala:192:19]
reg states_4_vc_sel_1_6; // @[InputUnit.scala:192:19]
reg states_4_vc_sel_1_7; // @[InputUnit.scala:192:19]
reg [2:0] states_4_flow_vnet_id; // @[InputUnit.scala:192:19]
reg [4:0] states_4_flow_ingress_node; // @[InputUnit.scala:192:19]
reg [1:0] states_4_flow_ingress_node_id; // @[InputUnit.scala:192:19]
reg [4:0] states_4_flow_egress_node; // @[InputUnit.scala:192:19]
reg [1:0] states_4_flow_egress_node_id; // @[InputUnit.scala:192:19]
reg [2:0] states_5_g; // @[InputUnit.scala:192:19]
reg states_5_vc_sel_2_5; // @[InputUnit.scala:192:19]
reg states_5_vc_sel_2_6; // @[InputUnit.scala:192:19]
reg states_5_vc_sel_2_7; // @[InputUnit.scala:192:19]
reg states_5_vc_sel_1_4; // @[InputUnit.scala:192:19]
reg states_5_vc_sel_1_5; // @[InputUnit.scala:192:19]
reg states_5_vc_sel_1_6; // @[InputUnit.scala:192:19]
reg states_5_vc_sel_1_7; // @[InputUnit.scala:192:19]
reg [2:0] states_5_flow_vnet_id; // @[InputUnit.scala:192:19]
reg [4:0] states_5_flow_ingress_node; // @[InputUnit.scala:192:19]
reg [1:0] states_5_flow_ingress_node_id; // @[InputUnit.scala:192:19]
reg [4:0] states_5_flow_egress_node; // @[InputUnit.scala:192:19]
reg [1:0] states_5_flow_egress_node_id; // @[InputUnit.scala:192:19]
reg [2:0] states_6_g; // @[InputUnit.scala:192:19]
reg states_6_vc_sel_2_5; // @[InputUnit.scala:192:19]
reg states_6_vc_sel_2_6; // @[InputUnit.scala:192:19]
reg states_6_vc_sel_2_7; // @[InputUnit.scala:192:19]
reg states_6_vc_sel_1_4; // @[InputUnit.scala:192:19]
reg states_6_vc_sel_1_5; // @[InputUnit.scala:192:19]
reg states_6_vc_sel_1_6; // @[InputUnit.scala:192:19]
reg states_6_vc_sel_1_7; // @[InputUnit.scala:192:19]
reg [2:0] states_6_flow_vnet_id; // @[InputUnit.scala:192:19]
reg [4:0] states_6_flow_ingress_node; // @[InputUnit.scala:192:19]
reg [1:0] states_6_flow_ingress_node_id; // @[InputUnit.scala:192:19]
reg [4:0] states_6_flow_egress_node; // @[InputUnit.scala:192:19]
reg [1:0] states_6_flow_egress_node_id; // @[InputUnit.scala:192:19]
reg [2:0] states_7_g; // @[InputUnit.scala:192:19]
reg states_7_vc_sel_2_5; // @[InputUnit.scala:192:19]
reg states_7_vc_sel_2_6; // @[InputUnit.scala:192:19]
reg states_7_vc_sel_2_7; // @[InputUnit.scala:192:19]
reg states_7_vc_sel_1_4; // @[InputUnit.scala:192:19]
reg states_7_vc_sel_1_5; // @[InputUnit.scala:192:19]
reg states_7_vc_sel_1_6; // @[InputUnit.scala:192:19]
reg states_7_vc_sel_1_7; // @[InputUnit.scala:192:19]
reg [2:0] states_7_flow_vnet_id; // @[InputUnit.scala:192:19]
reg [4:0] states_7_flow_ingress_node; // @[InputUnit.scala:192:19]
reg [1:0] states_7_flow_ingress_node_id; // @[InputUnit.scala:192:19]
reg [4:0] states_7_flow_egress_node; // @[InputUnit.scala:192:19]
reg [1:0] states_7_flow_egress_node_id; // @[InputUnit.scala:192:19]
wire _GEN = io_in_flit_0_valid & io_in_flit_0_bits_head; // @[InputUnit.scala:205:30]
wire route_arbiter_io_in_4_valid = states_4_g == 3'h1; // @[InputUnit.scala:192:19, :229:22]
wire route_arbiter_io_in_5_valid = states_5_g == 3'h1; // @[InputUnit.scala:192:19, :229:22]
wire route_arbiter_io_in_6_valid = states_6_g == 3'h1; // @[InputUnit.scala:192:19, :229:22]
wire route_arbiter_io_in_7_valid = states_7_g == 3'h1; // @[InputUnit.scala:192:19, :229:22]
reg [7:0] mask; // @[InputUnit.scala:250:21]
wire [7:0] _vcalloc_filter_T_3 = {vcalloc_vals_7, vcalloc_vals_6, vcalloc_vals_5, vcalloc_vals_4, 4'h0} & ~mask; // @[InputUnit.scala:250:21, :253:{80,87,89}, :266:32]
wire [15:0] vcalloc_filter = _vcalloc_filter_T_3[0] ? 16'h1 : _vcalloc_filter_T_3[1] ? 16'h2 : _vcalloc_filter_T_3[2] ? 16'h4 : _vcalloc_filter_T_3[3] ? 16'h8 : _vcalloc_filter_T_3[4] ? 16'h10 : _vcalloc_filter_T_3[5] ? 16'h20 : _vcalloc_filter_T_3[6] ? 16'h40 : _vcalloc_filter_T_3[7] ? 16'h80 : vcalloc_vals_4 ? 16'h1000 : vcalloc_vals_5 ? 16'h2000 : vcalloc_vals_6 ? 16'h4000 : {vcalloc_vals_7, 15'h0}; // @[OneHot.scala:85:71]
wire [7:0] vcalloc_sel = vcalloc_filter[7:0] | vcalloc_filter[15:8]; // @[Mux.scala:50:70]
wire io_vcalloc_req_valid_0 = vcalloc_vals_4 | vcalloc_vals_5 | vcalloc_vals_6 | vcalloc_vals_7; // @[package.scala:81:59]
assign vcalloc_vals_4 = states_4_g == 3'h2; // @[InputUnit.scala:192:19, :266:32]
assign vcalloc_vals_5 = states_5_g == 3'h2; // @[InputUnit.scala:192:19, :266:32]
assign vcalloc_vals_6 = states_6_g == 3'h2; // @[InputUnit.scala:192:19, :266:32]
assign vcalloc_vals_7 = states_7_g == 3'h2; // @[InputUnit.scala:192:19, :266:32]
wire _GEN_0 = io_vcalloc_req_ready & io_vcalloc_req_valid_0; // @[Decoupled.scala:51:35]
wire _GEN_1 = _GEN_0 & vcalloc_sel[4]; // @[Mux.scala:32:36]
wire _GEN_2 = _GEN_0 & vcalloc_sel[5]; // @[Mux.scala:32:36]
wire _GEN_3 = _GEN_0 & vcalloc_sel[6]; // @[Mux.scala:32:36]
wire _GEN_4 = _GEN_0 & vcalloc_sel[7]; // @[Mux.scala:32:36] |
Generate the Verilog code corresponding to the following Chisel files.
File PE.scala:
// See README.md for license details.
package gemmini
import chisel3._
import chisel3.util._
class PEControl[T <: Data : Arithmetic](accType: T) extends Bundle {
val dataflow = UInt(1.W) // TODO make this an Enum
val propagate = UInt(1.W) // Which register should be propagated (and which should be accumulated)?
val shift = UInt(log2Up(accType.getWidth).W) // TODO this isn't correct for Floats
}
class MacUnit[T <: Data](inputType: T, cType: T, dType: T) (implicit ev: Arithmetic[T]) extends Module {
import ev._
val io = IO(new Bundle {
val in_a = Input(inputType)
val in_b = Input(inputType)
val in_c = Input(cType)
val out_d = Output(dType)
})
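  // Added note (not in the original source): out_d = in_a * in_b + in_c, using the Arithmetic
  // typeclass, whose mac(m1, m2) is defined as m1 * m2 + self.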
io.out_d := io.in_c.mac(io.in_a, io.in_b)
}
// TODO update documentation
/**
* A PE implementing a MAC operation. Configured as fully combinational when integrated into a Mesh.
* @param width Data width of operands
*/
class PE[T <: Data](inputType: T, outputType: T, accType: T, df: Dataflow.Value, max_simultaneous_matmuls: Int)
(implicit ev: Arithmetic[T]) extends Module { // Debugging variables
import ev._
val io = IO(new Bundle {
val in_a = Input(inputType)
val in_b = Input(outputType)
val in_d = Input(outputType)
val out_a = Output(inputType)
val out_b = Output(outputType)
val out_c = Output(outputType)
val in_control = Input(new PEControl(accType))
val out_control = Output(new PEControl(accType))
val in_id = Input(UInt(log2Up(max_simultaneous_matmuls).W))
val out_id = Output(UInt(log2Up(max_simultaneous_matmuls).W))
val in_last = Input(Bool())
val out_last = Output(Bool())
val in_valid = Input(Bool())
val out_valid = Output(Bool())
val bad_dataflow = Output(Bool())
})
val cType = if (df == Dataflow.WS) inputType else accType
// When creating PEs that support multiple dataflows, the
// elaboration/synthesis tools often fail to consolidate and de-duplicate
// MAC units. To force mac circuitry to be re-used, we create a "mac_unit"
// module here which just performs a single MAC operation
val mac_unit = Module(new MacUnit(inputType,
if (df == Dataflow.WS) outputType else accType, outputType))
val a = io.in_a
val b = io.in_b
val d = io.in_d
val c1 = Reg(cType)
val c2 = Reg(cType)
val dataflow = io.in_control.dataflow
val prop = io.in_control.propagate
val shift = io.in_control.shift
val id = io.in_id
val last = io.in_last
val valid = io.in_valid
io.out_a := a
io.out_control.dataflow := dataflow
io.out_control.propagate := prop
io.out_control.shift := shift
io.out_id := id
io.out_last := last
io.out_valid := valid
mac_unit.io.in_a := a
val last_s = RegEnable(prop, valid)
val flip = last_s =/= prop
val shift_offset = Mux(flip, shift, 0.U)
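  // Added note (not in the original source): `flip` is asserted when the propagate select
  // differs from the value last sampled on a valid cycle; only then is the accumulator
  // right-shifted by `shift` as it is read out in the output-stationary dataflow below.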
// Which dataflow are we using?
val OUTPUT_STATIONARY = Dataflow.OS.id.U(1.W)
val WEIGHT_STATIONARY = Dataflow.WS.id.U(1.W)
// Is c1 being computed on, or propagated forward (in the output-stationary dataflow)?
val COMPUTE = 0.U(1.W)
val PROPAGATE = 1.U(1.W)
io.bad_dataflow := false.B
when ((df == Dataflow.OS).B || ((df == Dataflow.BOTH).B && dataflow === OUTPUT_STATIONARY)) {
when(prop === PROPAGATE) {
io.out_c := (c1 >> shift_offset).clippedToWidthOf(outputType)
io.out_b := b
mac_unit.io.in_b := b.asTypeOf(inputType)
mac_unit.io.in_c := c2
c2 := mac_unit.io.out_d
c1 := d.withWidthOf(cType)
}.otherwise {
io.out_c := (c2 >> shift_offset).clippedToWidthOf(outputType)
io.out_b := b
mac_unit.io.in_b := b.asTypeOf(inputType)
mac_unit.io.in_c := c1
c1 := mac_unit.io.out_d
c2 := d.withWidthOf(cType)
}
}.elsewhen ((df == Dataflow.WS).B || ((df == Dataflow.BOTH).B && dataflow === WEIGHT_STATIONARY)) {
when(prop === PROPAGATE) {
io.out_c := c1
mac_unit.io.in_b := c2.asTypeOf(inputType)
mac_unit.io.in_c := b
io.out_b := mac_unit.io.out_d
c1 := d
}.otherwise {
io.out_c := c2
mac_unit.io.in_b := c1.asTypeOf(inputType)
mac_unit.io.in_c := b
io.out_b := mac_unit.io.out_d
c2 := d
}
}.otherwise {
io.bad_dataflow := true.B
//assert(false.B, "unknown dataflow")
io.out_c := DontCare
io.out_b := DontCare
mac_unit.io.in_b := b.asTypeOf(inputType)
mac_unit.io.in_c := c2
}
when (!valid) {
c1 := c1
c2 := c2
mac_unit.io.in_b := DontCare
mac_unit.io.in_c := DontCare
}
}
File Arithmetic.scala:
// A simple type class for Chisel datatypes that can add and multiply. To add your own type, simply create your own:
// implicit MyTypeArithmetic extends Arithmetic[MyType] { ... }
package gemmini
import chisel3._
import chisel3.util._
import hardfloat._
// Bundles that represent the raw bits of custom datatypes
case class Float(expWidth: Int, sigWidth: Int) extends Bundle {
val bits = UInt((expWidth + sigWidth).W)
val bias: Int = (1 << (expWidth-1)) - 1
}
case class DummySInt(w: Int) extends Bundle {
val bits = UInt(w.W)
def dontCare: DummySInt = {
val o = Wire(new DummySInt(w))
o.bits := 0.U
o
}
}
// The Arithmetic typeclass which implements various arithmetic operations on custom datatypes
abstract class Arithmetic[T <: Data] {
implicit def cast(t: T): ArithmeticOps[T]
}
abstract class ArithmeticOps[T <: Data](self: T) {
def *(t: T): T
def mac(m1: T, m2: T): T // Returns (m1 * m2 + self)
def +(t: T): T
def -(t: T): T
def >>(u: UInt): T // This is a rounding shift! Rounds away from 0
def >(t: T): Bool
def identity: T
def withWidthOf(t: T): T
def clippedToWidthOf(t: T): T // Like "withWidthOf", except that it saturates
def relu: T
def zero: T
def minimum: T
// Optional parameters, which only need to be defined if you want to enable various optimizations for transformers
def divider(denom_t: UInt, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[T])] = None
def sqrt: Option[(DecoupledIO[UInt], DecoupledIO[T])] = None
def reciprocal[U <: Data](u: U, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[U])] = None
def mult_with_reciprocal[U <: Data](reciprocal: U) = self
}
object Arithmetic {
implicit object UIntArithmetic extends Arithmetic[UInt] {
override implicit def cast(self: UInt) = new ArithmeticOps(self) {
override def *(t: UInt) = self * t
override def mac(m1: UInt, m2: UInt) = m1 * m2 + self
override def +(t: UInt) = self + t
override def -(t: UInt) = self - t
override def >>(u: UInt) = {
// The equation we use can be found here: https://riscv.github.io/documents/riscv-v-spec/#_vector_fixed_point_rounding_mode_register_vxrm
// TODO Do we need to explicitly handle the cases where "u" is a small number (like 0)? What is the default behavior here?
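        // Illustrative trace (added commentary): self = 11 (b1011), u = 2. Then
        // point_five = self(1) = 1, zeros = ((self & b01) =/= 0) = true, ones_digit = self(2) = 0,
        // so r = 1 and the result is (11 >> 2) + 1 = 3, i.e. 11/4 = 2.75 rounds up to 3.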
val point_five = Mux(u === 0.U, 0.U, self(u - 1.U))
val zeros = Mux(u <= 1.U, 0.U, self.asUInt & ((1.U << (u - 1.U)).asUInt - 1.U)) =/= 0.U
val ones_digit = self(u)
val r = point_five & (zeros | ones_digit)
(self >> u).asUInt + r
}
override def >(t: UInt): Bool = self > t
override def withWidthOf(t: UInt) = self.asTypeOf(t)
override def clippedToWidthOf(t: UInt) = {
val sat = ((1 << (t.getWidth-1))-1).U
Mux(self > sat, sat, self)(t.getWidth-1, 0)
}
override def relu: UInt = self
override def zero: UInt = 0.U
override def identity: UInt = 1.U
override def minimum: UInt = 0.U
}
}
implicit object SIntArithmetic extends Arithmetic[SInt] {
override implicit def cast(self: SInt) = new ArithmeticOps(self) {
override def *(t: SInt) = self * t
override def mac(m1: SInt, m2: SInt) = m1 * m2 + self
override def +(t: SInt) = self + t
override def -(t: SInt) = self - t
override def >>(u: UInt) = {
// The equation we use can be found here: https://riscv.github.io/documents/riscv-v-spec/#_vector_fixed_point_rounding_mode_register_vxrm
// TODO Do we need to explicitly handle the cases where "u" is a small number (like 0)? What is the default behavior here?
val point_five = Mux(u === 0.U, 0.U, self(u - 1.U))
val zeros = Mux(u <= 1.U, 0.U, self.asUInt & ((1.U << (u - 1.U)).asUInt - 1.U)) =/= 0.U
val ones_digit = self(u)
val r = (point_five & (zeros | ones_digit)).asBool
(self >> u).asSInt + Mux(r, 1.S, 0.S)
}
override def >(t: SInt): Bool = self > t
override def withWidthOf(t: SInt) = {
if (self.getWidth >= t.getWidth)
self(t.getWidth-1, 0).asSInt
else {
val sign_bits = t.getWidth - self.getWidth
val sign = self(self.getWidth-1)
Cat(Cat(Seq.fill(sign_bits)(sign)), self).asTypeOf(t)
}
}
override def clippedToWidthOf(t: SInt): SInt = {
val maxsat = ((1 << (t.getWidth-1))-1).S
val minsat = (-(1 << (t.getWidth-1))).S
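        // Added note (not in the original source): e.g. clipping to an 8-bit target saturates
        // values above 127 to 127 and values below -128 to -128 before taking the low 8 bits.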
MuxCase(self, Seq((self > maxsat) -> maxsat, (self < minsat) -> minsat))(t.getWidth-1, 0).asSInt
}
override def relu: SInt = Mux(self >= 0.S, self, 0.S)
override def zero: SInt = 0.S
override def identity: SInt = 1.S
override def minimum: SInt = (-(1 << (self.getWidth-1))).S
override def divider(denom_t: UInt, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[SInt])] = {
// TODO this uses a floating point divider, but we should use an integer divider instead
val input = Wire(Decoupled(denom_t.cloneType))
val output = Wire(Decoupled(self.cloneType))
// We translate our integer to floating-point form so that we can use the hardfloat divider
val expWidth = log2Up(self.getWidth) + 1
val sigWidth = self.getWidth
def sin_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def uin_to_float(x: UInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := false.B
in_to_rec_fn.io.in := x
in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def float_to_in(x: UInt) = {
val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth))
rec_fn_to_in.io.signedOut := true.B
rec_fn_to_in.io.in := x
rec_fn_to_in.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
rec_fn_to_in.io.out.asSInt
}
val self_rec = sin_to_float(self)
val denom_rec = uin_to_float(input.bits)
      // Instantiate the hardfloat divider
val divider = Module(new DivSqrtRecFN_small(expWidth, sigWidth, options))
input.ready := divider.io.inReady
divider.io.inValid := input.valid
divider.io.sqrtOp := false.B
divider.io.a := self_rec
divider.io.b := denom_rec
divider.io.roundingMode := consts.round_minMag
divider.io.detectTininess := consts.tininess_afterRounding
output.valid := divider.io.outValid_div
output.bits := float_to_in(divider.io.out)
assert(!output.valid || output.ready)
Some((input, output))
}
override def sqrt: Option[(DecoupledIO[UInt], DecoupledIO[SInt])] = {
// TODO this uses a floating point divider, but we should use an integer divider instead
val input = Wire(Decoupled(UInt(0.W)))
val output = Wire(Decoupled(self.cloneType))
input.bits := DontCare
// We translate our integer to floating-point form so that we can use the hardfloat divider
val expWidth = log2Up(self.getWidth) + 1
val sigWidth = self.getWidth
def in_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def float_to_in(x: UInt) = {
val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth))
rec_fn_to_in.io.signedOut := true.B
rec_fn_to_in.io.in := x
rec_fn_to_in.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
rec_fn_to_in.io.out.asSInt
}
val self_rec = in_to_float(self)
      // Instantiate the hardfloat sqrt
val sqrter = Module(new DivSqrtRecFN_small(expWidth, sigWidth, 0))
input.ready := sqrter.io.inReady
sqrter.io.inValid := input.valid
sqrter.io.sqrtOp := true.B
sqrter.io.a := self_rec
sqrter.io.b := DontCare
sqrter.io.roundingMode := consts.round_minMag
sqrter.io.detectTininess := consts.tininess_afterRounding
output.valid := sqrter.io.outValid_sqrt
output.bits := float_to_in(sqrter.io.out)
assert(!output.valid || output.ready)
Some((input, output))
}
override def reciprocal[U <: Data](u: U, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[U])] = u match {
case Float(expWidth, sigWidth) =>
val input = Wire(Decoupled(UInt(0.W)))
val output = Wire(Decoupled(u.cloneType))
input.bits := DontCare
// We translate our integer to floating-point form so that we can use the hardfloat divider
def in_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
val self_rec = in_to_float(self)
val one_rec = in_to_float(1.S)
// Instantiate the hardfloat divider
val divider = Module(new DivSqrtRecFN_small(expWidth, sigWidth, options))
input.ready := divider.io.inReady
divider.io.inValid := input.valid
divider.io.sqrtOp := false.B
divider.io.a := one_rec
divider.io.b := self_rec
divider.io.roundingMode := consts.round_near_even
divider.io.detectTininess := consts.tininess_afterRounding
output.valid := divider.io.outValid_div
output.bits := fNFromRecFN(expWidth, sigWidth, divider.io.out).asTypeOf(u)
assert(!output.valid || output.ready)
Some((input, output))
case _ => None
}
override def mult_with_reciprocal[U <: Data](reciprocal: U): SInt = reciprocal match {
case recip @ Float(expWidth, sigWidth) =>
def in_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def float_to_in(x: UInt) = {
val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth))
rec_fn_to_in.io.signedOut := true.B
rec_fn_to_in.io.in := x
rec_fn_to_in.io.roundingMode := consts.round_minMag
rec_fn_to_in.io.out.asSInt
}
val self_rec = in_to_float(self)
val reciprocal_rec = recFNFromFN(expWidth, sigWidth, recip.bits)
// Instantiate the hardfloat multiplier
val muladder = Module(new MulRecFN(expWidth, sigWidth))
muladder.io.roundingMode := consts.round_near_even
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := self_rec
muladder.io.b := reciprocal_rec
float_to_in(muladder.io.out)
case _ => self
}
}
}
implicit object FloatArithmetic extends Arithmetic[Float] {
// TODO Floating point arithmetic currently switches between recoded and standard formats for every operation. However, it should stay in the recoded format as it travels through the systolic array
override implicit def cast(self: Float): ArithmeticOps[Float] = new ArithmeticOps(self) {
override def *(t: Float): Float = {
val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth))
t_resizer.io.in := t_rec
t_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
t_resizer.io.detectTininess := consts.tininess_afterRounding
val t_rec_resized = t_resizer.io.out
val muladder = Module(new MulRecFN(self.expWidth, self.sigWidth))
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := self_rec
muladder.io.b := t_rec_resized
val out = Wire(Float(self.expWidth, self.sigWidth))
out.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
out
}
override def mac(m1: Float, m2: Float): Float = {
// Recode all operands
val m1_rec = recFNFromFN(m1.expWidth, m1.sigWidth, m1.bits)
val m2_rec = recFNFromFN(m2.expWidth, m2.sigWidth, m2.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Resize m1 to self's width
val m1_resizer = Module(new RecFNToRecFN(m1.expWidth, m1.sigWidth, self.expWidth, self.sigWidth))
m1_resizer.io.in := m1_rec
m1_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
m1_resizer.io.detectTininess := consts.tininess_afterRounding
val m1_rec_resized = m1_resizer.io.out
// Resize m2 to self's width
val m2_resizer = Module(new RecFNToRecFN(m2.expWidth, m2.sigWidth, self.expWidth, self.sigWidth))
m2_resizer.io.in := m2_rec
m2_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
m2_resizer.io.detectTininess := consts.tininess_afterRounding
val m2_rec_resized = m2_resizer.io.out
// Perform multiply-add
val muladder = Module(new MulAddRecFN(self.expWidth, self.sigWidth))
muladder.io.op := 0.U
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := m1_rec_resized
muladder.io.b := m2_rec_resized
muladder.io.c := self_rec
// Convert result to standard format // TODO remove these intermediate recodings
val out = Wire(Float(self.expWidth, self.sigWidth))
out.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
out
}
override def +(t: Float): Float = {
require(self.getWidth >= t.getWidth) // This just makes it easier to write the resizing code
// Recode all operands
val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Generate 1 as a float
val in_to_rec_fn = Module(new INToRecFN(1, self.expWidth, self.sigWidth))
in_to_rec_fn.io.signedIn := false.B
in_to_rec_fn.io.in := 1.U
in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
val one_rec = in_to_rec_fn.io.out
// Resize t
val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth))
t_resizer.io.in := t_rec
t_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
t_resizer.io.detectTininess := consts.tininess_afterRounding
val t_rec_resized = t_resizer.io.out
// Perform addition
val muladder = Module(new MulAddRecFN(self.expWidth, self.sigWidth))
muladder.io.op := 0.U
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := t_rec_resized
muladder.io.b := one_rec
muladder.io.c := self_rec
val result = Wire(Float(self.expWidth, self.sigWidth))
result.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
result
}
override def -(t: Float): Float = {
val t_sgn = t.bits(t.getWidth-1)
val neg_t = Cat(~t_sgn, t.bits(t.getWidth-2,0)).asTypeOf(t)
self + neg_t
}
override def >>(u: UInt): Float = {
// Recode self
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Get 2^(-u) as a recoded float
val shift_exp = Wire(UInt(self.expWidth.W))
shift_exp := self.bias.U - u
val shift_fn = Cat(0.U(1.W), shift_exp, 0.U((self.sigWidth-1).W))
val shift_rec = recFNFromFN(self.expWidth, self.sigWidth, shift_fn)
assert(shift_exp =/= 0.U, "scaling by denormalized numbers is not currently supported")
// Multiply self and 2^(-u)
val muladder = Module(new MulRecFN(self.expWidth, self.sigWidth))
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := self_rec
muladder.io.b := shift_rec
val result = Wire(Float(self.expWidth, self.sigWidth))
result.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
result
}
override def >(t: Float): Bool = {
// Recode all operands
val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Resize t to self's width
val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth))
t_resizer.io.in := t_rec
t_resizer.io.roundingMode := consts.round_near_even
t_resizer.io.detectTininess := consts.tininess_afterRounding
val t_rec_resized = t_resizer.io.out
val comparator = Module(new CompareRecFN(self.expWidth, self.sigWidth))
comparator.io.a := self_rec
comparator.io.b := t_rec_resized
comparator.io.signaling := false.B
comparator.io.gt
}
override def withWidthOf(t: Float): Float = {
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
val resizer = Module(new RecFNToRecFN(self.expWidth, self.sigWidth, t.expWidth, t.sigWidth))
resizer.io.in := self_rec
resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
resizer.io.detectTininess := consts.tininess_afterRounding
val result = Wire(Float(t.expWidth, t.sigWidth))
result.bits := fNFromRecFN(t.expWidth, t.sigWidth, resizer.io.out)
result
}
override def clippedToWidthOf(t: Float): Float = {
// TODO check for overflow. Right now, we just assume that overflow doesn't happen
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
val resizer = Module(new RecFNToRecFN(self.expWidth, self.sigWidth, t.expWidth, t.sigWidth))
resizer.io.in := self_rec
resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
resizer.io.detectTininess := consts.tininess_afterRounding
val result = Wire(Float(t.expWidth, t.sigWidth))
result.bits := fNFromRecFN(t.expWidth, t.sigWidth, resizer.io.out)
result
}
override def relu: Float = {
val raw = rawFloatFromFN(self.expWidth, self.sigWidth, self.bits)
val result = Wire(Float(self.expWidth, self.sigWidth))
result.bits := Mux(!raw.isZero && raw.sign, 0.U, self.bits)
result
}
override def zero: Float = 0.U.asTypeOf(self)
override def identity: Float = Cat(0.U(2.W), ~(0.U((self.expWidth-1).W)), 0.U((self.sigWidth-1).W)).asTypeOf(self)
override def minimum: Float = Cat(1.U, ~(0.U(self.expWidth.W)), 0.U((self.sigWidth-1).W)).asTypeOf(self)
}
}
implicit object DummySIntArithmetic extends Arithmetic[DummySInt] {
override implicit def cast(self: DummySInt) = new ArithmeticOps(self) {
override def *(t: DummySInt) = self.dontCare
override def mac(m1: DummySInt, m2: DummySInt) = self.dontCare
override def +(t: DummySInt) = self.dontCare
override def -(t: DummySInt) = self.dontCare
override def >>(t: UInt) = self.dontCare
override def >(t: DummySInt): Bool = false.B
override def identity = self.dontCare
override def withWidthOf(t: DummySInt) = self.dontCare
override def clippedToWidthOf(t: DummySInt) = self.dontCare
override def relu = self.dontCare
override def zero = self.dontCare
override def minimum: DummySInt = self.dontCare
}
}
}
| module PE_461( // @[PE.scala:31:7]
input clock, // @[PE.scala:31:7]
input reset, // @[PE.scala:31:7]
input [7:0] io_in_a, // @[PE.scala:35:14]
input [19:0] io_in_b, // @[PE.scala:35:14]
input [19:0] io_in_d, // @[PE.scala:35:14]
output [7:0] io_out_a, // @[PE.scala:35:14]
output [19:0] io_out_b, // @[PE.scala:35:14]
output [19:0] io_out_c, // @[PE.scala:35:14]
input io_in_control_dataflow, // @[PE.scala:35:14]
input io_in_control_propagate, // @[PE.scala:35:14]
input [4:0] io_in_control_shift, // @[PE.scala:35:14]
output io_out_control_dataflow, // @[PE.scala:35:14]
output io_out_control_propagate, // @[PE.scala:35:14]
output [4:0] io_out_control_shift, // @[PE.scala:35:14]
input [2:0] io_in_id, // @[PE.scala:35:14]
output [2:0] io_out_id, // @[PE.scala:35:14]
input io_in_last, // @[PE.scala:35:14]
output io_out_last, // @[PE.scala:35:14]
input io_in_valid, // @[PE.scala:35:14]
output io_out_valid // @[PE.scala:35:14]
);
wire [7:0] io_in_a_0 = io_in_a; // @[PE.scala:31:7]
wire [19:0] io_in_b_0 = io_in_b; // @[PE.scala:31:7]
wire [19:0] io_in_d_0 = io_in_d; // @[PE.scala:31:7]
wire io_in_control_dataflow_0 = io_in_control_dataflow; // @[PE.scala:31:7]
wire io_in_control_propagate_0 = io_in_control_propagate; // @[PE.scala:31:7]
wire [4:0] io_in_control_shift_0 = io_in_control_shift; // @[PE.scala:31:7]
wire [2:0] io_in_id_0 = io_in_id; // @[PE.scala:31:7]
wire io_in_last_0 = io_in_last; // @[PE.scala:31:7]
wire io_in_valid_0 = io_in_valid; // @[PE.scala:31:7]
wire io_bad_dataflow = 1'h0; // @[PE.scala:31:7]
wire _io_out_c_T_5 = 1'h0; // @[Arithmetic.scala:125:33]
wire _io_out_c_T_6 = 1'h0; // @[Arithmetic.scala:125:60]
wire _io_out_c_T_16 = 1'h0; // @[Arithmetic.scala:125:33]
wire _io_out_c_T_17 = 1'h0; // @[Arithmetic.scala:125:60]
wire [7:0] io_out_a_0 = io_in_a_0; // @[PE.scala:31:7]
wire [19:0] _mac_unit_io_in_b_T = io_in_b_0; // @[PE.scala:31:7, :106:37]
wire [19:0] _mac_unit_io_in_b_T_2 = io_in_b_0; // @[PE.scala:31:7, :113:37]
wire [19:0] _mac_unit_io_in_b_T_8 = io_in_b_0; // @[PE.scala:31:7, :137:35]
wire io_out_control_dataflow_0 = io_in_control_dataflow_0; // @[PE.scala:31:7]
wire io_out_control_propagate_0 = io_in_control_propagate_0; // @[PE.scala:31:7]
wire [4:0] io_out_control_shift_0 = io_in_control_shift_0; // @[PE.scala:31:7]
wire [2:0] io_out_id_0 = io_in_id_0; // @[PE.scala:31:7]
wire io_out_last_0 = io_in_last_0; // @[PE.scala:31:7]
wire io_out_valid_0 = io_in_valid_0; // @[PE.scala:31:7]
wire [19:0] io_out_b_0; // @[PE.scala:31:7]
wire [19:0] io_out_c_0; // @[PE.scala:31:7]
reg [7:0] c1; // @[PE.scala:70:15]
wire [7:0] _io_out_c_zeros_T_1 = c1; // @[PE.scala:70:15]
wire [7:0] _mac_unit_io_in_b_T_6 = c1; // @[PE.scala:70:15, :127:38]
reg [7:0] c2; // @[PE.scala:71:15]
wire [7:0] _io_out_c_zeros_T_10 = c2; // @[PE.scala:71:15]
wire [7:0] _mac_unit_io_in_b_T_4 = c2; // @[PE.scala:71:15, :121:38]
reg last_s; // @[PE.scala:89:25]
wire flip = last_s != io_in_control_propagate_0; // @[PE.scala:31:7, :89:25, :90:21]
wire [4:0] shift_offset = flip ? io_in_control_shift_0 : 5'h0; // @[PE.scala:31:7, :90:21, :91:25]
wire _GEN = shift_offset == 5'h0; // @[PE.scala:91:25]
wire _io_out_c_point_five_T; // @[Arithmetic.scala:101:32]
assign _io_out_c_point_five_T = _GEN; // @[Arithmetic.scala:101:32]
wire _io_out_c_point_five_T_5; // @[Arithmetic.scala:101:32]
assign _io_out_c_point_five_T_5 = _GEN; // @[Arithmetic.scala:101:32]
wire [5:0] _GEN_0 = {1'h0, shift_offset} - 6'h1; // @[PE.scala:91:25]
wire [5:0] _io_out_c_point_five_T_1; // @[Arithmetic.scala:101:53]
assign _io_out_c_point_five_T_1 = _GEN_0; // @[Arithmetic.scala:101:53]
wire [5:0] _io_out_c_zeros_T_2; // @[Arithmetic.scala:102:66]
assign _io_out_c_zeros_T_2 = _GEN_0; // @[Arithmetic.scala:101:53, :102:66]
wire [5:0] _io_out_c_point_five_T_6; // @[Arithmetic.scala:101:53]
assign _io_out_c_point_five_T_6 = _GEN_0; // @[Arithmetic.scala:101:53]
wire [5:0] _io_out_c_zeros_T_11; // @[Arithmetic.scala:102:66]
assign _io_out_c_zeros_T_11 = _GEN_0; // @[Arithmetic.scala:101:53, :102:66]
wire [4:0] _io_out_c_point_five_T_2 = _io_out_c_point_five_T_1[4:0]; // @[Arithmetic.scala:101:53]
wire [7:0] _io_out_c_point_five_T_3 = $signed($signed(c1) >>> _io_out_c_point_five_T_2); // @[PE.scala:70:15]
wire _io_out_c_point_five_T_4 = _io_out_c_point_five_T_3[0]; // @[Arithmetic.scala:101:50]
wire io_out_c_point_five = ~_io_out_c_point_five_T & _io_out_c_point_five_T_4; // @[Arithmetic.scala:101:{29,32,50}]
wire _GEN_1 = shift_offset < 5'h2; // @[PE.scala:91:25]
wire _io_out_c_zeros_T; // @[Arithmetic.scala:102:27]
assign _io_out_c_zeros_T = _GEN_1; // @[Arithmetic.scala:102:27]
wire _io_out_c_zeros_T_9; // @[Arithmetic.scala:102:27]
assign _io_out_c_zeros_T_9 = _GEN_1; // @[Arithmetic.scala:102:27]
wire [4:0] _io_out_c_zeros_T_3 = _io_out_c_zeros_T_2[4:0]; // @[Arithmetic.scala:102:66]
wire [31:0] _io_out_c_zeros_T_4 = 32'h1 << _io_out_c_zeros_T_3; // @[Arithmetic.scala:102:{60,66}]
wire [32:0] _io_out_c_zeros_T_5 = {1'h0, _io_out_c_zeros_T_4} - 33'h1; // @[Arithmetic.scala:102:{60,81}]
wire [31:0] _io_out_c_zeros_T_6 = _io_out_c_zeros_T_5[31:0]; // @[Arithmetic.scala:102:81]
wire [31:0] _io_out_c_zeros_T_7 = {24'h0, _io_out_c_zeros_T_6[7:0] & _io_out_c_zeros_T_1}; // @[Arithmetic.scala:102:{45,52,81}]
wire [31:0] _io_out_c_zeros_T_8 = _io_out_c_zeros_T ? 32'h0 : _io_out_c_zeros_T_7; // @[Arithmetic.scala:102:{24,27,52}]
wire io_out_c_zeros = |_io_out_c_zeros_T_8; // @[Arithmetic.scala:102:{24,89}]
wire [7:0] _GEN_2 = {3'h0, shift_offset}; // @[PE.scala:91:25]
wire [7:0] _GEN_3 = $signed($signed(c1) >>> _GEN_2); // @[PE.scala:70:15]
wire [7:0] _io_out_c_ones_digit_T; // @[Arithmetic.scala:103:30]
assign _io_out_c_ones_digit_T = _GEN_3; // @[Arithmetic.scala:103:30]
wire [7:0] _io_out_c_T; // @[Arithmetic.scala:107:15]
assign _io_out_c_T = _GEN_3; // @[Arithmetic.scala:103:30, :107:15]
wire io_out_c_ones_digit = _io_out_c_ones_digit_T[0]; // @[Arithmetic.scala:103:30]
wire _io_out_c_r_T = io_out_c_zeros | io_out_c_ones_digit; // @[Arithmetic.scala:102:89, :103:30, :105:38]
wire _io_out_c_r_T_1 = io_out_c_point_five & _io_out_c_r_T; // @[Arithmetic.scala:101:29, :105:{29,38}]
wire io_out_c_r = _io_out_c_r_T_1; // @[Arithmetic.scala:105:{29,53}]
wire [1:0] _io_out_c_T_1 = {1'h0, io_out_c_r}; // @[Arithmetic.scala:105:53, :107:33]
wire [8:0] _io_out_c_T_2 = {_io_out_c_T[7], _io_out_c_T} + {{7{_io_out_c_T_1[1]}}, _io_out_c_T_1}; // @[Arithmetic.scala:107:{15,28,33}]
wire [7:0] _io_out_c_T_3 = _io_out_c_T_2[7:0]; // @[Arithmetic.scala:107:28]
wire [7:0] _io_out_c_T_4 = _io_out_c_T_3; // @[Arithmetic.scala:107:28]
wire [19:0] _io_out_c_T_7 = {{12{_io_out_c_T_4[7]}}, _io_out_c_T_4}; // @[Mux.scala:126:16]
wire [19:0] _io_out_c_T_8 = _io_out_c_T_7; // @[Mux.scala:126:16]
wire [19:0] _io_out_c_T_9 = _io_out_c_T_8; // @[Mux.scala:126:16]
wire [19:0] _io_out_c_T_10 = _io_out_c_T_9; // @[Arithmetic.scala:125:{81,99}]
wire [19:0] _mac_unit_io_in_b_T_1 = _mac_unit_io_in_b_T; // @[PE.scala:106:37]
wire [7:0] _mac_unit_io_in_b_WIRE = _mac_unit_io_in_b_T_1[7:0]; // @[PE.scala:106:37]
wire [7:0] _c1_T = io_in_d_0[7:0]; // @[PE.scala:31:7]
wire [7:0] _c2_T = io_in_d_0[7:0]; // @[PE.scala:31:7]
wire [7:0] _c1_T_1 = _c1_T; // @[Arithmetic.scala:114:{15,33}]
wire [4:0] _io_out_c_point_five_T_7 = _io_out_c_point_five_T_6[4:0]; // @[Arithmetic.scala:101:53]
wire [7:0] _io_out_c_point_five_T_8 = $signed($signed(c2) >>> _io_out_c_point_five_T_7); // @[PE.scala:71:15]
wire _io_out_c_point_five_T_9 = _io_out_c_point_five_T_8[0]; // @[Arithmetic.scala:101:50]
wire io_out_c_point_five_1 = ~_io_out_c_point_five_T_5 & _io_out_c_point_five_T_9; // @[Arithmetic.scala:101:{29,32,50}]
wire [4:0] _io_out_c_zeros_T_12 = _io_out_c_zeros_T_11[4:0]; // @[Arithmetic.scala:102:66]
wire [31:0] _io_out_c_zeros_T_13 = 32'h1 << _io_out_c_zeros_T_12; // @[Arithmetic.scala:102:{60,66}]
wire [32:0] _io_out_c_zeros_T_14 = {1'h0, _io_out_c_zeros_T_13} - 33'h1; // @[Arithmetic.scala:102:{60,81}]
wire [31:0] _io_out_c_zeros_T_15 = _io_out_c_zeros_T_14[31:0]; // @[Arithmetic.scala:102:81]
wire [31:0] _io_out_c_zeros_T_16 = {24'h0, _io_out_c_zeros_T_15[7:0] & _io_out_c_zeros_T_10}; // @[Arithmetic.scala:102:{45,52,81}]
wire [31:0] _io_out_c_zeros_T_17 = _io_out_c_zeros_T_9 ? 32'h0 : _io_out_c_zeros_T_16; // @[Arithmetic.scala:102:{24,27,52}]
wire io_out_c_zeros_1 = |_io_out_c_zeros_T_17; // @[Arithmetic.scala:102:{24,89}]
wire [7:0] _GEN_4 = $signed($signed(c2) >>> _GEN_2); // @[PE.scala:71:15]
wire [7:0] _io_out_c_ones_digit_T_1; // @[Arithmetic.scala:103:30]
assign _io_out_c_ones_digit_T_1 = _GEN_4; // @[Arithmetic.scala:103:30]
wire [7:0] _io_out_c_T_11; // @[Arithmetic.scala:107:15]
assign _io_out_c_T_11 = _GEN_4; // @[Arithmetic.scala:103:30, :107:15]
wire io_out_c_ones_digit_1 = _io_out_c_ones_digit_T_1[0]; // @[Arithmetic.scala:103:30]
wire _io_out_c_r_T_2 = io_out_c_zeros_1 | io_out_c_ones_digit_1; // @[Arithmetic.scala:102:89, :103:30, :105:38]
wire _io_out_c_r_T_3 = io_out_c_point_five_1 & _io_out_c_r_T_2; // @[Arithmetic.scala:101:29, :105:{29,38}]
wire io_out_c_r_1 = _io_out_c_r_T_3; // @[Arithmetic.scala:105:{29,53}]
wire [1:0] _io_out_c_T_12 = {1'h0, io_out_c_r_1}; // @[Arithmetic.scala:105:53, :107:33]
wire [8:0] _io_out_c_T_13 = {_io_out_c_T_11[7], _io_out_c_T_11} + {{7{_io_out_c_T_12[1]}}, _io_out_c_T_12}; // @[Arithmetic.scala:107:{15,28,33}]
wire [7:0] _io_out_c_T_14 = _io_out_c_T_13[7:0]; // @[Arithmetic.scala:107:28]
wire [7:0] _io_out_c_T_15 = _io_out_c_T_14; // @[Arithmetic.scala:107:28]
wire [19:0] _io_out_c_T_18 = {{12{_io_out_c_T_15[7]}}, _io_out_c_T_15}; // @[Mux.scala:126:16]
wire [19:0] _io_out_c_T_19 = _io_out_c_T_18; // @[Mux.scala:126:16]
wire [19:0] _io_out_c_T_20 = _io_out_c_T_19; // @[Mux.scala:126:16]
wire [19:0] _io_out_c_T_21 = _io_out_c_T_20; // @[Arithmetic.scala:125:{81,99}]
wire [19:0] _mac_unit_io_in_b_T_3 = _mac_unit_io_in_b_T_2; // @[PE.scala:113:37]
wire [7:0] _mac_unit_io_in_b_WIRE_1 = _mac_unit_io_in_b_T_3[7:0]; // @[PE.scala:113:37]
wire [7:0] _c2_T_1 = _c2_T; // @[Arithmetic.scala:114:{15,33}]
wire [7:0] _mac_unit_io_in_b_T_5; // @[PE.scala:121:38]
assign _mac_unit_io_in_b_T_5 = _mac_unit_io_in_b_T_4; // @[PE.scala:121:38]
wire [7:0] _mac_unit_io_in_b_WIRE_2 = _mac_unit_io_in_b_T_5; // @[PE.scala:121:38]
assign io_out_c_0 = io_in_control_propagate_0 ? {{12{c1[7]}}, c1} : {{12{c2[7]}}, c2}; // @[PE.scala:31:7, :70:15, :71:15, :119:30, :120:16, :126:16]
wire [7:0] _mac_unit_io_in_b_T_7; // @[PE.scala:127:38]
assign _mac_unit_io_in_b_T_7 = _mac_unit_io_in_b_T_6; // @[PE.scala:127:38]
wire [7:0] _mac_unit_io_in_b_WIRE_3 = _mac_unit_io_in_b_T_7; // @[PE.scala:127:38]
wire [19:0] _mac_unit_io_in_b_T_9 = _mac_unit_io_in_b_T_8; // @[PE.scala:137:35]
wire [7:0] _mac_unit_io_in_b_WIRE_4 = _mac_unit_io_in_b_T_9[7:0]; // @[PE.scala:137:35]
always @(posedge clock) begin // @[PE.scala:31:7]
if (io_in_valid_0 & io_in_control_propagate_0) // @[PE.scala:31:7, :102:95, :141:17, :142:8]
c1 <= io_in_d_0[7:0]; // @[PE.scala:31:7, :70:15]
if (~(~io_in_valid_0 | io_in_control_propagate_0)) // @[PE.scala:31:7, :71:15, :102:95, :119:30, :130:10, :141:{9,17}, :143:8]
c2 <= io_in_d_0[7:0]; // @[PE.scala:31:7, :71:15]
if (io_in_valid_0) // @[PE.scala:31:7]
last_s <= io_in_control_propagate_0; // @[PE.scala:31:7, :89:25]
end // always @(posedge)
MacUnit_205 mac_unit ( // @[PE.scala:64:24]
.clock (clock),
.reset (reset),
.io_in_a (io_in_a_0), // @[PE.scala:31:7]
.io_in_b (io_in_control_propagate_0 ? _mac_unit_io_in_b_WIRE_2 : _mac_unit_io_in_b_WIRE_3), // @[PE.scala:31:7, :119:30, :121:{24,38}, :127:{24,38}]
.io_in_c (io_in_b_0), // @[PE.scala:31:7]
.io_out_d (io_out_b_0)
); // @[PE.scala:64:24]
assign io_out_a = io_out_a_0; // @[PE.scala:31:7]
assign io_out_b = io_out_b_0; // @[PE.scala:31:7]
assign io_out_c = io_out_c_0; // @[PE.scala:31:7]
assign io_out_control_dataflow = io_out_control_dataflow_0; // @[PE.scala:31:7]
assign io_out_control_propagate = io_out_control_propagate_0; // @[PE.scala:31:7]
assign io_out_control_shift = io_out_control_shift_0; // @[PE.scala:31:7]
assign io_out_id = io_out_id_0; // @[PE.scala:31:7]
assign io_out_last = io_out_last_0; // @[PE.scala:31:7]
assign io_out_valid = io_out_valid_0; // @[PE.scala:31:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File PE.scala:
// See README.md for license details.
package gemmini
import chisel3._
import chisel3.util._
class PEControl[T <: Data : Arithmetic](accType: T) extends Bundle {
val dataflow = UInt(1.W) // TODO make this an Enum
val propagate = UInt(1.W) // Which register should be propagated (and which should be accumulated)?
val shift = UInt(log2Up(accType.getWidth).W) // TODO this isn't correct for Floats
}
class MacUnit[T <: Data](inputType: T, cType: T, dType: T) (implicit ev: Arithmetic[T]) extends Module {
import ev._
val io = IO(new Bundle {
val in_a = Input(inputType)
val in_b = Input(inputType)
val in_c = Input(cType)
val out_d = Output(dType)
})
io.out_d := io.in_c.mac(io.in_a, io.in_b)
}
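// Usage sketch (illustration only, not part of the generated design): with the 8-bit inputs and
// 20-bit partial sums seen in the generated modules below, Module(new MacUnit(SInt(8.W), SInt(20.W), SInt(20.W)))
// combinationally computes io.out_d = io.in_c + io.in_a * io.in_b.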
// TODO update documentation
/**
* A PE implementing a MAC operation. Configured as fully combinational when integrated into a Mesh.
* @param width Data width of operands
*/
class PE[T <: Data](inputType: T, outputType: T, accType: T, df: Dataflow.Value, max_simultaneous_matmuls: Int)
(implicit ev: Arithmetic[T]) extends Module { // Debugging variables
import ev._
val io = IO(new Bundle {
val in_a = Input(inputType)
val in_b = Input(outputType)
val in_d = Input(outputType)
val out_a = Output(inputType)
val out_b = Output(outputType)
val out_c = Output(outputType)
val in_control = Input(new PEControl(accType))
val out_control = Output(new PEControl(accType))
val in_id = Input(UInt(log2Up(max_simultaneous_matmuls).W))
val out_id = Output(UInt(log2Up(max_simultaneous_matmuls).W))
val in_last = Input(Bool())
val out_last = Output(Bool())
val in_valid = Input(Bool())
val out_valid = Output(Bool())
val bad_dataflow = Output(Bool())
})
val cType = if (df == Dataflow.WS) inputType else accType
// When creating PEs that support multiple dataflows, the
// elaboration/synthesis tools often fail to consolidate and de-duplicate
// MAC units. To force mac circuitry to be re-used, we create a "mac_unit"
// module here which just performs a single MAC operation
val mac_unit = Module(new MacUnit(inputType,
if (df == Dataflow.WS) outputType else accType, outputType))
val a = io.in_a
val b = io.in_b
val d = io.in_d
val c1 = Reg(cType)
val c2 = Reg(cType)
val dataflow = io.in_control.dataflow
val prop = io.in_control.propagate
val shift = io.in_control.shift
val id = io.in_id
val last = io.in_last
val valid = io.in_valid
io.out_a := a
io.out_control.dataflow := dataflow
io.out_control.propagate := prop
io.out_control.shift := shift
io.out_id := id
io.out_last := last
io.out_valid := valid
mac_unit.io.in_a := a
val last_s = RegEnable(prop, valid)
val flip = last_s =/= prop
val shift_offset = Mux(flip, shift, 0.U)
// Which dataflow are we using?
val OUTPUT_STATIONARY = Dataflow.OS.id.U(1.W)
val WEIGHT_STATIONARY = Dataflow.WS.id.U(1.W)
// Is c1 being computed on, or propagated forward (in the output-stationary dataflow)?
val COMPUTE = 0.U(1.W)
val PROPAGATE = 1.U(1.W)
io.bad_dataflow := false.B
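// Note: in the output-stationary case below, c1 and c2 act as a double buffer. The register that
// `prop` marks as PROPAGATE is shifted/rounded out through io.out_c and reloaded from io.in_d,
// while the other register accumulates MAC results; the roles swap whenever `prop` toggles.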
when ((df == Dataflow.OS).B || ((df == Dataflow.BOTH).B && dataflow === OUTPUT_STATIONARY)) {
when(prop === PROPAGATE) {
io.out_c := (c1 >> shift_offset).clippedToWidthOf(outputType)
io.out_b := b
mac_unit.io.in_b := b.asTypeOf(inputType)
mac_unit.io.in_c := c2
c2 := mac_unit.io.out_d
c1 := d.withWidthOf(cType)
}.otherwise {
io.out_c := (c2 >> shift_offset).clippedToWidthOf(outputType)
io.out_b := b
mac_unit.io.in_b := b.asTypeOf(inputType)
mac_unit.io.in_c := c1
c1 := mac_unit.io.out_d
c2 := d.withWidthOf(cType)
}
}.elsewhen ((df == Dataflow.WS).B || ((df == Dataflow.BOTH).B && dataflow === WEIGHT_STATIONARY)) {
when(prop === PROPAGATE) {
io.out_c := c1
mac_unit.io.in_b := c2.asTypeOf(inputType)
mac_unit.io.in_c := b
io.out_b := mac_unit.io.out_d
c1 := d
}.otherwise {
io.out_c := c2
mac_unit.io.in_b := c1.asTypeOf(inputType)
mac_unit.io.in_c := b
io.out_b := mac_unit.io.out_d
c2 := d
}
}.otherwise {
io.bad_dataflow := true.B
//assert(false.B, "unknown dataflow")
io.out_c := DontCare
io.out_b := DontCare
mac_unit.io.in_b := b.asTypeOf(inputType)
mac_unit.io.in_c := c2
}
when (!valid) {
c1 := c1
c2 := c2
mac_unit.io.in_b := DontCare
mac_unit.io.in_c := DontCare
}
}
File Arithmetic.scala:
// A simple type class for Chisel datatypes that can add and multiply. To add your own type, simply create your own:
// implicit MyTypeArithmetic extends Arithmetic[MyType] { ... }
package gemmini
import chisel3._
import chisel3.util._
import hardfloat._
// Bundles that represent the raw bits of custom datatypes
case class Float(expWidth: Int, sigWidth: Int) extends Bundle {
val bits = UInt((expWidth + sigWidth).W)
val bias: Int = (1 << (expWidth-1)) - 1
}
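// For example, Float(8, 24) matches IEEE-754 single precision: 32 stored bits and bias = (1 << 7) - 1 = 127.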
case class DummySInt(w: Int) extends Bundle {
val bits = UInt(w.W)
def dontCare: DummySInt = {
val o = Wire(new DummySInt(w))
o.bits := 0.U
o
}
}
// The Arithmetic typeclass which implements various arithmetic operations on custom datatypes
abstract class Arithmetic[T <: Data] {
implicit def cast(t: T): ArithmeticOps[T]
}
abstract class ArithmeticOps[T <: Data](self: T) {
def *(t: T): T
def mac(m1: T, m2: T): T // Returns (m1 * m2 + self)
def +(t: T): T
def -(t: T): T
def >>(u: UInt): T // This is a rounding shift! Rounds away from 0
def >(t: T): Bool
def identity: T
def withWidthOf(t: T): T
def clippedToWidthOf(t: T): T // Like "withWidthOf", except that it saturates
def relu: T
def zero: T
def minimum: T
// Optional parameters, which only need to be defined if you want to enable various optimizations for transformers
def divider(denom_t: UInt, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[T])] = None
def sqrt: Option[(DecoupledIO[UInt], DecoupledIO[T])] = None
def reciprocal[U <: Data](u: U, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[U])] = None
def mult_with_reciprocal[U <: Data](reciprocal: U) = self
}
object Arithmetic {
implicit object UIntArithmetic extends Arithmetic[UInt] {
override implicit def cast(self: UInt) = new ArithmeticOps(self) {
override def *(t: UInt) = self * t
override def mac(m1: UInt, m2: UInt) = m1 * m2 + self
override def +(t: UInt) = self + t
override def -(t: UInt) = self - t
override def >>(u: UInt) = {
// The equation we use can be found here: https://riscv.github.io/documents/riscv-v-spec/#_vector_fixed_point_rounding_mode_register_vxrm
// TODO Do we need to explicitly handle the cases where "u" is a small number (like 0)? What is the default behavior here?
val point_five = Mux(u === 0.U, 0.U, self(u - 1.U))
val zeros = Mux(u <= 1.U, 0.U, self.asUInt & ((1.U << (u - 1.U)).asUInt - 1.U)) =/= 0.U
val ones_digit = self(u)
val r = point_five & (zeros | ones_digit)
(self >> u).asUInt + r
}
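// Worked example (illustration only): for self = 11 (0b1011) and u = 2, point_five = self(1) = 1,
// zeros = (self & 0b1) =/= 0 = true, ones_digit = self(2) = 0, so r = 1 and the shift returns
// (11 >> 2) + 1 = 3, i.e. 11/4 = 2.75 rounded away from zero.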
override def >(t: UInt): Bool = self > t
override def withWidthOf(t: UInt) = self.asTypeOf(t)
override def clippedToWidthOf(t: UInt) = {
val sat = ((1 << (t.getWidth-1))-1).U
Mux(self > sat, sat, self)(t.getWidth-1, 0)
}
override def relu: UInt = self
override def zero: UInt = 0.U
override def identity: UInt = 1.U
override def minimum: UInt = 0.U
}
}
implicit object SIntArithmetic extends Arithmetic[SInt] {
override implicit def cast(self: SInt) = new ArithmeticOps(self) {
override def *(t: SInt) = self * t
override def mac(m1: SInt, m2: SInt) = m1 * m2 + self
override def +(t: SInt) = self + t
override def -(t: SInt) = self - t
override def >>(u: UInt) = {
// The equation we use can be found here: https://riscv.github.io/documents/riscv-v-spec/#_vector_fixed_point_rounding_mode_register_vxrm
// TODO Do we need to explicitly handle the cases where "u" is a small number (like 0)? What is the default behavior here?
val point_five = Mux(u === 0.U, 0.U, self(u - 1.U))
val zeros = Mux(u <= 1.U, 0.U, self.asUInt & ((1.U << (u - 1.U)).asUInt - 1.U)) =/= 0.U
val ones_digit = self(u)
val r = (point_five & (zeros | ones_digit)).asBool
(self >> u).asSInt + Mux(r, 1.S, 0.S)
}
override def >(t: SInt): Bool = self > t
override def withWidthOf(t: SInt) = {
if (self.getWidth >= t.getWidth)
self(t.getWidth-1, 0).asSInt
else {
val sign_bits = t.getWidth - self.getWidth
val sign = self(self.getWidth-1)
Cat(Cat(Seq.fill(sign_bits)(sign)), self).asTypeOf(t)
}
}
override def clippedToWidthOf(t: SInt): SInt = {
val maxsat = ((1 << (t.getWidth-1))-1).S
val minsat = (-(1 << (t.getWidth-1))).S
MuxCase(self, Seq((self > maxsat) -> maxsat, (self < minsat) -> minsat))(t.getWidth-1, 0).asSInt
}
override def relu: SInt = Mux(self >= 0.S, self, 0.S)
override def zero: SInt = 0.S
override def identity: SInt = 1.S
override def minimum: SInt = (-(1 << (self.getWidth-1))).S
override def divider(denom_t: UInt, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[SInt])] = {
// TODO this uses a floating point divider, but we should use an integer divider instead
val input = Wire(Decoupled(denom_t.cloneType))
val output = Wire(Decoupled(self.cloneType))
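// The returned pair is (denominator input, quotient output); `self` itself supplies the numerator below.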
// We translate our integer to floating-point form so that we can use the hardfloat divider
val expWidth = log2Up(self.getWidth) + 1
val sigWidth = self.getWidth
def sin_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def uin_to_float(x: UInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := false.B
in_to_rec_fn.io.in := x
in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def float_to_in(x: UInt) = {
val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth))
rec_fn_to_in.io.signedOut := true.B
rec_fn_to_in.io.in := x
rec_fn_to_in.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
rec_fn_to_in.io.out.asSInt
}
val self_rec = sin_to_float(self)
val denom_rec = uin_to_float(input.bits)
// Instantiate the hardfloat divider
val divider = Module(new DivSqrtRecFN_small(expWidth, sigWidth, options))
input.ready := divider.io.inReady
divider.io.inValid := input.valid
divider.io.sqrtOp := false.B
divider.io.a := self_rec
divider.io.b := denom_rec
divider.io.roundingMode := consts.round_minMag
divider.io.detectTininess := consts.tininess_afterRounding
output.valid := divider.io.outValid_div
output.bits := float_to_in(divider.io.out)
assert(!output.valid || output.ready)
Some((input, output))
}
override def sqrt: Option[(DecoupledIO[UInt], DecoupledIO[SInt])] = {
// TODO this uses a floating point div/sqrt unit, but we should use an integer sqrt instead
val input = Wire(Decoupled(UInt(0.W)))
val output = Wire(Decoupled(self.cloneType))
input.bits := DontCare
// We translate our integer to floating-point form so that we can use the hardfloat divider
val expWidth = log2Up(self.getWidth) + 1
val sigWidth = self.getWidth
def in_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def float_to_in(x: UInt) = {
val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth))
rec_fn_to_in.io.signedOut := true.B
rec_fn_to_in.io.in := x
rec_fn_to_in.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
rec_fn_to_in.io.out.asSInt
}
val self_rec = in_to_float(self)
// Instantiate the hardfloat sqrt
val sqrter = Module(new DivSqrtRecFN_small(expWidth, sigWidth, 0))
input.ready := sqrter.io.inReady
sqrter.io.inValid := input.valid
sqrter.io.sqrtOp := true.B
sqrter.io.a := self_rec
sqrter.io.b := DontCare
sqrter.io.roundingMode := consts.round_minMag
sqrter.io.detectTininess := consts.tininess_afterRounding
output.valid := sqrter.io.outValid_sqrt
output.bits := float_to_in(sqrter.io.out)
assert(!output.valid || output.ready)
Some((input, output))
}
override def reciprocal[U <: Data](u: U, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[U])] = u match {
case Float(expWidth, sigWidth) =>
val input = Wire(Decoupled(UInt(0.W)))
val output = Wire(Decoupled(u.cloneType))
input.bits := DontCare
// We translate our integer to floating-point form so that we can use the hardfloat divider
def in_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
val self_rec = in_to_float(self)
val one_rec = in_to_float(1.S)
// Instantiate the hardfloat divider
val divider = Module(new DivSqrtRecFN_small(expWidth, sigWidth, options))
input.ready := divider.io.inReady
divider.io.inValid := input.valid
divider.io.sqrtOp := false.B
divider.io.a := one_rec
divider.io.b := self_rec
divider.io.roundingMode := consts.round_near_even
divider.io.detectTininess := consts.tininess_afterRounding
output.valid := divider.io.outValid_div
output.bits := fNFromRecFN(expWidth, sigWidth, divider.io.out).asTypeOf(u)
assert(!output.valid || output.ready)
Some((input, output))
case _ => None
}
override def mult_with_reciprocal[U <: Data](reciprocal: U): SInt = reciprocal match {
case recip @ Float(expWidth, sigWidth) =>
def in_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def float_to_in(x: UInt) = {
val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth))
rec_fn_to_in.io.signedOut := true.B
rec_fn_to_in.io.in := x
rec_fn_to_in.io.roundingMode := consts.round_minMag
rec_fn_to_in.io.out.asSInt
}
val self_rec = in_to_float(self)
val reciprocal_rec = recFNFromFN(expWidth, sigWidth, recip.bits)
// Instantiate the hardfloat multiplier
val muladder = Module(new MulRecFN(expWidth, sigWidth))
muladder.io.roundingMode := consts.round_near_even
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := self_rec
muladder.io.b := reciprocal_rec
float_to_in(muladder.io.out)
case _ => self
}
}
}
implicit object FloatArithmetic extends Arithmetic[Float] {
// TODO Floating point arithmetic currently switches between recoded and standard formats for every operation. However, it should stay in the recoded format as it travels through the systolic array
override implicit def cast(self: Float): ArithmeticOps[Float] = new ArithmeticOps(self) {
override def *(t: Float): Float = {
val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth))
t_resizer.io.in := t_rec
t_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
t_resizer.io.detectTininess := consts.tininess_afterRounding
val t_rec_resized = t_resizer.io.out
val muladder = Module(new MulRecFN(self.expWidth, self.sigWidth))
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := self_rec
muladder.io.b := t_rec_resized
val out = Wire(Float(self.expWidth, self.sigWidth))
out.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
out
}
override def mac(m1: Float, m2: Float): Float = {
// Recode all operands
val m1_rec = recFNFromFN(m1.expWidth, m1.sigWidth, m1.bits)
val m2_rec = recFNFromFN(m2.expWidth, m2.sigWidth, m2.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Resize m1 to self's width
val m1_resizer = Module(new RecFNToRecFN(m1.expWidth, m1.sigWidth, self.expWidth, self.sigWidth))
m1_resizer.io.in := m1_rec
m1_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
m1_resizer.io.detectTininess := consts.tininess_afterRounding
val m1_rec_resized = m1_resizer.io.out
// Resize m2 to self's width
val m2_resizer = Module(new RecFNToRecFN(m2.expWidth, m2.sigWidth, self.expWidth, self.sigWidth))
m2_resizer.io.in := m2_rec
m2_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
m2_resizer.io.detectTininess := consts.tininess_afterRounding
val m2_rec_resized = m2_resizer.io.out
// Perform multiply-add
val muladder = Module(new MulAddRecFN(self.expWidth, self.sigWidth))
muladder.io.op := 0.U
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := m1_rec_resized
muladder.io.b := m2_rec_resized
muladder.io.c := self_rec
// Convert result to standard format // TODO remove these intermediate recodings
val out = Wire(Float(self.expWidth, self.sigWidth))
out.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
out
}
override def +(t: Float): Float = {
require(self.getWidth >= t.getWidth) // This just makes it easier to write the resizing code
// Recode all operands
val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Generate 1 as a float
val in_to_rec_fn = Module(new INToRecFN(1, self.expWidth, self.sigWidth))
in_to_rec_fn.io.signedIn := false.B
in_to_rec_fn.io.in := 1.U
in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
val one_rec = in_to_rec_fn.io.out
// Resize t
val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth))
t_resizer.io.in := t_rec
t_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
t_resizer.io.detectTininess := consts.tininess_afterRounding
val t_rec_resized = t_resizer.io.out
// Perform addition
val muladder = Module(new MulAddRecFN(self.expWidth, self.sigWidth))
muladder.io.op := 0.U
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := t_rec_resized
muladder.io.b := one_rec
muladder.io.c := self_rec
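// MulAddRecFN computes a*b + c, so with a = t (resized), b = 1.0, and c = self this evaluates t + self.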
val result = Wire(Float(self.expWidth, self.sigWidth))
result.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
result
}
override def -(t: Float): Float = {
val t_sgn = t.bits(t.getWidth-1)
val neg_t = Cat(~t_sgn, t.bits(t.getWidth-2,0)).asTypeOf(t)
self + neg_t
}
override def >>(u: UInt): Float = {
// Recode self
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Get 2^(-u) as a recoded float
val shift_exp = Wire(UInt(self.expWidth.W))
shift_exp := self.bias.U - u
val shift_fn = Cat(0.U(1.W), shift_exp, 0.U((self.sigWidth-1).W))
val shift_rec = recFNFromFN(self.expWidth, self.sigWidth, shift_fn)
assert(shift_exp =/= 0.U, "scaling by denormalized numbers is not currently supported")
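// Example (illustration only): with expWidth = 8 (bias = 127) and u = 3, shift_exp = 124, so
// shift_fn encodes 2^-3 = 0.125 and the multiply below scales `self` by 1/8.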
// Multiply self and 2^(-u)
val muladder = Module(new MulRecFN(self.expWidth, self.sigWidth))
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := self_rec
muladder.io.b := shift_rec
val result = Wire(Float(self.expWidth, self.sigWidth))
result.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
result
}
override def >(t: Float): Bool = {
// Recode all operands
val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Resize t to self's width
val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth))
t_resizer.io.in := t_rec
t_resizer.io.roundingMode := consts.round_near_even
t_resizer.io.detectTininess := consts.tininess_afterRounding
val t_rec_resized = t_resizer.io.out
val comparator = Module(new CompareRecFN(self.expWidth, self.sigWidth))
comparator.io.a := self_rec
comparator.io.b := t_rec_resized
comparator.io.signaling := false.B
comparator.io.gt
}
override def withWidthOf(t: Float): Float = {
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
val resizer = Module(new RecFNToRecFN(self.expWidth, self.sigWidth, t.expWidth, t.sigWidth))
resizer.io.in := self_rec
resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
resizer.io.detectTininess := consts.tininess_afterRounding
val result = Wire(Float(t.expWidth, t.sigWidth))
result.bits := fNFromRecFN(t.expWidth, t.sigWidth, resizer.io.out)
result
}
override def clippedToWidthOf(t: Float): Float = {
// TODO check for overflow. Right now, we just assume that overflow doesn't happen
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
val resizer = Module(new RecFNToRecFN(self.expWidth, self.sigWidth, t.expWidth, t.sigWidth))
resizer.io.in := self_rec
resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
resizer.io.detectTininess := consts.tininess_afterRounding
val result = Wire(Float(t.expWidth, t.sigWidth))
result.bits := fNFromRecFN(t.expWidth, t.sigWidth, resizer.io.out)
result
}
override def relu: Float = {
val raw = rawFloatFromFN(self.expWidth, self.sigWidth, self.bits)
val result = Wire(Float(self.expWidth, self.sigWidth))
result.bits := Mux(!raw.isZero && raw.sign, 0.U, self.bits)
result
}
override def zero: Float = 0.U.asTypeOf(self)
override def identity: Float = Cat(0.U(2.W), ~(0.U((self.expWidth-1).W)), 0.U((self.sigWidth-1).W)).asTypeOf(self)
override def minimum: Float = Cat(1.U, ~(0.U(self.expWidth.W)), 0.U((self.sigWidth-1).W)).asTypeOf(self)
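// For Float(8, 24): identity encodes exponent = bias with a zero fraction, i.e. 1.0 (0x3F800000),
// and minimum encodes sign = 1 with an all-ones exponent, i.e. negative infinity (0xFF800000).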
}
}
implicit object DummySIntArithmetic extends Arithmetic[DummySInt] {
override implicit def cast(self: DummySInt) = new ArithmeticOps(self) {
override def *(t: DummySInt) = self.dontCare
override def mac(m1: DummySInt, m2: DummySInt) = self.dontCare
override def +(t: DummySInt) = self.dontCare
override def -(t: DummySInt) = self.dontCare
override def >>(t: UInt) = self.dontCare
override def >(t: DummySInt): Bool = false.B
override def identity = self.dontCare
override def withWidthOf(t: DummySInt) = self.dontCare
override def clippedToWidthOf(t: DummySInt) = self.dontCare
override def relu = self.dontCare
override def zero = self.dontCare
override def minimum: DummySInt = self.dontCare
}
}
}
| module MacUnit_129( // @[PE.scala:14:7]
input clock, // @[PE.scala:14:7]
input reset, // @[PE.scala:14:7]
input [7:0] io_in_a, // @[PE.scala:16:14]
input [7:0] io_in_b, // @[PE.scala:16:14]
input [19:0] io_in_c, // @[PE.scala:16:14]
output [19:0] io_out_d // @[PE.scala:16:14]
);
wire [7:0] io_in_a_0 = io_in_a; // @[PE.scala:14:7]
wire [7:0] io_in_b_0 = io_in_b; // @[PE.scala:14:7]
wire [19:0] io_in_c_0 = io_in_c; // @[PE.scala:14:7]
wire [19:0] _io_out_d_T_3; // @[Arithmetic.scala:93:54]
wire [19:0] io_out_d_0; // @[PE.scala:14:7]
wire [15:0] _io_out_d_T = {{8{io_in_a_0[7]}}, io_in_a_0} * {{8{io_in_b_0[7]}}, io_in_b_0}; // @[PE.scala:14:7]
wire [20:0] _io_out_d_T_1 = {{5{_io_out_d_T[15]}}, _io_out_d_T} + {io_in_c_0[19], io_in_c_0}; // @[PE.scala:14:7]
wire [19:0] _io_out_d_T_2 = _io_out_d_T_1[19:0]; // @[Arithmetic.scala:93:54]
assign _io_out_d_T_3 = _io_out_d_T_2; // @[Arithmetic.scala:93:54]
assign io_out_d_0 = _io_out_d_T_3; // @[PE.scala:14:7]
assign io_out_d = io_out_d_0; // @[PE.scala:14:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File RoundAnyRawFNToRecFN.scala:
/*============================================================================
This Chisel source file is part of a pre-release version of the HardFloat IEEE
Floating-Point Arithmetic Package, by John R. Hauser (with some contributions
from Yunsup Lee and Andrew Waterman, mainly concerning testing).
Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the
University of California. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions, and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions, and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the University nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
=============================================================================*/
package hardfloat
import chisel3._
import chisel3.util.Fill
import consts._
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
class
RoundAnyRawFNToRecFN(
inExpWidth: Int,
inSigWidth: Int,
outExpWidth: Int,
outSigWidth: Int,
options: Int
)
extends RawModule
{
override def desiredName = s"RoundAnyRawFNToRecFN_ie${inExpWidth}_is${inSigWidth}_oe${outExpWidth}_os${outSigWidth}"
val io = IO(new Bundle {
val invalidExc = Input(Bool()) // overrides 'infiniteExc' and 'in'
val infiniteExc = Input(Bool()) // overrides 'in' except for 'in.sign'
val in = Input(new RawFloat(inExpWidth, inSigWidth))
// (allowed exponent range has limits)
val roundingMode = Input(UInt(3.W))
val detectTininess = Input(UInt(1.W))
val out = Output(Bits((outExpWidth + outSigWidth + 1).W))
val exceptionFlags = Output(Bits(5.W))
})
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val sigMSBitAlwaysZero = ((options & flRoundOpt_sigMSBitAlwaysZero) != 0)
val effectiveInSigWidth =
if (sigMSBitAlwaysZero) inSigWidth else inSigWidth + 1
val neverUnderflows =
((options &
(flRoundOpt_neverUnderflows | flRoundOpt_subnormsAlwaysExact)
) != 0) ||
(inExpWidth < outExpWidth)
val neverOverflows =
((options & flRoundOpt_neverOverflows) != 0) ||
(inExpWidth < outExpWidth)
val outNaNExp = BigInt(7)<<(outExpWidth - 2)
val outInfExp = BigInt(6)<<(outExpWidth - 2)
val outMaxFiniteExp = outInfExp - 1
val outMinNormExp = (BigInt(1)<<(outExpWidth - 1)) + 2
val outMinNonzeroExp = outMinNormExp - outSigWidth + 1
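// For the e8_s24 (single-precision) instance generated below, these recoded-exponent constants are:
// outNaNExp = 448, outInfExp = 384, outMaxFiniteExp = 383, outMinNormExp = 130, outMinNonzeroExp = 107.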
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val roundingMode_near_even = (io.roundingMode === round_near_even)
val roundingMode_minMag = (io.roundingMode === round_minMag)
val roundingMode_min = (io.roundingMode === round_min)
val roundingMode_max = (io.roundingMode === round_max)
val roundingMode_near_maxMag = (io.roundingMode === round_near_maxMag)
val roundingMode_odd = (io.roundingMode === round_odd)
val roundMagUp =
(roundingMode_min && io.in.sign) || (roundingMode_max && ! io.in.sign)
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val sAdjustedExp =
if (inExpWidth < outExpWidth)
(io.in.sExp +&
((BigInt(1)<<outExpWidth) - (BigInt(1)<<inExpWidth)).S
)(outExpWidth, 0).zext
else if (inExpWidth == outExpWidth)
io.in.sExp
else
io.in.sExp +&
((BigInt(1)<<outExpWidth) - (BigInt(1)<<inExpWidth)).S
val adjustedSig =
if (inSigWidth <= outSigWidth + 2)
io.in.sig<<(outSigWidth - inSigWidth + 2)
else
(io.in.sig(inSigWidth, inSigWidth - outSigWidth - 1) ##
io.in.sig(inSigWidth - outSigWidth - 2, 0).orR
)
val doShiftSigDown1 =
if (sigMSBitAlwaysZero) false.B else adjustedSig(outSigWidth + 2)
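// doShiftSigDown1 indicates that the incoming significand has carried into the extra MSB (value in [2, 4)),
// in which case the logic below takes the fraction one bit higher and adjusts the exponent by one.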
val common_expOut = Wire(UInt((outExpWidth + 1).W))
val common_fractOut = Wire(UInt((outSigWidth - 1).W))
val common_overflow = Wire(Bool())
val common_totalUnderflow = Wire(Bool())
val common_underflow = Wire(Bool())
val common_inexact = Wire(Bool())
if (
neverOverflows && neverUnderflows
&& (effectiveInSigWidth <= outSigWidth)
) {
//--------------------------------------------------------------------
//--------------------------------------------------------------------
common_expOut := sAdjustedExp(outExpWidth, 0) + doShiftSigDown1
common_fractOut :=
Mux(doShiftSigDown1,
adjustedSig(outSigWidth + 1, 3),
adjustedSig(outSigWidth, 2)
)
common_overflow := false.B
common_totalUnderflow := false.B
common_underflow := false.B
common_inexact := false.B
} else {
//--------------------------------------------------------------------
//--------------------------------------------------------------------
val roundMask =
if (neverUnderflows)
0.U(outSigWidth.W) ## doShiftSigDown1 ## 3.U(2.W)
else
(lowMask(
sAdjustedExp(outExpWidth, 0),
outMinNormExp - outSigWidth - 1,
outMinNormExp
) | doShiftSigDown1) ##
3.U(2.W)
val shiftedRoundMask = 0.U(1.W) ## roundMask>>1
val roundPosMask = ~shiftedRoundMask & roundMask
val roundPosBit = (adjustedSig & roundPosMask).orR
val anyRoundExtra = (adjustedSig & shiftedRoundMask).orR
val anyRound = roundPosBit || anyRoundExtra
val roundIncr =
((roundingMode_near_even || roundingMode_near_maxMag) &&
roundPosBit) ||
(roundMagUp && anyRound)
val roundedSig: Bits =
Mux(roundIncr,
(((adjustedSig | roundMask)>>2) +& 1.U) &
~Mux(roundingMode_near_even && roundPosBit &&
! anyRoundExtra,
roundMask>>1,
0.U((outSigWidth + 2).W)
),
(adjustedSig & ~roundMask)>>2 |
Mux(roundingMode_odd && anyRound, roundPosMask>>1, 0.U)
)
//*** IF SIG WIDTH IS VERY NARROW, NEED TO ACCOUNT FOR ROUND-EVEN ZEROING
//*** M.S. BIT OF SUBNORMAL SIG?
val sRoundedExp = sAdjustedExp +& (roundedSig>>outSigWidth).asUInt.zext
common_expOut := sRoundedExp(outExpWidth, 0)
common_fractOut :=
Mux(doShiftSigDown1,
roundedSig(outSigWidth - 1, 1),
roundedSig(outSigWidth - 2, 0)
)
common_overflow :=
(if (neverOverflows) false.B else
//*** REWRITE BASED ON BEFORE-ROUNDING EXPONENT?:
(sRoundedExp>>(outExpWidth - 1) >= 3.S))
common_totalUnderflow :=
(if (neverUnderflows) false.B else
//*** WOULD BE GOOD ENOUGH TO USE EXPONENT BEFORE ROUNDING?:
(sRoundedExp < outMinNonzeroExp.S))
val unboundedRange_roundPosBit =
Mux(doShiftSigDown1, adjustedSig(2), adjustedSig(1))
val unboundedRange_anyRound =
(doShiftSigDown1 && adjustedSig(2)) || adjustedSig(1, 0).orR
val unboundedRange_roundIncr =
((roundingMode_near_even || roundingMode_near_maxMag) &&
unboundedRange_roundPosBit) ||
(roundMagUp && unboundedRange_anyRound)
val roundCarry =
Mux(doShiftSigDown1,
roundedSig(outSigWidth + 1),
roundedSig(outSigWidth)
)
common_underflow :=
(if (neverUnderflows) false.B else
common_totalUnderflow ||
//*** IF SIG WIDTH IS VERY NARROW, NEED TO ACCOUNT FOR ROUND-EVEN ZEROING
//*** M.S. BIT OF SUBNORMAL SIG?
(anyRound && ((sAdjustedExp>>outExpWidth) <= 0.S) &&
Mux(doShiftSigDown1, roundMask(3), roundMask(2)) &&
! ((io.detectTininess === tininess_afterRounding) &&
! Mux(doShiftSigDown1,
roundMask(4),
roundMask(3)
) &&
roundCarry && roundPosBit &&
unboundedRange_roundIncr)))
common_inexact := common_totalUnderflow || anyRound
}
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val isNaNOut = io.invalidExc || io.in.isNaN
val notNaN_isSpecialInfOut = io.infiniteExc || io.in.isInf
val commonCase = ! isNaNOut && ! notNaN_isSpecialInfOut && ! io.in.isZero
val overflow = commonCase && common_overflow
val underflow = commonCase && common_underflow
val inexact = overflow || (commonCase && common_inexact)
val overflow_roundMagUp =
roundingMode_near_even || roundingMode_near_maxMag || roundMagUp
val pegMinNonzeroMagOut =
commonCase && common_totalUnderflow && (roundMagUp || roundingMode_odd)
val pegMaxFiniteMagOut = overflow && ! overflow_roundMagUp
val notNaN_isInfOut =
notNaN_isSpecialInfOut || (overflow && overflow_roundMagUp)
val signOut = Mux(isNaNOut, false.B, io.in.sign)
val expOut =
(common_expOut &
~Mux(io.in.isZero || common_totalUnderflow,
(BigInt(7)<<(outExpWidth - 2)).U((outExpWidth + 1).W),
0.U
) &
~Mux(pegMinNonzeroMagOut,
~outMinNonzeroExp.U((outExpWidth + 1).W),
0.U
) &
~Mux(pegMaxFiniteMagOut,
(BigInt(1)<<(outExpWidth - 1)).U((outExpWidth + 1).W),
0.U
) &
~Mux(notNaN_isInfOut,
(BigInt(1)<<(outExpWidth - 2)).U((outExpWidth + 1).W),
0.U
)) |
Mux(pegMinNonzeroMagOut,
outMinNonzeroExp.U((outExpWidth + 1).W),
0.U
) |
Mux(pegMaxFiniteMagOut,
outMaxFiniteExp.U((outExpWidth + 1).W),
0.U
) |
Mux(notNaN_isInfOut, outInfExp.U((outExpWidth + 1).W), 0.U) |
Mux(isNaNOut, outNaNExp.U((outExpWidth + 1).W), 0.U)
val fractOut =
Mux(isNaNOut || io.in.isZero || common_totalUnderflow,
Mux(isNaNOut, (BigInt(1)<<(outSigWidth - 2)).U, 0.U),
common_fractOut
) |
Fill(outSigWidth - 1, pegMaxFiniteMagOut)
io.out := signOut ## expOut ## fractOut
io.exceptionFlags :=
io.invalidExc ## io.infiniteExc ## overflow ## underflow ## inexact
}
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
class
RoundRawFNToRecFN(expWidth: Int, sigWidth: Int, options: Int)
extends RawModule
{
override def desiredName = s"RoundRawFNToRecFN_e${expWidth}_s${sigWidth}"
val io = IO(new Bundle {
val invalidExc = Input(Bool()) // overrides 'infiniteExc' and 'in'
val infiniteExc = Input(Bool()) // overrides 'in' except for 'in.sign'
val in = Input(new RawFloat(expWidth, sigWidth + 2))
val roundingMode = Input(UInt(3.W))
val detectTininess = Input(UInt(1.W))
val out = Output(Bits((expWidth + sigWidth + 1).W))
val exceptionFlags = Output(Bits(5.W))
})
val roundAnyRawFNToRecFN =
Module(
new RoundAnyRawFNToRecFN(
expWidth, sigWidth + 2, expWidth, sigWidth, options))
roundAnyRawFNToRecFN.io.invalidExc := io.invalidExc
roundAnyRawFNToRecFN.io.infiniteExc := io.infiniteExc
roundAnyRawFNToRecFN.io.in := io.in
roundAnyRawFNToRecFN.io.roundingMode := io.roundingMode
roundAnyRawFNToRecFN.io.detectTininess := io.detectTininess
io.out := roundAnyRawFNToRecFN.io.out
io.exceptionFlags := roundAnyRawFNToRecFN.io.exceptionFlags
}
| module RoundRawFNToRecFN_e8_s24_83( // @[RoundAnyRawFNToRecFN.scala:295:5]
input io_invalidExc, // @[RoundAnyRawFNToRecFN.scala:299:16]
input io_in_isNaN, // @[RoundAnyRawFNToRecFN.scala:299:16]
input io_in_isInf, // @[RoundAnyRawFNToRecFN.scala:299:16]
input io_in_isZero, // @[RoundAnyRawFNToRecFN.scala:299:16]
input io_in_sign, // @[RoundAnyRawFNToRecFN.scala:299:16]
input [9:0] io_in_sExp, // @[RoundAnyRawFNToRecFN.scala:299:16]
input [26:0] io_in_sig, // @[RoundAnyRawFNToRecFN.scala:299:16]
output [32:0] io_out, // @[RoundAnyRawFNToRecFN.scala:299:16]
output [4:0] io_exceptionFlags // @[RoundAnyRawFNToRecFN.scala:299:16]
);
wire io_invalidExc_0 = io_invalidExc; // @[RoundAnyRawFNToRecFN.scala:295:5]
wire io_in_isNaN_0 = io_in_isNaN; // @[RoundAnyRawFNToRecFN.scala:295:5]
wire io_in_isInf_0 = io_in_isInf; // @[RoundAnyRawFNToRecFN.scala:295:5]
wire io_in_isZero_0 = io_in_isZero; // @[RoundAnyRawFNToRecFN.scala:295:5]
wire io_in_sign_0 = io_in_sign; // @[RoundAnyRawFNToRecFN.scala:295:5]
wire [9:0] io_in_sExp_0 = io_in_sExp; // @[RoundAnyRawFNToRecFN.scala:295:5]
wire [26:0] io_in_sig_0 = io_in_sig; // @[RoundAnyRawFNToRecFN.scala:295:5]
wire io_detectTininess = 1'h1; // @[RoundAnyRawFNToRecFN.scala:295:5, :299:16, :310:15]
wire [2:0] io_roundingMode = 3'h0; // @[RoundAnyRawFNToRecFN.scala:295:5, :299:16, :310:15]
wire io_infiniteExc = 1'h0; // @[RoundAnyRawFNToRecFN.scala:295:5, :299:16, :310:15]
wire [32:0] io_out_0; // @[RoundAnyRawFNToRecFN.scala:295:5]
wire [4:0] io_exceptionFlags_0; // @[RoundAnyRawFNToRecFN.scala:295:5]
RoundAnyRawFNToRecFN_ie8_is26_oe8_os24_83 roundAnyRawFNToRecFN ( // @[RoundAnyRawFNToRecFN.scala:310:15]
.io_invalidExc (io_invalidExc_0), // @[RoundAnyRawFNToRecFN.scala:295:5]
.io_in_isNaN (io_in_isNaN_0), // @[RoundAnyRawFNToRecFN.scala:295:5]
.io_in_isInf (io_in_isInf_0), // @[RoundAnyRawFNToRecFN.scala:295:5]
.io_in_isZero (io_in_isZero_0), // @[RoundAnyRawFNToRecFN.scala:295:5]
.io_in_sign (io_in_sign_0), // @[RoundAnyRawFNToRecFN.scala:295:5]
.io_in_sExp (io_in_sExp_0), // @[RoundAnyRawFNToRecFN.scala:295:5]
.io_in_sig (io_in_sig_0), // @[RoundAnyRawFNToRecFN.scala:295:5]
.io_out (io_out_0),
.io_exceptionFlags (io_exceptionFlags_0)
); // @[RoundAnyRawFNToRecFN.scala:310:15]
assign io_out = io_out_0; // @[RoundAnyRawFNToRecFN.scala:295:5]
assign io_exceptionFlags = io_exceptionFlags_0; // @[RoundAnyRawFNToRecFN.scala:295:5]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File ShiftReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
// Similar to the Chisel ShiftRegister but allows the user to suggest a
// name to the registers that get instantiated, and
// to provide a reset value.
object ShiftRegInit {
def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T =
(0 until n).foldRight(in) {
case (i, next) => {
val r = RegNext(next, init)
name.foreach { na => r.suggestName(s"${na}_${i}") }
r
}
}
}
/** These wrap behavioral
* shift registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
* The different types vary in their reset behavior:
* AsyncResetShiftReg -- Asynchronously reset register array
* A W(width) x D(depth) sized array is constructed from D instantiations of a
 * W-wide register vector. Functionally identical to AsyncResetSynchronizerShiftReg,
* but only used for timing applications
*/
abstract class AbstractPipelineReg(w: Int = 1) extends Module {
val io = IO(new Bundle {
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
}
)
}
object AbstractPipelineReg {
def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = {
val chain = Module(gen)
name.foreach{ chain.suggestName(_) }
chain.io.d := in.asUInt
chain.io.q.asTypeOf(in)
}
}
class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) {
require(depth > 0, "Depth must be greater than 0.")
override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}"
val chain = List.tabulate(depth) { i =>
Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}")
}
chain.last.io.d := io.d
chain.last.io.en := true.B
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink.io.d := source.io.q
sink.io.en := true.B
}
io.q := chain.head.io.q
}
object AsyncResetShiftReg {
def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name)
def apply [T <: Data](in: T, depth: Int, name: Option[String]): T =
apply(in, depth, 0, name)
def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T =
apply(in, depth, init.litValue.toInt, name)
def apply [T <: Data](in: T, depth: Int, init: T): T =
apply (in, depth, init.litValue.toInt, None)
}
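For orientation, here is a minimal usage sketch of the helpers above (an editorial illustration, not part of ShiftReg.scala; the module, signal, and suggested register names are invented):
import chisel3._
import freechips.rocketchip.util.{ShiftRegInit, AsyncResetShiftReg}

class DelayTwo extends Module {
  val io = IO(new Bundle {
    val d = Input(Bool())
    val q = Output(Bool())
  })
  // Two behavioral registers, reset to false.B, with suggested names delay_0 and delay_1
  val soft = ShiftRegInit(io.d, n = 2, init = false.B, name = Some("delay"))
  // The same depth wrapped in an AsyncResetShiftReg module, so a backend flow can replace or constrain it
  val hard = AsyncResetShiftReg(io.d, 2, 0, Some("pipe"))
  io.q := soft || hard
}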
File SynchronizerReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util.{RegEnable, Cat}
/** These wrap behavioral
* shift and next registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
*
* These are built up of *ResetSynchronizerPrimitiveShiftReg,
* intended to be replaced by the integrator's metastable flops chains or replaced
* at this level if they have a multi-bit wide synchronizer primitive.
* The different types vary in their reset behavior:
* NonSyncResetSynchronizerShiftReg -- Register array which does not have a reset pin
* AsyncResetSynchronizerShiftReg -- Asynchronously reset register array, constructed from W instantiations of D deep
* 1-bit-wide shift registers.
* SyncResetSynchronizerShiftReg -- Synchronously reset register array, constructed similarly to AsyncResetSynchronizerShiftReg
*
* [Inferred]ResetSynchronizerShiftReg -- TBD reset type by chisel3 reset inference.
*
* ClockCrossingReg -- Not made up of SynchronizerPrimitiveShiftReg. This is for single-deep flops which cross
* Clock Domains.
*/
object SynchronizerResetType extends Enumeration {
val NonSync, Inferred, Sync, Async = Value
}
// Note: this should not be used directly.
// Use the companion object to generate this with the correct reset type mixin.
private class SynchronizerPrimitiveShiftReg(
sync: Int,
init: Boolean,
resetType: SynchronizerResetType.Value)
extends AbstractPipelineReg(1) {
val initInt = if (init) 1 else 0
val initPostfix = resetType match {
case SynchronizerResetType.NonSync => ""
case _ => s"_i${initInt}"
}
override def desiredName = s"${resetType.toString}ResetSynchronizerPrimitiveShiftReg_d${sync}${initPostfix}"
val chain = List.tabulate(sync) { i =>
val reg = if (resetType == SynchronizerResetType.NonSync) Reg(Bool()) else RegInit(init.B)
reg.suggestName(s"sync_$i")
}
chain.last := io.d.asBool
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink := source
}
io.q := chain.head.asUInt
}
private object SynchronizerPrimitiveShiftReg {
def apply (in: Bool, sync: Int, init: Boolean, resetType: SynchronizerResetType.Value): Bool = {
val gen: () => SynchronizerPrimitiveShiftReg = resetType match {
case SynchronizerResetType.NonSync =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType)
case SynchronizerResetType.Async =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireAsyncReset
case SynchronizerResetType.Sync =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireSyncReset
case SynchronizerResetType.Inferred =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType)
}
AbstractPipelineReg(gen(), in)
}
}
// Note: This module may end up with a non-AsyncReset type reset.
// But the Primitives within will always have AsyncReset type.
class AsyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int)
extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"AsyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
withReset(reset.asAsyncReset){
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Async)
}
}
io.q := Cat(output.reverse)
}
object AsyncResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
// Note: This module may end up with a non-Bool type reset.
// But the Primitives within will always have Bool reset type.
@deprecated("SyncResetSynchronizerShiftReg is unecessary with Chisel3 inferred resets. Use ResetSynchronizerShiftReg which will use the inferred reset type.", "rocket-chip 1.2")
class SyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"SyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
withReset(reset.asBool){
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Sync)
}
}
io.q := Cat(output.reverse)
}
object SyncResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
if (sync == 0) in else AbstractPipelineReg(new SyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
class ResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"ResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Inferred)
}
io.q := Cat(output.reverse)
}
object ResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
AbstractPipelineReg(new ResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
class SynchronizerShiftReg(w: Int = 1, sync: Int = 3) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"SynchronizerShiftReg_w${w}_d${sync}"
val output = Seq.tabulate(w) { i =>
SynchronizerPrimitiveShiftReg(io.d(i), sync, false, SynchronizerResetType.NonSync)
}
io.q := Cat(output.reverse)
}
object SynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, name: Option[String] = None): T =
if (sync == 0) in else AbstractPipelineReg(new SynchronizerShiftReg(in.getWidth, sync), in, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, None)
def apply [T <: Data](in: T): T =
apply (in, 3, None)
}
class ClockCrossingReg(w: Int = 1, doInit: Boolean) extends Module {
override def desiredName = s"ClockCrossingReg_w${w}"
val io = IO(new Bundle{
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
val en = Input(Bool())
})
val cdc_reg = if (doInit) RegEnable(io.d, 0.U(w.W), io.en) else RegEnable(io.d, io.en)
io.q := cdc_reg
}
object ClockCrossingReg {
def apply [T <: Data](in: T, en: Bool, doInit: Boolean, name: Option[String] = None): T = {
val cdc_reg = Module(new ClockCrossingReg(in.getWidth, doInit))
name.foreach{ cdc_reg.suggestName(_) }
cdc_reg.io.d := in.asUInt
cdc_reg.io.en := en
cdc_reg.io.q.asTypeOf(in)
}
}
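Similarly, a minimal sketch of how the synchronizers above are typically instantiated for clock-domain crossing (an editorial illustration only; the signal names are invented):
import chisel3._
import freechips.rocketchip.util.AsyncResetSynchronizerShiftReg

class IrqSynchronizer extends Module {
  val io = IO(new Bundle {
    val async_irq = Input(Bool())   // level signal arriving from another clock domain
    val irq       = Output(Bool())  // synchronized into this Module's clock domain
  })
  // Builds a 3-deep chain of 1-bit AsyncReset primitive shift registers, suggested name irq_sync
  io.irq := AsyncResetSynchronizerShiftReg(io.async_irq, 3, Some("irq_sync"))
}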
| module AsyncResetSynchronizerPrimitiveShiftReg_d3_i0_277( // @[SynchronizerReg.scala:68:19]
input clock, // @[SynchronizerReg.scala:68:19]
input reset, // @[SynchronizerReg.scala:68:19]
output io_q // @[ShiftReg.scala:36:14]
);
wire io_d = 1'h1; // @[SynchronizerReg.scala:54:22, :68:19]
wire _sync_2_T = 1'h1; // @[SynchronizerReg.scala:54:22, :68:19]
wire io_q_0; // @[SynchronizerReg.scala:68:19]
reg sync_0; // @[SynchronizerReg.scala:51:87]
assign io_q_0 = sync_0; // @[SynchronizerReg.scala:51:87, :68:19]
reg sync_1; // @[SynchronizerReg.scala:51:87]
reg sync_2; // @[SynchronizerReg.scala:51:87]
always @(posedge clock or posedge reset) begin // @[SynchronizerReg.scala:68:19]
if (reset) begin // @[SynchronizerReg.scala:68:19]
sync_0 <= 1'h0; // @[SynchronizerReg.scala:51:87]
sync_1 <= 1'h0; // @[SynchronizerReg.scala:51:87]
sync_2 <= 1'h0; // @[SynchronizerReg.scala:51:87]
end
else begin // @[SynchronizerReg.scala:68:19]
sync_0 <= sync_1; // @[SynchronizerReg.scala:51:87]
sync_1 <= sync_2; // @[SynchronizerReg.scala:51:87]
sync_2 <= 1'h1; // @[SynchronizerReg.scala:51:87, :54:22, :68:19]
end
  end // always @(posedge, posedge)
  assign io_q = io_q_0; // @[ShiftReg.scala:36:14]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File PE.scala:
// See README.md for license details.
package gemmini
import chisel3._
import chisel3.util._
class PEControl[T <: Data : Arithmetic](accType: T) extends Bundle {
val dataflow = UInt(1.W) // TODO make this an Enum
val propagate = UInt(1.W) // Which register should be propagated (and which should be accumulated)?
val shift = UInt(log2Up(accType.getWidth).W) // TODO this isn't correct for Floats
}
class MacUnit[T <: Data](inputType: T, cType: T, dType: T) (implicit ev: Arithmetic[T]) extends Module {
import ev._
val io = IO(new Bundle {
val in_a = Input(inputType)
val in_b = Input(inputType)
val in_c = Input(cType)
val out_d = Output(dType)
})
io.out_d := io.in_c.mac(io.in_a, io.in_b)
}
// TODO update documentation
/**
* A PE implementing a MAC operation. Configured as fully combinational when integrated into a Mesh.
* @param width Data width of operands
*/
class PE[T <: Data](inputType: T, outputType: T, accType: T, df: Dataflow.Value, max_simultaneous_matmuls: Int)
(implicit ev: Arithmetic[T]) extends Module { // Debugging variables
import ev._
val io = IO(new Bundle {
val in_a = Input(inputType)
val in_b = Input(outputType)
val in_d = Input(outputType)
val out_a = Output(inputType)
val out_b = Output(outputType)
val out_c = Output(outputType)
val in_control = Input(new PEControl(accType))
val out_control = Output(new PEControl(accType))
val in_id = Input(UInt(log2Up(max_simultaneous_matmuls).W))
val out_id = Output(UInt(log2Up(max_simultaneous_matmuls).W))
val in_last = Input(Bool())
val out_last = Output(Bool())
val in_valid = Input(Bool())
val out_valid = Output(Bool())
val bad_dataflow = Output(Bool())
})
val cType = if (df == Dataflow.WS) inputType else accType
// When creating PEs that support multiple dataflows, the
// elaboration/synthesis tools often fail to consolidate and de-duplicate
// MAC units. To force mac circuitry to be re-used, we create a "mac_unit"
// module here which just performs a single MAC operation
val mac_unit = Module(new MacUnit(inputType,
if (df == Dataflow.WS) outputType else accType, outputType))
val a = io.in_a
val b = io.in_b
val d = io.in_d
val c1 = Reg(cType)
val c2 = Reg(cType)
val dataflow = io.in_control.dataflow
val prop = io.in_control.propagate
val shift = io.in_control.shift
val id = io.in_id
val last = io.in_last
val valid = io.in_valid
io.out_a := a
io.out_control.dataflow := dataflow
io.out_control.propagate := prop
io.out_control.shift := shift
io.out_id := id
io.out_last := last
io.out_valid := valid
mac_unit.io.in_a := a
val last_s = RegEnable(prop, valid)
val flip = last_s =/= prop
val shift_offset = Mux(flip, shift, 0.U)
// Which dataflow are we using?
val OUTPUT_STATIONARY = Dataflow.OS.id.U(1.W)
val WEIGHT_STATIONARY = Dataflow.WS.id.U(1.W)
// Is c1 being computed on, or propagated forward (in the output-stationary dataflow)?
val COMPUTE = 0.U(1.W)
val PROPAGATE = 1.U(1.W)
io.bad_dataflow := false.B
when ((df == Dataflow.OS).B || ((df == Dataflow.BOTH).B && dataflow === OUTPUT_STATIONARY)) {
when(prop === PROPAGATE) {
io.out_c := (c1 >> shift_offset).clippedToWidthOf(outputType)
io.out_b := b
mac_unit.io.in_b := b.asTypeOf(inputType)
mac_unit.io.in_c := c2
c2 := mac_unit.io.out_d
c1 := d.withWidthOf(cType)
}.otherwise {
io.out_c := (c2 >> shift_offset).clippedToWidthOf(outputType)
io.out_b := b
mac_unit.io.in_b := b.asTypeOf(inputType)
mac_unit.io.in_c := c1
c1 := mac_unit.io.out_d
c2 := d.withWidthOf(cType)
}
}.elsewhen ((df == Dataflow.WS).B || ((df == Dataflow.BOTH).B && dataflow === WEIGHT_STATIONARY)) {
when(prop === PROPAGATE) {
io.out_c := c1
mac_unit.io.in_b := c2.asTypeOf(inputType)
mac_unit.io.in_c := b
io.out_b := mac_unit.io.out_d
c1 := d
}.otherwise {
io.out_c := c2
mac_unit.io.in_b := c1.asTypeOf(inputType)
mac_unit.io.in_c := b
io.out_b := mac_unit.io.out_d
c2 := d
}
}.otherwise {
io.bad_dataflow := true.B
//assert(false.B, "unknown dataflow")
io.out_c := DontCare
io.out_b := DontCare
mac_unit.io.in_b := b.asTypeOf(inputType)
mac_unit.io.in_c := c2
}
when (!valid) {
c1 := c1
c2 := c2
mac_unit.io.in_b := DontCare
mac_unit.io.in_c := DontCare
}
}
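As a point of reference, a standalone sketch of how the MacUnit above is driven (an editorial illustration, not part of the gemmini sources; the widths and names are invented). In the output-stationary branch the PE accumulates a*b into the local c1/c2 registers, while in the weight-stationary branch the stationary register holds the weight and the partial sum travels out on out_b; both branches route through the same MacUnit, which computes in_c.mac(in_a, in_b) = in_a * in_b + in_c. The implicit Arithmetic[SInt] instance comes from Arithmetic.scala below:
package gemmini

import chisel3._

class MacExample extends Module {
  val io = IO(new Bundle {
    val a   = Input(SInt(8.W))
    val b   = Input(SInt(8.W))
    val acc = Input(SInt(32.W))
    val d   = Output(SInt(32.W))
  })
  // d = a * b + acc
  val mac = Module(new MacUnit(SInt(8.W), SInt(32.W), SInt(32.W)))
  mac.io.in_a := io.a
  mac.io.in_b := io.b
  mac.io.in_c := io.acc
  io.d := mac.io.out_d
}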
File Arithmetic.scala:
// A simple type class for Chisel datatypes that can add and multiply. To add your own type, simply create your own:
// implicit MyTypeArithmetic extends Arithmetic[MyType] { ... }
package gemmini
import chisel3._
import chisel3.util._
import hardfloat._
// Bundles that represent the raw bits of custom datatypes
case class Float(expWidth: Int, sigWidth: Int) extends Bundle {
val bits = UInt((expWidth + sigWidth).W)
val bias: Int = (1 << (expWidth-1)) - 1
}
case class DummySInt(w: Int) extends Bundle {
val bits = UInt(w.W)
def dontCare: DummySInt = {
val o = Wire(new DummySInt(w))
o.bits := 0.U
o
}
}
// The Arithmetic typeclass which implements various arithmetic operations on custom datatypes
abstract class Arithmetic[T <: Data] {
implicit def cast(t: T): ArithmeticOps[T]
}
abstract class ArithmeticOps[T <: Data](self: T) {
def *(t: T): T
def mac(m1: T, m2: T): T // Returns (m1 * m2 + self)
def +(t: T): T
def -(t: T): T
def >>(u: UInt): T // This is a rounding shift! Rounds away from 0
def >(t: T): Bool
def identity: T
def withWidthOf(t: T): T
def clippedToWidthOf(t: T): T // Like "withWidthOf", except that it saturates
def relu: T
def zero: T
def minimum: T
// Optional parameters, which only need to be defined if you want to enable various optimizations for transformers
def divider(denom_t: UInt, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[T])] = None
def sqrt: Option[(DecoupledIO[UInt], DecoupledIO[T])] = None
def reciprocal[U <: Data](u: U, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[U])] = None
def mult_with_reciprocal[U <: Data](reciprocal: U) = self
}
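// Editorial note (not part of the upstream file): a generic consumer of this typeclass simply
// imports the implicit cast and uses the operators, e.g.
//
//   def macc[T <: Data](a: T, b: T, acc: T)(implicit ev: Arithmetic[T]): T = {
//     import ev._
//     acc.mac(a, b)   // a * b + acc, dispatched to whichever instance matches T
//   }
//
// PE.scala (above) relies on exactly this pattern via its `import ev._`.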
object Arithmetic {
implicit object UIntArithmetic extends Arithmetic[UInt] {
override implicit def cast(self: UInt) = new ArithmeticOps(self) {
override def *(t: UInt) = self * t
override def mac(m1: UInt, m2: UInt) = m1 * m2 + self
override def +(t: UInt) = self + t
override def -(t: UInt) = self - t
override def >>(u: UInt) = {
// The equation we use can be found here: https://riscv.github.io/documents/riscv-v-spec/#_vector_fixed_point_rounding_mode_register_vxrm
// TODO Do we need to explicitly handle the cases where "u" is a small number (like 0)? What is the default behavior here?
val point_five = Mux(u === 0.U, 0.U, self(u - 1.U))
val zeros = Mux(u <= 1.U, 0.U, self.asUInt & ((1.U << (u - 1.U)).asUInt - 1.U)) =/= 0.U
val ones_digit = self(u)
val r = point_five & (zeros | ones_digit)
(self >> u).asUInt + r
}
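        // Worked example (editorial note): for self = 14 (0b1110) and u = 2, point_five = bit 1 = 1,
        // zeros = 0, ones_digit = bit 2 = 1, so r = 1 and 14 >> 2 rounds 3.5 up to 4. For self = 10
        // (0b1010), point_five = 1 but zeros = 0 and ones_digit = 0, so the tie 2.5 stays at 2,
        // matching the round-to-nearest-even ("rne") entry of the vxrm table linked above.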
override def >(t: UInt): Bool = self > t
override def withWidthOf(t: UInt) = self.asTypeOf(t)
override def clippedToWidthOf(t: UInt) = {
val sat = ((1 << (t.getWidth-1))-1).U
Mux(self > sat, sat, self)(t.getWidth-1, 0)
}
override def relu: UInt = self
override def zero: UInt = 0.U
override def identity: UInt = 1.U
override def minimum: UInt = 0.U
}
}
implicit object SIntArithmetic extends Arithmetic[SInt] {
override implicit def cast(self: SInt) = new ArithmeticOps(self) {
override def *(t: SInt) = self * t
override def mac(m1: SInt, m2: SInt) = m1 * m2 + self
override def +(t: SInt) = self + t
override def -(t: SInt) = self - t
override def >>(u: UInt) = {
// The equation we use can be found here: https://riscv.github.io/documents/riscv-v-spec/#_vector_fixed_point_rounding_mode_register_vxrm
// TODO Do we need to explicitly handle the cases where "u" is a small number (like 0)? What is the default behavior here?
val point_five = Mux(u === 0.U, 0.U, self(u - 1.U))
val zeros = Mux(u <= 1.U, 0.U, self.asUInt & ((1.U << (u - 1.U)).asUInt - 1.U)) =/= 0.U
val ones_digit = self(u)
val r = (point_five & (zeros | ones_digit)).asBool
(self >> u).asSInt + Mux(r, 1.S, 0.S)
}
override def >(t: SInt): Bool = self > t
override def withWidthOf(t: SInt) = {
if (self.getWidth >= t.getWidth)
self(t.getWidth-1, 0).asSInt
else {
val sign_bits = t.getWidth - self.getWidth
val sign = self(self.getWidth-1)
Cat(Cat(Seq.fill(sign_bits)(sign)), self).asTypeOf(t)
}
}
override def clippedToWidthOf(t: SInt): SInt = {
val maxsat = ((1 << (t.getWidth-1))-1).S
val minsat = (-(1 << (t.getWidth-1))).S
MuxCase(self, Seq((self > maxsat) -> maxsat, (self < minsat) -> minsat))(t.getWidth-1, 0).asSInt
}
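        // Editorial note: e.g. clipping to an 8-bit SInt saturates anything above 127 to 127 and
        // anything below -128 to -128 before taking the low 8 bits.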
override def relu: SInt = Mux(self >= 0.S, self, 0.S)
override def zero: SInt = 0.S
override def identity: SInt = 1.S
override def minimum: SInt = (-(1 << (self.getWidth-1))).S
override def divider(denom_t: UInt, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[SInt])] = {
// TODO this uses a floating point divider, but we should use an integer divider instead
val input = Wire(Decoupled(denom_t.cloneType))
val output = Wire(Decoupled(self.cloneType))
// We translate our integer to floating-point form so that we can use the hardfloat divider
val expWidth = log2Up(self.getWidth) + 1
val sigWidth = self.getWidth
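        // Editorial note: for a 32-bit SInt this picks expWidth = 6 and sigWidth = 32, so every
        // 32-bit integer is exactly representable in the recoded format; combined with the
        // round_minMag (truncate toward zero) rounding mode below, the floating-point divide then
        // approximates a truncating integer division (as the TODO above notes, a true integer
        // divider could be used instead).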
def sin_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def uin_to_float(x: UInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := false.B
in_to_rec_fn.io.in := x
in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def float_to_in(x: UInt) = {
val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth))
rec_fn_to_in.io.signedOut := true.B
rec_fn_to_in.io.in := x
rec_fn_to_in.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
rec_fn_to_in.io.out.asSInt
}
val self_rec = sin_to_float(self)
val denom_rec = uin_to_float(input.bits)
        // Instantiate the hardfloat divider
val divider = Module(new DivSqrtRecFN_small(expWidth, sigWidth, options))
input.ready := divider.io.inReady
divider.io.inValid := input.valid
divider.io.sqrtOp := false.B
divider.io.a := self_rec
divider.io.b := denom_rec
divider.io.roundingMode := consts.round_minMag
divider.io.detectTininess := consts.tininess_afterRounding
output.valid := divider.io.outValid_div
output.bits := float_to_in(divider.io.out)
assert(!output.valid || output.ready)
Some((input, output))
}
override def sqrt: Option[(DecoupledIO[UInt], DecoupledIO[SInt])] = {
// TODO this uses a floating point divider, but we should use an integer divider instead
val input = Wire(Decoupled(UInt(0.W)))
val output = Wire(Decoupled(self.cloneType))
input.bits := DontCare
// We translate our integer to floating-point form so that we can use the hardfloat divider
val expWidth = log2Up(self.getWidth) + 1
val sigWidth = self.getWidth
def in_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def float_to_in(x: UInt) = {
val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth))
rec_fn_to_in.io.signedOut := true.B
rec_fn_to_in.io.in := x
rec_fn_to_in.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
rec_fn_to_in.io.out.asSInt
}
val self_rec = in_to_float(self)
        // Instantiate the hardfloat sqrt
val sqrter = Module(new DivSqrtRecFN_small(expWidth, sigWidth, 0))
input.ready := sqrter.io.inReady
sqrter.io.inValid := input.valid
sqrter.io.sqrtOp := true.B
sqrter.io.a := self_rec
sqrter.io.b := DontCare
sqrter.io.roundingMode := consts.round_minMag
sqrter.io.detectTininess := consts.tininess_afterRounding
output.valid := sqrter.io.outValid_sqrt
output.bits := float_to_in(sqrter.io.out)
assert(!output.valid || output.ready)
Some((input, output))
}
override def reciprocal[U <: Data](u: U, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[U])] = u match {
case Float(expWidth, sigWidth) =>
val input = Wire(Decoupled(UInt(0.W)))
val output = Wire(Decoupled(u.cloneType))
input.bits := DontCare
// We translate our integer to floating-point form so that we can use the hardfloat divider
def in_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
val self_rec = in_to_float(self)
val one_rec = in_to_float(1.S)
          // Instantiate the hardfloat divider
val divider = Module(new DivSqrtRecFN_small(expWidth, sigWidth, options))
input.ready := divider.io.inReady
divider.io.inValid := input.valid
divider.io.sqrtOp := false.B
divider.io.a := one_rec
divider.io.b := self_rec
divider.io.roundingMode := consts.round_near_even
divider.io.detectTininess := consts.tininess_afterRounding
output.valid := divider.io.outValid_div
output.bits := fNFromRecFN(expWidth, sigWidth, divider.io.out).asTypeOf(u)
assert(!output.valid || output.ready)
Some((input, output))
case _ => None
}
override def mult_with_reciprocal[U <: Data](reciprocal: U): SInt = reciprocal match {
case recip @ Float(expWidth, sigWidth) =>
def in_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def float_to_in(x: UInt) = {
val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth))
rec_fn_to_in.io.signedOut := true.B
rec_fn_to_in.io.in := x
rec_fn_to_in.io.roundingMode := consts.round_minMag
rec_fn_to_in.io.out.asSInt
}
val self_rec = in_to_float(self)
val reciprocal_rec = recFNFromFN(expWidth, sigWidth, recip.bits)
          // Instantiate the hardfloat multiplier
val muladder = Module(new MulRecFN(expWidth, sigWidth))
muladder.io.roundingMode := consts.round_near_even
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := self_rec
muladder.io.b := reciprocal_rec
float_to_in(muladder.io.out)
case _ => self
}
}
}
implicit object FloatArithmetic extends Arithmetic[Float] {
// TODO Floating point arithmetic currently switches between recoded and standard formats for every operation. However, it should stay in the recoded format as it travels through the systolic array
override implicit def cast(self: Float): ArithmeticOps[Float] = new ArithmeticOps(self) {
override def *(t: Float): Float = {
val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth))
t_resizer.io.in := t_rec
t_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
t_resizer.io.detectTininess := consts.tininess_afterRounding
val t_rec_resized = t_resizer.io.out
val muladder = Module(new MulRecFN(self.expWidth, self.sigWidth))
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := self_rec
muladder.io.b := t_rec_resized
val out = Wire(Float(self.expWidth, self.sigWidth))
out.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
out
}
override def mac(m1: Float, m2: Float): Float = {
// Recode all operands
val m1_rec = recFNFromFN(m1.expWidth, m1.sigWidth, m1.bits)
val m2_rec = recFNFromFN(m2.expWidth, m2.sigWidth, m2.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Resize m1 to self's width
val m1_resizer = Module(new RecFNToRecFN(m1.expWidth, m1.sigWidth, self.expWidth, self.sigWidth))
m1_resizer.io.in := m1_rec
m1_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
m1_resizer.io.detectTininess := consts.tininess_afterRounding
val m1_rec_resized = m1_resizer.io.out
// Resize m2 to self's width
val m2_resizer = Module(new RecFNToRecFN(m2.expWidth, m2.sigWidth, self.expWidth, self.sigWidth))
m2_resizer.io.in := m2_rec
m2_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
m2_resizer.io.detectTininess := consts.tininess_afterRounding
val m2_rec_resized = m2_resizer.io.out
// Perform multiply-add
val muladder = Module(new MulAddRecFN(self.expWidth, self.sigWidth))
muladder.io.op := 0.U
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := m1_rec_resized
muladder.io.b := m2_rec_resized
muladder.io.c := self_rec
// Convert result to standard format // TODO remove these intermediate recodings
val out = Wire(Float(self.expWidth, self.sigWidth))
out.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
out
}
override def +(t: Float): Float = {
require(self.getWidth >= t.getWidth) // This just makes it easier to write the resizing code
// Recode all operands
val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Generate 1 as a float
val in_to_rec_fn = Module(new INToRecFN(1, self.expWidth, self.sigWidth))
in_to_rec_fn.io.signedIn := false.B
in_to_rec_fn.io.in := 1.U
in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
val one_rec = in_to_rec_fn.io.out
// Resize t
val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth))
t_resizer.io.in := t_rec
t_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
t_resizer.io.detectTininess := consts.tininess_afterRounding
val t_rec_resized = t_resizer.io.out
// Perform addition
val muladder = Module(new MulAddRecFN(self.expWidth, self.sigWidth))
muladder.io.op := 0.U
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := t_rec_resized
muladder.io.b := one_rec
muladder.io.c := self_rec
val result = Wire(Float(self.expWidth, self.sigWidth))
result.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
result
}
override def -(t: Float): Float = {
val t_sgn = t.bits(t.getWidth-1)
val neg_t = Cat(~t_sgn, t.bits(t.getWidth-2,0)).asTypeOf(t)
self + neg_t
}
override def >>(u: UInt): Float = {
// Recode self
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Get 2^(-u) as a recoded float
val shift_exp = Wire(UInt(self.expWidth.W))
shift_exp := self.bias.U - u
val shift_fn = Cat(0.U(1.W), shift_exp, 0.U((self.sigWidth-1).W))
val shift_rec = recFNFromFN(self.expWidth, self.sigWidth, shift_fn)
assert(shift_exp =/= 0.U, "scaling by denormalized numbers is not currently supported")
// Multiply self and 2^(-u)
val muladder = Module(new MulRecFN(self.expWidth, self.sigWidth))
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := self_rec
muladder.io.b := shift_rec
val result = Wire(Float(self.expWidth, self.sigWidth))
result.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
result
}
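      // Worked example (editorial note): for Float(8, 24) the bias is 127, so u = 3 gives
      // shift_exp = 124 and shift_fn encodes 2^-3 = 0.125; multiplying by it realizes the right
      // shift. The assert above guards against shift_exp becoming 0 (u equal to the bias), i.e.
      // against scaling by a subnormal value.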
override def >(t: Float): Bool = {
// Recode all operands
val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Resize t to self's width
val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth))
t_resizer.io.in := t_rec
t_resizer.io.roundingMode := consts.round_near_even
t_resizer.io.detectTininess := consts.tininess_afterRounding
val t_rec_resized = t_resizer.io.out
val comparator = Module(new CompareRecFN(self.expWidth, self.sigWidth))
comparator.io.a := self_rec
comparator.io.b := t_rec_resized
comparator.io.signaling := false.B
comparator.io.gt
}
override def withWidthOf(t: Float): Float = {
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
val resizer = Module(new RecFNToRecFN(self.expWidth, self.sigWidth, t.expWidth, t.sigWidth))
resizer.io.in := self_rec
resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
resizer.io.detectTininess := consts.tininess_afterRounding
val result = Wire(Float(t.expWidth, t.sigWidth))
result.bits := fNFromRecFN(t.expWidth, t.sigWidth, resizer.io.out)
result
}
override def clippedToWidthOf(t: Float): Float = {
// TODO check for overflow. Right now, we just assume that overflow doesn't happen
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
val resizer = Module(new RecFNToRecFN(self.expWidth, self.sigWidth, t.expWidth, t.sigWidth))
resizer.io.in := self_rec
resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
resizer.io.detectTininess := consts.tininess_afterRounding
val result = Wire(Float(t.expWidth, t.sigWidth))
result.bits := fNFromRecFN(t.expWidth, t.sigWidth, resizer.io.out)
result
}
override def relu: Float = {
val raw = rawFloatFromFN(self.expWidth, self.sigWidth, self.bits)
val result = Wire(Float(self.expWidth, self.sigWidth))
result.bits := Mux(!raw.isZero && raw.sign, 0.U, self.bits)
result
}
override def zero: Float = 0.U.asTypeOf(self)
override def identity: Float = Cat(0.U(2.W), ~(0.U((self.expWidth-1).W)), 0.U((self.sigWidth-1).W)).asTypeOf(self)
override def minimum: Float = Cat(1.U, ~(0.U(self.expWidth.W)), 0.U((self.sigWidth-1).W)).asTypeOf(self)
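      // Editorial note: `identity` encodes +1.0 (sign 0, exponent equal to the bias, zero mantissa)
      // and `minimum` encodes negative infinity (sign 1, all-ones exponent, zero mantissa).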
}
}
implicit object DummySIntArithmetic extends Arithmetic[DummySInt] {
override implicit def cast(self: DummySInt) = new ArithmeticOps(self) {
override def *(t: DummySInt) = self.dontCare
override def mac(m1: DummySInt, m2: DummySInt) = self.dontCare
override def +(t: DummySInt) = self.dontCare
override def -(t: DummySInt) = self.dontCare
override def >>(t: UInt) = self.dontCare
override def >(t: DummySInt): Bool = false.B
override def identity = self.dontCare
override def withWidthOf(t: DummySInt) = self.dontCare
override def clippedToWidthOf(t: DummySInt) = self.dontCare
override def relu = self.dontCare
override def zero = self.dontCare
override def minimum: DummySInt = self.dontCare
}
}
}
| module PE_397( // @[PE.scala:31:7]
input clock, // @[PE.scala:31:7]
input reset, // @[PE.scala:31:7]
input [7:0] io_in_a, // @[PE.scala:35:14]
input [19:0] io_in_b, // @[PE.scala:35:14]
input [19:0] io_in_d, // @[PE.scala:35:14]
output [7:0] io_out_a, // @[PE.scala:35:14]
output [19:0] io_out_b, // @[PE.scala:35:14]
output [19:0] io_out_c, // @[PE.scala:35:14]
input io_in_control_dataflow, // @[PE.scala:35:14]
input io_in_control_propagate, // @[PE.scala:35:14]
input [4:0] io_in_control_shift, // @[PE.scala:35:14]
output io_out_control_dataflow, // @[PE.scala:35:14]
output io_out_control_propagate, // @[PE.scala:35:14]
output [4:0] io_out_control_shift, // @[PE.scala:35:14]
input [2:0] io_in_id, // @[PE.scala:35:14]
output [2:0] io_out_id, // @[PE.scala:35:14]
input io_in_last, // @[PE.scala:35:14]
output io_out_last, // @[PE.scala:35:14]
input io_in_valid, // @[PE.scala:35:14]
output io_out_valid // @[PE.scala:35:14]
);
wire [7:0] io_in_a_0 = io_in_a; // @[PE.scala:31:7]
wire [19:0] io_in_b_0 = io_in_b; // @[PE.scala:31:7]
wire [19:0] io_in_d_0 = io_in_d; // @[PE.scala:31:7]
wire io_in_control_dataflow_0 = io_in_control_dataflow; // @[PE.scala:31:7]
wire io_in_control_propagate_0 = io_in_control_propagate; // @[PE.scala:31:7]
wire [4:0] io_in_control_shift_0 = io_in_control_shift; // @[PE.scala:31:7]
wire [2:0] io_in_id_0 = io_in_id; // @[PE.scala:31:7]
wire io_in_last_0 = io_in_last; // @[PE.scala:31:7]
wire io_in_valid_0 = io_in_valid; // @[PE.scala:31:7]
wire io_bad_dataflow = 1'h0; // @[PE.scala:31:7]
wire _io_out_c_T_5 = 1'h0; // @[Arithmetic.scala:125:33]
wire _io_out_c_T_6 = 1'h0; // @[Arithmetic.scala:125:60]
wire _io_out_c_T_16 = 1'h0; // @[Arithmetic.scala:125:33]
wire _io_out_c_T_17 = 1'h0; // @[Arithmetic.scala:125:60]
wire [7:0] io_out_a_0 = io_in_a_0; // @[PE.scala:31:7]
wire [19:0] _mac_unit_io_in_b_T = io_in_b_0; // @[PE.scala:31:7, :106:37]
wire [19:0] _mac_unit_io_in_b_T_2 = io_in_b_0; // @[PE.scala:31:7, :113:37]
wire [19:0] _mac_unit_io_in_b_T_8 = io_in_b_0; // @[PE.scala:31:7, :137:35]
wire io_out_control_dataflow_0 = io_in_control_dataflow_0; // @[PE.scala:31:7]
wire io_out_control_propagate_0 = io_in_control_propagate_0; // @[PE.scala:31:7]
wire [4:0] io_out_control_shift_0 = io_in_control_shift_0; // @[PE.scala:31:7]
wire [2:0] io_out_id_0 = io_in_id_0; // @[PE.scala:31:7]
wire io_out_last_0 = io_in_last_0; // @[PE.scala:31:7]
wire io_out_valid_0 = io_in_valid_0; // @[PE.scala:31:7]
wire [19:0] io_out_b_0; // @[PE.scala:31:7]
wire [19:0] io_out_c_0; // @[PE.scala:31:7]
reg [7:0] c1; // @[PE.scala:70:15]
wire [7:0] _io_out_c_zeros_T_1 = c1; // @[PE.scala:70:15]
wire [7:0] _mac_unit_io_in_b_T_6 = c1; // @[PE.scala:70:15, :127:38]
reg [7:0] c2; // @[PE.scala:71:15]
wire [7:0] _io_out_c_zeros_T_10 = c2; // @[PE.scala:71:15]
wire [7:0] _mac_unit_io_in_b_T_4 = c2; // @[PE.scala:71:15, :121:38]
reg last_s; // @[PE.scala:89:25]
wire flip = last_s != io_in_control_propagate_0; // @[PE.scala:31:7, :89:25, :90:21]
wire [4:0] shift_offset = flip ? io_in_control_shift_0 : 5'h0; // @[PE.scala:31:7, :90:21, :91:25]
wire _GEN = shift_offset == 5'h0; // @[PE.scala:91:25]
wire _io_out_c_point_five_T; // @[Arithmetic.scala:101:32]
assign _io_out_c_point_five_T = _GEN; // @[Arithmetic.scala:101:32]
wire _io_out_c_point_five_T_5; // @[Arithmetic.scala:101:32]
assign _io_out_c_point_five_T_5 = _GEN; // @[Arithmetic.scala:101:32]
wire [5:0] _GEN_0 = {1'h0, shift_offset} - 6'h1; // @[PE.scala:91:25]
wire [5:0] _io_out_c_point_five_T_1; // @[Arithmetic.scala:101:53]
assign _io_out_c_point_five_T_1 = _GEN_0; // @[Arithmetic.scala:101:53]
wire [5:0] _io_out_c_zeros_T_2; // @[Arithmetic.scala:102:66]
assign _io_out_c_zeros_T_2 = _GEN_0; // @[Arithmetic.scala:101:53, :102:66]
wire [5:0] _io_out_c_point_five_T_6; // @[Arithmetic.scala:101:53]
assign _io_out_c_point_five_T_6 = _GEN_0; // @[Arithmetic.scala:101:53]
wire [5:0] _io_out_c_zeros_T_11; // @[Arithmetic.scala:102:66]
assign _io_out_c_zeros_T_11 = _GEN_0; // @[Arithmetic.scala:101:53, :102:66]
wire [4:0] _io_out_c_point_five_T_2 = _io_out_c_point_five_T_1[4:0]; // @[Arithmetic.scala:101:53]
wire [7:0] _io_out_c_point_five_T_3 = $signed($signed(c1) >>> _io_out_c_point_five_T_2); // @[PE.scala:70:15]
wire _io_out_c_point_five_T_4 = _io_out_c_point_five_T_3[0]; // @[Arithmetic.scala:101:50]
wire io_out_c_point_five = ~_io_out_c_point_five_T & _io_out_c_point_five_T_4; // @[Arithmetic.scala:101:{29,32,50}]
wire _GEN_1 = shift_offset < 5'h2; // @[PE.scala:91:25]
wire _io_out_c_zeros_T; // @[Arithmetic.scala:102:27]
assign _io_out_c_zeros_T = _GEN_1; // @[Arithmetic.scala:102:27]
wire _io_out_c_zeros_T_9; // @[Arithmetic.scala:102:27]
assign _io_out_c_zeros_T_9 = _GEN_1; // @[Arithmetic.scala:102:27]
wire [4:0] _io_out_c_zeros_T_3 = _io_out_c_zeros_T_2[4:0]; // @[Arithmetic.scala:102:66]
wire [31:0] _io_out_c_zeros_T_4 = 32'h1 << _io_out_c_zeros_T_3; // @[Arithmetic.scala:102:{60,66}]
wire [32:0] _io_out_c_zeros_T_5 = {1'h0, _io_out_c_zeros_T_4} - 33'h1; // @[Arithmetic.scala:102:{60,81}]
wire [31:0] _io_out_c_zeros_T_6 = _io_out_c_zeros_T_5[31:0]; // @[Arithmetic.scala:102:81]
wire [31:0] _io_out_c_zeros_T_7 = {24'h0, _io_out_c_zeros_T_6[7:0] & _io_out_c_zeros_T_1}; // @[Arithmetic.scala:102:{45,52,81}]
wire [31:0] _io_out_c_zeros_T_8 = _io_out_c_zeros_T ? 32'h0 : _io_out_c_zeros_T_7; // @[Arithmetic.scala:102:{24,27,52}]
wire io_out_c_zeros = |_io_out_c_zeros_T_8; // @[Arithmetic.scala:102:{24,89}]
wire [7:0] _GEN_2 = {3'h0, shift_offset}; // @[PE.scala:91:25]
wire [7:0] _GEN_3 = $signed($signed(c1) >>> _GEN_2); // @[PE.scala:70:15]
wire [7:0] _io_out_c_ones_digit_T; // @[Arithmetic.scala:103:30]
assign _io_out_c_ones_digit_T = _GEN_3; // @[Arithmetic.scala:103:30]
wire [7:0] _io_out_c_T; // @[Arithmetic.scala:107:15]
assign _io_out_c_T = _GEN_3; // @[Arithmetic.scala:103:30, :107:15]
wire io_out_c_ones_digit = _io_out_c_ones_digit_T[0]; // @[Arithmetic.scala:103:30]
wire _io_out_c_r_T = io_out_c_zeros | io_out_c_ones_digit; // @[Arithmetic.scala:102:89, :103:30, :105:38]
wire _io_out_c_r_T_1 = io_out_c_point_five & _io_out_c_r_T; // @[Arithmetic.scala:101:29, :105:{29,38}]
wire io_out_c_r = _io_out_c_r_T_1; // @[Arithmetic.scala:105:{29,53}]
wire [1:0] _io_out_c_T_1 = {1'h0, io_out_c_r}; // @[Arithmetic.scala:105:53, :107:33]
wire [8:0] _io_out_c_T_2 = {_io_out_c_T[7], _io_out_c_T} + {{7{_io_out_c_T_1[1]}}, _io_out_c_T_1}; // @[Arithmetic.scala:107:{15,28,33}]
wire [7:0] _io_out_c_T_3 = _io_out_c_T_2[7:0]; // @[Arithmetic.scala:107:28]
wire [7:0] _io_out_c_T_4 = _io_out_c_T_3; // @[Arithmetic.scala:107:28]
wire [19:0] _io_out_c_T_7 = {{12{_io_out_c_T_4[7]}}, _io_out_c_T_4}; // @[Mux.scala:126:16]
wire [19:0] _io_out_c_T_8 = _io_out_c_T_7; // @[Mux.scala:126:16]
wire [19:0] _io_out_c_T_9 = _io_out_c_T_8; // @[Mux.scala:126:16]
wire [19:0] _io_out_c_T_10 = _io_out_c_T_9; // @[Arithmetic.scala:125:{81,99}]
wire [19:0] _mac_unit_io_in_b_T_1 = _mac_unit_io_in_b_T; // @[PE.scala:106:37]
wire [7:0] _mac_unit_io_in_b_WIRE = _mac_unit_io_in_b_T_1[7:0]; // @[PE.scala:106:37]
wire [7:0] _c1_T = io_in_d_0[7:0]; // @[PE.scala:31:7]
wire [7:0] _c2_T = io_in_d_0[7:0]; // @[PE.scala:31:7]
wire [7:0] _c1_T_1 = _c1_T; // @[Arithmetic.scala:114:{15,33}]
wire [4:0] _io_out_c_point_five_T_7 = _io_out_c_point_five_T_6[4:0]; // @[Arithmetic.scala:101:53]
wire [7:0] _io_out_c_point_five_T_8 = $signed($signed(c2) >>> _io_out_c_point_five_T_7); // @[PE.scala:71:15]
wire _io_out_c_point_five_T_9 = _io_out_c_point_five_T_8[0]; // @[Arithmetic.scala:101:50]
wire io_out_c_point_five_1 = ~_io_out_c_point_five_T_5 & _io_out_c_point_five_T_9; // @[Arithmetic.scala:101:{29,32,50}]
wire [4:0] _io_out_c_zeros_T_12 = _io_out_c_zeros_T_11[4:0]; // @[Arithmetic.scala:102:66]
wire [31:0] _io_out_c_zeros_T_13 = 32'h1 << _io_out_c_zeros_T_12; // @[Arithmetic.scala:102:{60,66}]
wire [32:0] _io_out_c_zeros_T_14 = {1'h0, _io_out_c_zeros_T_13} - 33'h1; // @[Arithmetic.scala:102:{60,81}]
wire [31:0] _io_out_c_zeros_T_15 = _io_out_c_zeros_T_14[31:0]; // @[Arithmetic.scala:102:81]
wire [31:0] _io_out_c_zeros_T_16 = {24'h0, _io_out_c_zeros_T_15[7:0] & _io_out_c_zeros_T_10}; // @[Arithmetic.scala:102:{45,52,81}]
wire [31:0] _io_out_c_zeros_T_17 = _io_out_c_zeros_T_9 ? 32'h0 : _io_out_c_zeros_T_16; // @[Arithmetic.scala:102:{24,27,52}]
wire io_out_c_zeros_1 = |_io_out_c_zeros_T_17; // @[Arithmetic.scala:102:{24,89}]
wire [7:0] _GEN_4 = $signed($signed(c2) >>> _GEN_2); // @[PE.scala:71:15]
wire [7:0] _io_out_c_ones_digit_T_1; // @[Arithmetic.scala:103:30]
assign _io_out_c_ones_digit_T_1 = _GEN_4; // @[Arithmetic.scala:103:30]
wire [7:0] _io_out_c_T_11; // @[Arithmetic.scala:107:15]
assign _io_out_c_T_11 = _GEN_4; // @[Arithmetic.scala:103:30, :107:15]
wire io_out_c_ones_digit_1 = _io_out_c_ones_digit_T_1[0]; // @[Arithmetic.scala:103:30]
wire _io_out_c_r_T_2 = io_out_c_zeros_1 | io_out_c_ones_digit_1; // @[Arithmetic.scala:102:89, :103:30, :105:38]
wire _io_out_c_r_T_3 = io_out_c_point_five_1 & _io_out_c_r_T_2; // @[Arithmetic.scala:101:29, :105:{29,38}]
wire io_out_c_r_1 = _io_out_c_r_T_3; // @[Arithmetic.scala:105:{29,53}]
wire [1:0] _io_out_c_T_12 = {1'h0, io_out_c_r_1}; // @[Arithmetic.scala:105:53, :107:33]
wire [8:0] _io_out_c_T_13 = {_io_out_c_T_11[7], _io_out_c_T_11} + {{7{_io_out_c_T_12[1]}}, _io_out_c_T_12}; // @[Arithmetic.scala:107:{15,28,33}]
wire [7:0] _io_out_c_T_14 = _io_out_c_T_13[7:0]; // @[Arithmetic.scala:107:28]
wire [7:0] _io_out_c_T_15 = _io_out_c_T_14; // @[Arithmetic.scala:107:28]
wire [19:0] _io_out_c_T_18 = {{12{_io_out_c_T_15[7]}}, _io_out_c_T_15}; // @[Mux.scala:126:16]
wire [19:0] _io_out_c_T_19 = _io_out_c_T_18; // @[Mux.scala:126:16]
wire [19:0] _io_out_c_T_20 = _io_out_c_T_19; // @[Mux.scala:126:16]
wire [19:0] _io_out_c_T_21 = _io_out_c_T_20; // @[Arithmetic.scala:125:{81,99}]
wire [19:0] _mac_unit_io_in_b_T_3 = _mac_unit_io_in_b_T_2; // @[PE.scala:113:37]
wire [7:0] _mac_unit_io_in_b_WIRE_1 = _mac_unit_io_in_b_T_3[7:0]; // @[PE.scala:113:37]
wire [7:0] _c2_T_1 = _c2_T; // @[Arithmetic.scala:114:{15,33}]
wire [7:0] _mac_unit_io_in_b_T_5; // @[PE.scala:121:38]
assign _mac_unit_io_in_b_T_5 = _mac_unit_io_in_b_T_4; // @[PE.scala:121:38]
wire [7:0] _mac_unit_io_in_b_WIRE_2 = _mac_unit_io_in_b_T_5; // @[PE.scala:121:38]
assign io_out_c_0 = io_in_control_propagate_0 ? {{12{c1[7]}}, c1} : {{12{c2[7]}}, c2}; // @[PE.scala:31:7, :70:15, :71:15, :119:30, :120:16, :126:16]
wire [7:0] _mac_unit_io_in_b_T_7; // @[PE.scala:127:38]
assign _mac_unit_io_in_b_T_7 = _mac_unit_io_in_b_T_6; // @[PE.scala:127:38]
wire [7:0] _mac_unit_io_in_b_WIRE_3 = _mac_unit_io_in_b_T_7; // @[PE.scala:127:38]
wire [19:0] _mac_unit_io_in_b_T_9 = _mac_unit_io_in_b_T_8; // @[PE.scala:137:35]
wire [7:0] _mac_unit_io_in_b_WIRE_4 = _mac_unit_io_in_b_T_9[7:0]; // @[PE.scala:137:35]
always @(posedge clock) begin // @[PE.scala:31:7]
if (io_in_valid_0 & io_in_control_propagate_0) // @[PE.scala:31:7, :102:95, :141:17, :142:8]
c1 <= io_in_d_0[7:0]; // @[PE.scala:31:7, :70:15]
if (~(~io_in_valid_0 | io_in_control_propagate_0)) // @[PE.scala:31:7, :71:15, :102:95, :119:30, :130:10, :141:{9,17}, :143:8]
c2 <= io_in_d_0[7:0]; // @[PE.scala:31:7, :71:15]
if (io_in_valid_0) // @[PE.scala:31:7]
last_s <= io_in_control_propagate_0; // @[PE.scala:31:7, :89:25]
  end // always @(posedge)
MacUnit_141 mac_unit ( // @[PE.scala:64:24]
.clock (clock),
.reset (reset),
.io_in_a (io_in_a_0), // @[PE.scala:31:7]
.io_in_b (io_in_control_propagate_0 ? _mac_unit_io_in_b_WIRE_2 : _mac_unit_io_in_b_WIRE_3), // @[PE.scala:31:7, :119:30, :121:{24,38}, :127:{24,38}]
.io_in_c (io_in_b_0), // @[PE.scala:31:7]
.io_out_d (io_out_b_0)
); // @[PE.scala:64:24]
assign io_out_a = io_out_a_0; // @[PE.scala:31:7]
assign io_out_b = io_out_b_0; // @[PE.scala:31:7]
assign io_out_c = io_out_c_0; // @[PE.scala:31:7]
assign io_out_control_dataflow = io_out_control_dataflow_0; // @[PE.scala:31:7]
assign io_out_control_propagate = io_out_control_propagate_0; // @[PE.scala:31:7]
assign io_out_control_shift = io_out_control_shift_0; // @[PE.scala:31:7]
assign io_out_id = io_out_id_0; // @[PE.scala:31:7]
assign io_out_last = io_out_last_0; // @[PE.scala:31:7]
assign io_out_valid = io_out_valid_0; // @[PE.scala:31:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File Nodes.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import org.chipsalliance.diplomacy.nodes._
import freechips.rocketchip.util.{AsyncQueueParams,RationalDirection}
case object TLMonitorBuilder extends Field[TLMonitorArgs => TLMonitorBase](args => new TLMonitor(args))
object TLImp extends NodeImp[TLMasterPortParameters, TLSlavePortParameters, TLEdgeOut, TLEdgeIn, TLBundle]
{
def edgeO(pd: TLMasterPortParameters, pu: TLSlavePortParameters, p: Parameters, sourceInfo: SourceInfo) = new TLEdgeOut(pd, pu, p, sourceInfo)
def edgeI(pd: TLMasterPortParameters, pu: TLSlavePortParameters, p: Parameters, sourceInfo: SourceInfo) = new TLEdgeIn (pd, pu, p, sourceInfo)
def bundleO(eo: TLEdgeOut) = TLBundle(eo.bundle)
def bundleI(ei: TLEdgeIn) = TLBundle(ei.bundle)
def render(ei: TLEdgeIn) = RenderedEdge(colour = "#000000" /* black */, label = (ei.manager.beatBytes * 8).toString)
override def monitor(bundle: TLBundle, edge: TLEdgeIn): Unit = {
val monitor = Module(edge.params(TLMonitorBuilder)(TLMonitorArgs(edge)))
monitor.io.in := bundle
}
override def mixO(pd: TLMasterPortParameters, node: OutwardNode[TLMasterPortParameters, TLSlavePortParameters, TLBundle]): TLMasterPortParameters =
pd.v1copy(clients = pd.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) })
override def mixI(pu: TLSlavePortParameters, node: InwardNode[TLMasterPortParameters, TLSlavePortParameters, TLBundle]): TLSlavePortParameters =
pu.v1copy(managers = pu.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) })
}
trait TLFormatNode extends FormatNode[TLEdgeIn, TLEdgeOut]
case class TLClientNode(portParams: Seq[TLMasterPortParameters])(implicit valName: ValName) extends SourceNode(TLImp)(portParams) with TLFormatNode
case class TLManagerNode(portParams: Seq[TLSlavePortParameters])(implicit valName: ValName) extends SinkNode(TLImp)(portParams) with TLFormatNode
case class TLAdapterNode(
clientFn: TLMasterPortParameters => TLMasterPortParameters = { s => s },
managerFn: TLSlavePortParameters => TLSlavePortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLImp)(clientFn, managerFn) with TLFormatNode
case class TLJunctionNode(
clientFn: Seq[TLMasterPortParameters] => Seq[TLMasterPortParameters],
managerFn: Seq[TLSlavePortParameters] => Seq[TLSlavePortParameters])(
implicit valName: ValName)
extends JunctionNode(TLImp)(clientFn, managerFn) with TLFormatNode
case class TLIdentityNode()(implicit valName: ValName) extends IdentityNode(TLImp)() with TLFormatNode
object TLNameNode {
def apply(name: ValName) = TLIdentityNode()(name)
def apply(name: Option[String]): TLIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLIdentityNode = apply(Some(name))
}
case class TLEphemeralNode()(implicit valName: ValName) extends EphemeralNode(TLImp)()
object TLTempNode {
def apply(): TLEphemeralNode = TLEphemeralNode()(ValName("temp"))
}
case class TLNexusNode(
clientFn: Seq[TLMasterPortParameters] => TLMasterPortParameters,
managerFn: Seq[TLSlavePortParameters] => TLSlavePortParameters)(
implicit valName: ValName)
extends NexusNode(TLImp)(clientFn, managerFn) with TLFormatNode
abstract class TLCustomNode(implicit valName: ValName)
extends CustomNode(TLImp) with TLFormatNode
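// Editorial sketch (not part of the upstream file): adapter nodes are typically used inside a
// LazyModule that forwards every inward edge to the matching outward edge, e.g.
//
//   class TLPassthrough(implicit p: Parameters) extends LazyModule {
//     val node = TLAdapterNode()
//     lazy val module = new LazyModuleImp(this) {
//       (node.in zip node.out).foreach { case ((in, _), (out, _)) => out <> in }
//     }
//   }
//
// (LazyModule and LazyModuleImp come from the rocket-chip diplomacy packages imported above; real
// adapters usually also transform the client/manager parameters via clientFn/managerFn.)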
// Asynchronous crossings
trait TLAsyncFormatNode extends FormatNode[TLAsyncEdgeParameters, TLAsyncEdgeParameters]
object TLAsyncImp extends SimpleNodeImp[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncEdgeParameters, TLAsyncBundle]
{
def edge(pd: TLAsyncClientPortParameters, pu: TLAsyncManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLAsyncEdgeParameters(pd, pu, p, sourceInfo)
def bundle(e: TLAsyncEdgeParameters) = new TLAsyncBundle(e.bundle)
def render(e: TLAsyncEdgeParameters) = RenderedEdge(colour = "#ff0000" /* red */, label = e.manager.async.depth.toString)
override def mixO(pd: TLAsyncClientPortParameters, node: OutwardNode[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncBundle]): TLAsyncClientPortParameters =
pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }))
override def mixI(pu: TLAsyncManagerPortParameters, node: InwardNode[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncBundle]): TLAsyncManagerPortParameters =
pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }))
}
case class TLAsyncAdapterNode(
clientFn: TLAsyncClientPortParameters => TLAsyncClientPortParameters = { s => s },
managerFn: TLAsyncManagerPortParameters => TLAsyncManagerPortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLAsyncImp)(clientFn, managerFn) with TLAsyncFormatNode
case class TLAsyncIdentityNode()(implicit valName: ValName) extends IdentityNode(TLAsyncImp)() with TLAsyncFormatNode
object TLAsyncNameNode {
def apply(name: ValName) = TLAsyncIdentityNode()(name)
def apply(name: Option[String]): TLAsyncIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLAsyncIdentityNode = apply(Some(name))
}
case class TLAsyncSourceNode(sync: Option[Int])(implicit valName: ValName)
extends MixedAdapterNode(TLImp, TLAsyncImp)(
dFn = { p => TLAsyncClientPortParameters(p) },
uFn = { p => p.base.v1copy(minLatency = p.base.minLatency + sync.getOrElse(p.async.sync)) }) with FormatNode[TLEdgeIn, TLAsyncEdgeParameters] // discard cycles in other clock domain
case class TLAsyncSinkNode(async: AsyncQueueParams)(implicit valName: ValName)
extends MixedAdapterNode(TLAsyncImp, TLImp)(
dFn = { p => p.base.v1copy(minLatency = p.base.minLatency + async.sync) },
uFn = { p => TLAsyncManagerPortParameters(async, p) }) with FormatNode[TLAsyncEdgeParameters, TLEdgeOut]
// Rationally related crossings
trait TLRationalFormatNode extends FormatNode[TLRationalEdgeParameters, TLRationalEdgeParameters]
object TLRationalImp extends SimpleNodeImp[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalEdgeParameters, TLRationalBundle]
{
def edge(pd: TLRationalClientPortParameters, pu: TLRationalManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLRationalEdgeParameters(pd, pu, p, sourceInfo)
def bundle(e: TLRationalEdgeParameters) = new TLRationalBundle(e.bundle)
def render(e: TLRationalEdgeParameters) = RenderedEdge(colour = "#00ff00" /* green */)
override def mixO(pd: TLRationalClientPortParameters, node: OutwardNode[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalBundle]): TLRationalClientPortParameters =
pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }))
override def mixI(pu: TLRationalManagerPortParameters, node: InwardNode[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalBundle]): TLRationalManagerPortParameters =
pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }))
}
case class TLRationalAdapterNode(
clientFn: TLRationalClientPortParameters => TLRationalClientPortParameters = { s => s },
managerFn: TLRationalManagerPortParameters => TLRationalManagerPortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLRationalImp)(clientFn, managerFn) with TLRationalFormatNode
case class TLRationalIdentityNode()(implicit valName: ValName) extends IdentityNode(TLRationalImp)() with TLRationalFormatNode
object TLRationalNameNode {
def apply(name: ValName) = TLRationalIdentityNode()(name)
def apply(name: Option[String]): TLRationalIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLRationalIdentityNode = apply(Some(name))
}
case class TLRationalSourceNode()(implicit valName: ValName)
extends MixedAdapterNode(TLImp, TLRationalImp)(
dFn = { p => TLRationalClientPortParameters(p) },
uFn = { p => p.base.v1copy(minLatency = 1) }) with FormatNode[TLEdgeIn, TLRationalEdgeParameters] // discard cycles from other clock domain
case class TLRationalSinkNode(direction: RationalDirection)(implicit valName: ValName)
extends MixedAdapterNode(TLRationalImp, TLImp)(
dFn = { p => p.base.v1copy(minLatency = 1) },
uFn = { p => TLRationalManagerPortParameters(direction, p) }) with FormatNode[TLRationalEdgeParameters, TLEdgeOut]
// Credited version of TileLink channels
trait TLCreditedFormatNode extends FormatNode[TLCreditedEdgeParameters, TLCreditedEdgeParameters]
object TLCreditedImp extends SimpleNodeImp[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedEdgeParameters, TLCreditedBundle]
{
def edge(pd: TLCreditedClientPortParameters, pu: TLCreditedManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLCreditedEdgeParameters(pd, pu, p, sourceInfo)
def bundle(e: TLCreditedEdgeParameters) = new TLCreditedBundle(e.bundle)
def render(e: TLCreditedEdgeParameters) = RenderedEdge(colour = "#ffff00" /* yellow */, e.delay.toString)
override def mixO(pd: TLCreditedClientPortParameters, node: OutwardNode[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedBundle]): TLCreditedClientPortParameters =
pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }))
override def mixI(pu: TLCreditedManagerPortParameters, node: InwardNode[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedBundle]): TLCreditedManagerPortParameters =
pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }))
}
case class TLCreditedAdapterNode(
clientFn: TLCreditedClientPortParameters => TLCreditedClientPortParameters = { s => s },
managerFn: TLCreditedManagerPortParameters => TLCreditedManagerPortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLCreditedImp)(clientFn, managerFn) with TLCreditedFormatNode
case class TLCreditedIdentityNode()(implicit valName: ValName) extends IdentityNode(TLCreditedImp)() with TLCreditedFormatNode
object TLCreditedNameNode {
def apply(name: ValName) = TLCreditedIdentityNode()(name)
def apply(name: Option[String]): TLCreditedIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLCreditedIdentityNode = apply(Some(name))
}
case class TLCreditedSourceNode(delay: TLCreditedDelay)(implicit valName: ValName)
extends MixedAdapterNode(TLImp, TLCreditedImp)(
dFn = { p => TLCreditedClientPortParameters(delay, p) },
uFn = { p => p.base.v1copy(minLatency = 1) }) with FormatNode[TLEdgeIn, TLCreditedEdgeParameters] // discard cycles from other clock domain
case class TLCreditedSinkNode(delay: TLCreditedDelay)(implicit valName: ValName)
extends MixedAdapterNode(TLCreditedImp, TLImp)(
dFn = { p => p.base.v1copy(minLatency = 1) },
uFn = { p => TLCreditedManagerPortParameters(delay, p) }) with FormatNode[TLCreditedEdgeParameters, TLEdgeOut]
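// Illustrative sketch (not part of the upstream file): how the async source/sink node
// pair above is typically wrapped on either side of a clock-domain boundary. The class
// names here are hypothetical; the real helpers are TLAsyncCrossingSource and
// TLAsyncCrossingSink, whose module bodies (elided here) convert between TLBundle and
// TLAsyncBundle.
import org.chipsalliance.diplomacy.lazymodule.{LazyModule, LazyModuleImp}

class AsyncSourceSketch(sync: Option[Int] = None)(implicit p: Parameters) extends LazyModule {
  val node = TLAsyncSourceNode(sync)              // TL in, TLAsync out (sending clock domain)
  lazy val module = new LazyModuleImp(this) { }   // real crossing logic elided
}

class AsyncSinkSketch(params: AsyncQueueParams = AsyncQueueParams())(implicit p: Parameters) extends LazyModule {
  val node = TLAsyncSinkNode(params)              // TLAsync in, TL out (receiving clock domain)
  lazy val module = new LazyModuleImp(this) { }   // real crossing logic elided
}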
File RegisterRouter.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import org.chipsalliance.diplomacy.nodes._
import freechips.rocketchip.diplomacy.{AddressSet, TransferSizes}
import freechips.rocketchip.resources.{Device, Resource, ResourceBindings}
import freechips.rocketchip.prci.{NoCrossing}
import freechips.rocketchip.regmapper.{RegField, RegMapper, RegMapperParams, RegMapperInput, RegisterRouter}
import freechips.rocketchip.util.{BundleField, ControlKey, ElaborationArtefacts, GenRegDescsAnno}
import scala.math.min
class TLRegisterRouterExtraBundle(val sourceBits: Int, val sizeBits: Int) extends Bundle {
val source = UInt((sourceBits max 1).W)
val size = UInt((sizeBits max 1).W)
}
case object TLRegisterRouterExtra extends ControlKey[TLRegisterRouterExtraBundle]("tlrr_extra")
case class TLRegisterRouterExtraField(sourceBits: Int, sizeBits: Int) extends BundleField[TLRegisterRouterExtraBundle](TLRegisterRouterExtra, Output(new TLRegisterRouterExtraBundle(sourceBits, sizeBits)), x => {
x.size := 0.U
x.source := 0.U
})
/** TLRegisterNode is a specialized TL SinkNode that encapsulates MMIO registers.
* It provides functionality for describing and outputting metadata about the registers in several formats.
* It also provides a concrete implementation of a regmap function that will be used
* to wire a map of internal registers associated with this node to the node's interconnect port.
*/
case class TLRegisterNode(
address: Seq[AddressSet],
device: Device,
deviceKey: String = "reg/control",
concurrency: Int = 0,
beatBytes: Int = 4,
undefZero: Boolean = true,
executable: Boolean = false)(
implicit valName: ValName)
extends SinkNode(TLImp)(Seq(TLSlavePortParameters.v1(
Seq(TLSlaveParameters.v1(
address = address,
resources = Seq(Resource(device, deviceKey)),
executable = executable,
supportsGet = TransferSizes(1, beatBytes),
supportsPutPartial = TransferSizes(1, beatBytes),
supportsPutFull = TransferSizes(1, beatBytes),
fifoId = Some(0))), // requests are handled in order
beatBytes = beatBytes,
minLatency = min(concurrency, 1)))) with TLFormatNode // the Queue adds at most one cycle
{
val size = 1 << log2Ceil(1 + address.map(_.max).max - address.map(_.base).min)
require (size >= beatBytes)
address.foreach { case a =>
require (a.widen(size-1).base == address.head.widen(size-1).base,
s"TLRegisterNode addresses (${address}) must be aligned to its size ${size}")
}
// Calling this method causes the matching TL2 bundle to be
// configured to route all requests to the listed RegFields.
def regmap(mapping: RegField.Map*) = {
val (bundleIn, edge) = this.in(0)
val a = bundleIn.a
val d = bundleIn.d
val fields = TLRegisterRouterExtraField(edge.bundle.sourceBits, edge.bundle.sizeBits) +: a.bits.params.echoFields
val params = RegMapperParams(log2Up(size/beatBytes), beatBytes, fields)
val in = Wire(Decoupled(new RegMapperInput(params)))
in.bits.read := a.bits.opcode === TLMessages.Get
in.bits.index := edge.addr_hi(a.bits)
in.bits.data := a.bits.data
in.bits.mask := a.bits.mask
Connectable.waiveUnmatched(in.bits.extra, a.bits.echo) match {
case (lhs, rhs) => lhs :<= rhs
}
val a_extra = in.bits.extra(TLRegisterRouterExtra)
a_extra.source := a.bits.source
a_extra.size := a.bits.size
// Invoke the register map builder
val out = RegMapper(beatBytes, concurrency, undefZero, in, mapping:_*)
// No flow control needed
in.valid := a.valid
a.ready := in.ready
d.valid := out.valid
out.ready := d.ready
// We must restore the size to enable width adapters to work
val d_extra = out.bits.extra(TLRegisterRouterExtra)
d.bits := edge.AccessAck(toSource = d_extra.source, lgSize = d_extra.size)
// avoid a Mux on the data bus by manually overriding two fields
d.bits.data := out.bits.data
Connectable.waiveUnmatched(d.bits.echo, out.bits.extra) match {
case (lhs, rhs) => lhs :<= rhs
}
d.bits.opcode := Mux(out.bits.read, TLMessages.AccessAckData, TLMessages.AccessAck)
// Tie off unused channels
bundleIn.b.valid := false.B
bundleIn.c.ready := true.B
bundleIn.e.ready := true.B
genRegDescsJson(mapping:_*)
}
def genRegDescsJson(mapping: RegField.Map*): Unit = {
// Dump out the register map for documentation purposes.
val base = address.head.base
val baseHex = s"0x${base.toInt.toHexString}"
val name = s"${device.describe(ResourceBindings()).name}.At${baseHex}"
val json = GenRegDescsAnno.serialize(base, name, mapping:_*)
var suffix = 0
while( ElaborationArtefacts.contains(s"${baseHex}.${suffix}.regmap.json")) {
suffix = suffix + 1
}
ElaborationArtefacts.add(s"${baseHex}.${suffix}.regmap.json", json)
val module = Module.currentModule.get.asInstanceOf[RawModule]
GenRegDescsAnno.anno(
module,
base,
mapping:_*)
}
}
/** Mix HasTLControlRegMap into any subclass of RegisterRouter to gain helper functions for attaching a device control register map to TileLink.
* - The intended use case is that controlNode will diplomatically publish a SW-visible device's memory-mapped control registers.
* - Use the clock crossing helper controlXing to externally connect controlNode to a TileLink interconnect.
* - Use the mapping helper function regmap to internally fill out the space of device control registers.
*/
trait HasTLControlRegMap { this: RegisterRouter =>
protected val controlNode = TLRegisterNode(
address = address,
device = device,
deviceKey = "reg/control",
concurrency = concurrency,
beatBytes = beatBytes,
undefZero = undefZero,
executable = executable)
// Externally, this helper should be used to connect the register control port to a bus
val controlXing: TLInwardClockCrossingHelper = this.crossIn(controlNode)
// Backwards-compatibility default node accessor with no clock crossing
lazy val node: TLInwardNode = controlXing(NoCrossing)
// Internally, this function should be used to populate the control port with registers
protected def regmap(mapping: RegField.Map*): Unit = { controlNode.regmap(mapping:_*) }
}
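// Illustrative sketch (not part of the upstream file): a minimal MMIO device built
// directly on TLRegisterNode.regmap. The class name, device strings, address and the
// scratch register are all hypothetical, and SimpleDevice is assumed to live in
// freechips.rocketchip.resources alongside Device.
import org.chipsalliance.diplomacy.lazymodule.{LazyModule, LazyModuleImp}
import freechips.rocketchip.resources.SimpleDevice

class ExampleTLRegDevice(beatBytes: Int = 4)(implicit p: Parameters) extends LazyModule {
  val device = new SimpleDevice("example-dev", Seq("example,dev0"))
  val node = TLRegisterNode(
    address   = Seq(AddressSet(0x4000, 0xfff)),
    device    = device,
    beatBytes = beatBytes)

  lazy val module = new LazyModuleImp(this) {
    val scratch = RegInit(0.U(32.W))           // a plain read/write register
    node.regmap(
      0x00 -> Seq(RegField(32, scratch)))      // mapped at offset 0x00 within the node
  }
}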
File RegField.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.regmapper
import chisel3._
import chisel3.util.{DecoupledIO, ReadyValidIO}
import org.json4s.JsonDSL._
import org.json4s.JsonAST.JValue
import freechips.rocketchip.util.{SimpleRegIO}
case class RegReadFn private(combinational: Boolean, fn: (Bool, Bool) => (Bool, Bool, UInt))
object RegReadFn
{
// (ivalid: Bool, oready: Bool) => (iready: Bool, ovalid: Bool, data: UInt)
// iready may combinationally depend on oready
// all other combinational dependencies forbidden (e.g. ovalid <= ivalid)
// effects must become visible on the cycle after ovalid && oready
// data is only inspected when ovalid && oready
implicit def apply(x: (Bool, Bool) => (Bool, Bool, UInt)) =
new RegReadFn(false, x)
implicit def apply(x: RegisterReadIO[UInt]): RegReadFn =
RegReadFn((ivalid, oready) => {
x.request.valid := ivalid
x.response.ready := oready
(x.request.ready, x.response.valid, x.response.bits)
})
// (ready: Bool) => (valid: Bool, data: UInt)
// valid must not combinationally depend on ready
// effects must become visible on the cycle after valid && ready
implicit def apply(x: Bool => (Bool, UInt)) =
new RegReadFn(true, { case (_, oready) =>
val (ovalid, data) = x(oready)
(true.B, ovalid, data)
})
// read from a ReadyValidIO (only safe if there is a consistent source of data)
implicit def apply(x: ReadyValidIO[UInt]):RegReadFn = RegReadFn(ready => { x.ready := ready; (x.valid, x.bits) })
// read from a register
implicit def apply(x: UInt):RegReadFn = RegReadFn(ready => (true.B, x))
// noop
implicit def apply(x: Unit):RegReadFn = RegReadFn(0.U)
}
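// Illustrative sketch (not part of the upstream file): a read-to-clear counter built from
// the (ready: Bool) => (valid: Bool, data: UInt) form documented above. The object and
// method names are hypothetical; `counter` is expected to be a Reg.
object RegReadFnExamples {
  def readToClear(counter: UInt): RegReadFn =
    RegReadFn(ready => {
      when (ready) { counter := 0.U }  // clear becomes visible the cycle after the read fires
      (true.B, counter)                // always valid; the read returns the pre-clear value
    })
}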
}
case class RegWriteFn private(combinational: Boolean, fn: (Bool, Bool, UInt) => (Bool, Bool))
object RegWriteFn
{
// (ivalid: Bool, oready: Bool, data: UInt) => (iready: Bool, ovalid: Bool)
// iready may combinationally depend on both oready and data
// all other combinational dependencies forbidden (e.g. ovalid <= ivalid)
// effects must become visible on the cycle after ovalid && oready
// data should only be used for an effect when ivalid && iready
implicit def apply(x: (Bool, Bool, UInt) => (Bool, Bool)) =
new RegWriteFn(false, x)
implicit def apply(x: RegisterWriteIO[UInt]): RegWriteFn =
RegWriteFn((ivalid, oready, data) => {
x.request.valid := ivalid
x.request.bits := data
x.response.ready := oready
(x.request.ready, x.response.valid)
})
// (valid: Bool, data: UInt) => (ready: Bool)
// ready may combinationally depend on data (but not valid)
// effects must become visible on the cycle after valid && ready
implicit def apply(x: (Bool, UInt) => Bool) =
// combinational => data valid on oready
new RegWriteFn(true, { case (_, oready, data) =>
(true.B, x(oready, data))
})
// write to a DecoupledIO (only safe if there is a consistent sink draining data)
// NOTE: this is not an IrrevocableIO (even on TL2) because other fields could cause a lowered valid
implicit def apply(x: DecoupledIO[UInt]): RegWriteFn = RegWriteFn((valid, data) => { x.valid := valid; x.bits := data; x.ready })
// updates a register (or adds a mux to a wire)
implicit def apply(x: UInt): RegWriteFn = RegWriteFn((valid, data) => { when (valid) { x := data }; true.B })
// noop
implicit def apply(x: Unit): RegWriteFn = RegWriteFn((valid, data) => { true.B })
}
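// Illustrative sketch (not part of the upstream file): a "command" write target built from
// the (valid: Bool, data: UInt) => ready form above. The names are hypothetical; `pulse`
// is expected to be a Wire, asserted only in the cycle the bus write is accepted
// (analogous to how the DecoupledIO form drives valid).
object RegWriteFnExamples {
  def pulseOnWrite(pulse: Bool): RegWriteFn =
    RegWriteFn((valid, data) => {
      pulse := valid   // one-cycle strobe; the written data is ignored in this sketch
      true.B           // always ready, never back-pressures the bus
    })
}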
case class RegField(width: Int, read: RegReadFn, write: RegWriteFn, desc: Option[RegFieldDesc])
{
require (width >= 0, s"RegField width must be >= 0, not $width")
def pipelined = !read.combinational || !write.combinational
def readOnly = this.copy(write = (), desc = this.desc.map(_.copy(access = RegFieldAccessType.R)))
def toJson(byteOffset: Int, bitOffset: Int): JValue = {
( ("byteOffset" -> s"0x${byteOffset.toHexString}") ~
("bitOffset" -> bitOffset) ~
("bitWidth" -> width) ~
("name" -> desc.map(_.name)) ~
("description" -> desc.map{ d=> if (d.desc == "") None else Some(d.desc)}) ~
("resetValue" -> desc.map{_.reset}) ~
("group" -> desc.map{_.group}) ~
("groupDesc" -> desc.map{_.groupDesc}) ~
("accessType" -> desc.map {d => d.access.toString}) ~
("writeType" -> desc.map {d => d.wrType.map(_.toString)}) ~
("readAction" -> desc.map {d => d.rdAction.map(_.toString)}) ~
("volatile" -> desc.map {d => if (d.volatile) Some(true) else None}) ~
("enumerations" -> desc.map {d =>
Option(d.enumerations.map { case (key, (name, edesc)) =>
(("value" -> key) ~ ("name" -> name) ~ ("description" -> edesc))
}).filter(_.nonEmpty)}) )
}
}
object RegField
{
// Byte address => sequence of bitfields, lowest index => lowest address
type Map = (Int, Seq[RegField])
def apply(n: Int) : RegField = apply(n, (), (), Some(RegFieldDesc.reserved))
def apply(n: Int, desc: RegFieldDesc) : RegField = apply(n, (), (), Some(desc))
def apply(n: Int, r: RegReadFn, w: RegWriteFn) : RegField = apply(n, r, w, None)
def apply(n: Int, r: RegReadFn, w: RegWriteFn, desc: RegFieldDesc) : RegField = apply(n, r, w, Some(desc))
def apply(n: Int, rw: UInt) : RegField = apply(n, rw, rw, None)
def apply(n: Int, rw: UInt, desc: RegFieldDesc) : RegField = apply(n, rw, rw, Some(desc))
def r(n: Int, r: RegReadFn) : RegField = apply(n, r, (), None)
def r(n: Int, r: RegReadFn, desc: RegFieldDesc) : RegField = apply(n, r, (), Some(desc.copy(access = RegFieldAccessType.R)))
def w(n: Int, w: RegWriteFn) : RegField = apply(n, (), w, None)
def w(n: Int, w: RegWriteFn, desc: RegFieldDesc) : RegField = apply(n, (), w, Some(desc.copy(access = RegFieldAccessType.W)))
// This RegField allows 'set' to set bits in 'reg'.
// and to clear bits when the bus writes bits of value 1.
// Setting takes priority over clearing.
def w1ToClear(n: Int, reg: UInt, set: UInt, desc: Option[RegFieldDesc] = None): RegField =
RegField(n, reg, RegWriteFn((valid, data) => { reg := (~((~reg) | Mux(valid, data, 0.U))) | set; true.B }),
desc.map{_.copy(access = RegFieldAccessType.RW, wrType=Some(RegFieldWrType.ONE_TO_CLEAR), volatile = true)})
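// Illustrative counterpart to w1ToClear (not part of the upstream file): bus writes of 1
// set bits in 'reg'; nothing here clears them. The method name is hypothetical and it is
// shown only to exemplify RegWriteFn.
def w1ToSetExample(n: Int, reg: UInt, desc: Option[RegFieldDesc] = None): RegField =
  RegField(n, reg, RegWriteFn((valid, data) => { when (valid) { reg := reg | data }; true.B }),
    desc.map{_.copy(access = RegFieldAccessType.RW, volatile = true)})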
// This RegField wraps an explicit register
// (e.g. Black-Boxed Register) to create a R/W register.
def rwReg(n: Int, bb: SimpleRegIO, desc: Option[RegFieldDesc] = None) : RegField =
RegField(n, bb.q, RegWriteFn((valid, data) => {
bb.en := valid
bb.d := data
true.B
}), desc)
// Create byte-sized read-write RegFields out of a large UInt register.
// The register is updated when any of the (implemented) bytes is written; the non-written
// bytes are just copied over from their current value.
// Because the RegFields are all byte-sized, this is also suitable when a register is larger
// than the intended bus width of the device (atomic updates are impossible).
def bytes(reg: UInt, numBytes: Int, desc: Option[RegFieldDesc]): Seq[RegField] = {
require(reg.getWidth <= numBytes * 8, s"Can't break a ${reg.getWidth}-bit-wide register into only ${numBytes} bytes.")
val numFullBytes = reg.getWidth/8
val numPartialBytes = if ((reg.getWidth % 8) > 0) 1 else 0
val numPadBytes = numBytes - numFullBytes - numPartialBytes
val pad = reg | 0.U((8*numBytes).W)
val oldBytes = VecInit.tabulate(numBytes) { i => pad(8*(i+1)-1, 8*i) }
val newBytes = WireDefault(oldBytes)
val valids = WireDefault(VecInit.fill(numBytes) { false.B })
when (valids.reduce(_ || _)) { reg := newBytes.asUInt }
def wrFn(i: Int): RegWriteFn = RegWriteFn((valid, data) => {
valids(i) := valid
when (valid) {newBytes(i) := data}
true.B
})
val fullBytes = Seq.tabulate(numFullBytes) { i =>
val newDesc = desc.map {d => d.copy(name = d.name + s"_$i")}
RegField(8, oldBytes(i), wrFn(i), newDesc)}
val partialBytes = if (numPartialBytes > 0) {
val newDesc = desc.map {d => d.copy(name = d.name + s"_$numFullBytes")}
Seq(RegField(reg.getWidth % 8, oldBytes(numFullBytes), wrFn(numFullBytes), newDesc),
RegField(8 - (reg.getWidth % 8)))
} else Nil
val padBytes = Seq.fill(numPadBytes){RegField(8)}
fullBytes ++ partialBytes ++ padBytes
}
def bytes(reg: UInt, desc: Option[RegFieldDesc]): Seq[RegField] = {
val width = reg.getWidth
require (width % 8 == 0, s"RegField.bytes must be called on byte-sized reg, not ${width} bits")
bytes(reg, width/8, desc)
}
def bytes(reg: UInt, numBytes: Int): Seq[RegField] = bytes(reg, numBytes, None)
def bytes(reg: UInt): Seq[RegField] = bytes(reg, None)
}
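// Illustrative sketch (not part of the upstream file): assembling a register map from the
// helpers above. The register names are hypothetical; the result is what gets passed to a
// regmap(...) call such as TLRegisterNode.regmap.
object RegFieldMapSketch {
  def sketch(status: UInt, control: UInt, bigCfg: UInt): Seq[RegField.Map] = Seq(
    0x00 -> Seq(RegField.r(32, status)),   // read-only view of a status value
    0x04 -> Seq(RegField(32, control)),    // plain read/write register
    0x08 -> RegField.bytes(bigCfg))        // byte-granular writes to a wide register
}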
trait HasRegMap
{
def regmap(mapping: RegField.Map*): Unit
val interrupts: Vec[Bool]
}
// See Example.scala for an example of how to use regmap
File MuxLiteral.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util.log2Ceil
import scala.reflect.ClassTag
/* MuxLiteral creates a lookup table from a key to a list of values.
* Unlike MuxLookup, the table keys must be exclusive literals.
*/
object MuxLiteral
{
def apply[T <: Data:ClassTag](index: UInt, default: T, first: (UInt, T), rest: (UInt, T)*): T =
apply(index, default, first :: rest.toList)
def apply[T <: Data:ClassTag](index: UInt, default: T, cases: Seq[(UInt, T)]): T =
MuxTable(index, default, cases.map { case (k, v) => (k.litValue, v) })
}
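// Illustrative usage sketch (hypothetical selector and values): unlike MuxLookup, the keys
// must be mutually exclusive literals, which lets MuxTable below choose between a dense
// Vec encoding and a sparse switch encoding.
object MuxLiteralExample {
  def decode(sel: UInt): UInt =
    MuxLiteral(sel, 0.U(8.W),
      1.U -> 0x11.U(8.W),   // literal keys, checked for distinctness at elaboration
      4.U -> 0x44.U(8.W))
}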
object MuxSeq
{
def apply[T <: Data:ClassTag](index: UInt, default: T, first: T, rest: T*): T =
apply(index, default, first :: rest.toList)
def apply[T <: Data:ClassTag](index: UInt, default: T, cases: Seq[T]): T =
MuxTable(index, default, cases.zipWithIndex.map { case (v, i) => (BigInt(i), v) })
}
object MuxTable
{
def apply[T <: Data:ClassTag](index: UInt, default: T, first: (BigInt, T), rest: (BigInt, T)*): T =
apply(index, default, first :: rest.toList)
def apply[T <: Data:ClassTag](index: UInt, default: T, cases: Seq[(BigInt, T)]): T = {
/* All keys must be >= 0 and distinct */
cases.foreach { case (k, _) => require (k >= 0) }
require (cases.map(_._1).distinct.size == cases.size)
/* Filter out any cases identical to the default */
val simple = cases.filter { case (k, v) => !default.isLit || !v.isLit || v.litValue != default.litValue }
val maxKey = (BigInt(0) +: simple.map(_._1)).max
val endIndex = BigInt(1) << log2Ceil(maxKey+1)
if (simple.isEmpty) {
default
} else if (endIndex <= 2*simple.size) {
/* The dense encoding case uses a Vec */
val table = Array.fill(endIndex.toInt) { default }
simple.foreach { case (k, v) => table(k.toInt) = v }
Mux(index >= endIndex.U, default, VecInit(table)(index))
} else {
/* The sparse encoding case uses switch */
val out = WireDefault(default)
simple.foldLeft(new chisel3.util.SwitchContext(index, None, Set.empty)) { case (acc, (k, v)) =>
acc.is (k.U) { out := v }
}
out
}
}
}
File LazyModuleImp.scala:
package org.chipsalliance.diplomacy.lazymodule
import chisel3.{withClockAndReset, Module, RawModule, Reset, _}
import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo}
import firrtl.passes.InlineAnnotation
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.nodes.Dangle
import scala.collection.immutable.SortedMap
/** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]].
*
* This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy.
*/
sealed trait LazyModuleImpLike extends RawModule {
/** [[LazyModule]] that contains this instance. */
val wrapper: LazyModule
/** IOs that will be automatically "punched" for this instance. */
val auto: AutoBundle
/** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */
protected[diplomacy] val dangles: Seq[Dangle]
// [[wrapper.module]] had better not be accessed while LazyModules are still being built!
require(
LazyModule.scope.isEmpty,
s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}"
)
/** Set module name. Defaults to the containing LazyModule's desiredName. */
override def desiredName: String = wrapper.desiredName
suggestName(wrapper.suggestedName)
/** [[Parameters]] for chisel [[Module]]s. */
implicit val p: Parameters = wrapper.p
/** Instantiate this [[LazyModule]], returning the [[AutoBundle]] and any unconnected [[Dangle]]s from this module and
* its submodules.
*/
protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = {
// 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]],
// 2. return [[Dangle]]s from each module.
val childDangles = wrapper.children.reverse.flatMap { c =>
implicit val sourceInfo: SourceInfo = c.info
c.cloneProto.map { cp =>
// If the child is a clone, then recursively set cloneProto of its children as well
def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = {
require(bases.size == clones.size)
(bases.zip(clones)).map { case (l, r) =>
require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}")
l.cloneProto = Some(r)
assignCloneProtos(l.children, r.children)
}
}
assignCloneProtos(c.children, cp.children)
// Clone the child module as a record, and get its [[AutoBundle]]
val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName)
val clonedAuto = clone("auto").asInstanceOf[AutoBundle]
// Get the empty [[Dangle]]'s of the cloned child
val rawDangles = c.cloneDangles()
require(rawDangles.size == clonedAuto.elements.size)
// Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s
val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) }
dangles
}.getOrElse {
// For non-clones, instantiate the child module
val mod = try {
Module(c.module)
} catch {
case e: ChiselException => {
println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}")
throw e
}
}
mod.dangles
}
}
// Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]].
// This will result in a sequence of [[Dangle]] from these [[BaseNode]]s.
val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate())
// Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]]
val allDangles = nodeDangles ++ childDangles
// Group [[allDangles]] by their [[source]].
val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*)
// For each [[source]] set of [[Dangle]]s of size 2, ensure that these
// can be connected as a source-sink pair (have opposite flipped value).
// Make the connection and mark them as [[done]].
val done = Set() ++ pairing.values.filter(_.size == 2).map {
case Seq(a, b) =>
require(a.flipped != b.flipped)
// @todo <> in chisel3 makes directionless connection.
if (a.flipped) {
a.data <> b.data
} else {
b.data <> a.data
}
a.source
case _ => None
}
// Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module.
val forward = allDangles.filter(d => !done(d.source))
// Generate [[AutoBundle]] IO from [[forward]].
val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*))
// Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]]
val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) =>
if (d.flipped) {
d.data <> io
} else {
io <> d.data
}
d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name)
}
// Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]].
wrapper.inModuleBody.reverse.foreach {
_()
}
if (wrapper.shouldBeInlined) {
chisel3.experimental.annotate(new ChiselAnnotation {
def toFirrtl = InlineAnnotation(toNamed)
})
}
// Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]].
(auto, dangles)
}
}
/** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike {
/** Instantiate hardware of this `Module`. */
val (auto, dangles) = instantiate()
}
/** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike {
// These wires are the default clock+reset for all LazyModule children.
// It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the
// [[LazyRawModuleImp]] children.
// Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly.
/** drive clock explicitly. */
val childClock: Clock = Wire(Clock())
/** drive reset explicitly. */
val childReset: Reset = Wire(Reset())
// the default is that these are disabled
childClock := false.B.asClock
childReset := chisel3.DontCare
def provideImplicitClockToLazyChildren: Boolean = false
val (auto, dangles) =
if (provideImplicitClockToLazyChildren) {
withClockAndReset(childClock, childReset) { instantiate() }
} else {
instantiate()
}
}
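// Illustrative sketch (not part of the upstream file): a LazyModule whose implementation
// receives its clock and reset through explicit IO and forwards them to its lazy children
// via childClock/childReset. The class and port names are hypothetical.
class ExampleClockedIsland(implicit p: Parameters) extends LazyModule {
  lazy val module = new LazyRawModuleImp(this) {
    override def provideImplicitClockToLazyChildren = true
    val clockIn = IO(Input(Clock()))
    val resetIn = IO(Input(Reset()))
    childClock := clockIn   // overrides the disabled default driven in the constructor above
    childReset := resetIn
  }
}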
File MixedNode.scala:
package org.chipsalliance.diplomacy.nodes
import chisel3.{Data, DontCare, Wire}
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.{Field, Parameters}
import org.chipsalliance.diplomacy.ValName
import org.chipsalliance.diplomacy.sourceLine
/** One side metadata of a [[Dangle]].
*
* Describes one side of an edge going into or out of a [[BaseNode]].
*
* @param serial
* the global [[BaseNode.serial]] number of the [[BaseNode]] that this [[HalfEdge]] connects to.
* @param index
* the `index` in the [[BaseNode]]'s input or output port list that this [[HalfEdge]] belongs to.
*/
case class HalfEdge(serial: Int, index: Int) extends Ordered[HalfEdge] {
import scala.math.Ordered.orderingToOrdered
def compare(that: HalfEdge): Int = HalfEdge.unapply(this).compare(HalfEdge.unapply(that))
}
/** [[Dangle]] captures the `IO` information of a [[LazyModule]] and which two [[BaseNode]]s the [[Edges]]/[[Bundle]]
* connects.
*
* [[Dangle]]s are generated by [[BaseNode.instantiate]] using [[MixedNode.danglesOut]] and [[MixedNode.danglesIn]] ,
* [[LazyModuleImp.instantiate]] connects those that go to internal or explicit IO connections in a [[LazyModule]].
*
* @param source
* the source [[HalfEdge]] of this [[Dangle]], which captures the source [[BaseNode]] and the port `index` within
* that [[BaseNode]].
* @param sink
* sink [[HalfEdge]] of this [[Dangle]], which captures the sink [[BaseNode]] and the port `index` within that
* [[BaseNode]].
* @param flipped
* flip or not in [[AutoBundle.makeElements]]. If true this corresponds to `danglesOut`, if false it corresponds to
* `danglesIn`.
* @param dataOpt
* actual [[Data]] for the hardware connection. Can be empty if this belongs to a cloned module
*/
case class Dangle(source: HalfEdge, sink: HalfEdge, flipped: Boolean, name: String, dataOpt: Option[Data]) {
def data = dataOpt.get
}
/** [[Edges]] is a collection of parameters describing the functionality and connection for an interface, which is often
* derived from the interconnection protocol and can inform the parameterization of the hardware bundles that actually
* implement the protocol.
*/
case class Edges[EI, EO](in: Seq[EI], out: Seq[EO])
/** A field available in [[Parameters]] used to determine whether [[InwardNodeImp.monitor]] will be called. */
case object MonitorsEnabled extends Field[Boolean](true)
/** When rendering the edge in a graphical format, flip the order in which the edges' source and sink are presented.
*
* For example, when rendering graphML, yEd by default tries to put the source node vertically above the sink node, but
* [[RenderFlipped]] inverts this relationship. When a particular [[LazyModule]] contains both source nodes and sink
* nodes, flipping the rendering of one node's edge will usually produce a more concise visual layout for the
* [[LazyModule]].
*/
case object RenderFlipped extends Field[Boolean](false)
/** The sealed node class in the package; all nodes are derived from it.
*
* @param inner
* Sink interface implementation.
* @param outer
* Source interface implementation.
* @param valName
* val name of this node.
* @tparam DI
* Downward-flowing parameters received on the inner side of the node. It is usually a bunch of parameters
* describing the protocol parameters from a source. For an [[InwardNode]], it is determined by the connected
* [[OutwardNode]]. Since it can be connected to multiple sources, this parameter is always a Seq of source port
* parameters.
* @tparam UI
* Upward-flowing parameters generated by the inner side of the node. It is usually a bunch of parameters describing
* the protocol parameters of a sink. For an [[InwardNode]], it is determined by the node itself.
* @tparam EI
* Edge Parameters describing a connection on the inner side of the node. It is usually a bunch of transfers
* specified for a sink according to protocol.
* @tparam BI
* Bundle type used when connecting to the inner side of the node. It is a hardware interface of this sink interface.
* It should extend [[chisel3.Data]], which represents the real hardware.
* @tparam DO
* Downward-flowing parameters generated on the outer side of the node. It is usually a bunch of parameters
* describing the protocol parameters of a source. For an [[OutwardNode]], it is determined by the node itself.
* @tparam UO
* Upward-flowing parameters received by the outer side of the node. It is usually a bunch of parameters describing
* the protocol parameters from a sink. For an [[OutwardNode]], it is determined by the connected [[InwardNode]].
* Since it can be connected to multiple sinks, this parameter is always a Seq of sink port parameters.
* @tparam EO
* Edge Parameters describing a connection on the outer side of the node. It is usually a bunch of transfers
* specified for a source according to protocol.
* @tparam BO
* Bundle type used when connecting to the outer side of the node. It is a hardware interface of this source
* interface. It should extend [[chisel3.Data]], which represents the real hardware.
*
* @note
* Call Graph of [[MixedNode]]
* - line `─`: the source is processed by a function and the result is passed on to others
* - Arrow `→`: target of arrow is generated by source
*
* {{{
* (from the other node)
* ┌─────────────────────────────────────────────────────────[[InwardNode.uiParams]]─────────────┐
* ↓ │
* (binding node when elaboration) [[OutwardNode.uoParams]]────────────────────────[[MixedNode.mapParamsU]]→──────────┐ │
* [[InwardNode.accPI]] │ │ │
* │ │ (based on protocol) │
* │ │ [[MixedNode.inner.edgeI]] │
* │ │ ↓ │
* ↓ │ │ │
* (immobilize after elaboration) (inward port from [[OutwardNode]]) │ ↓ │
* [[InwardNode.iBindings]]──┐ [[MixedNode.iDirectPorts]]────────────────────→[[MixedNode.iPorts]] [[InwardNode.uiParams]] │
* │ │ ↑ │ │ │
* │ │ │ [[OutwardNode.doParams]] │ │
* │ │ │ (from the other node) │ │
* │ │ │ │ │ │
* │ │ │ │ │ │
* │ │ │ └────────┬──────────────┤ │
* │ │ │ │ │ │
* │ │ │ │ (based on protocol) │
* │ │ │ │ [[MixedNode.inner.edgeI]] │
* │ │ │ │ │ │
* │ │ (from the other node) │ ↓ │
* │ └───[[OutwardNode.oPortMapping]] [[OutwardNode.oStar]] │ [[MixedNode.edgesIn]]───┐ │
* │ ↑ ↑ │ │ ↓ │
* │ │ │ │ │ [[MixedNode.in]] │
* │ │ │ │ ↓ ↑ │
* │ (solve star connection) │ │ │ [[MixedNode.bundleIn]]──┘ │
* ├───[[MixedNode.resolveStar]]→─┼─────────────────────────────┤ └────────────────────────────────────┐ │
* │ │ │ [[MixedNode.bundleOut]]─┐ │ │
* │ │ │ ↑ ↓ │ │
* │ │ │ │ [[MixedNode.out]] │ │
* │ ↓ ↓ │ ↑ │ │
* │ ┌─────[[InwardNode.iPortMapping]] [[InwardNode.iStar]] [[MixedNode.edgesOut]]──┘ │ │
* │ │ (from the other node) ↑ │ │
* │ │ │ │ │ │
* │ │ │ [[MixedNode.outer.edgeO]] │ │
* │ │ │ (based on protocol) │ │
* │ │ │ │ │ │
* │ │ │ ┌────────────────────────────────────────┤ │ │
* │ │ │ │ │ │ │
* │ │ │ │ │ │ │
* │ │ │ │ │ │ │
* (immobilize after elaboration)│ ↓ │ │ │ │
* [[OutwardNode.oBindings]]─┘ [[MixedNode.oDirectPorts]]───→[[MixedNode.oPorts]] [[OutwardNode.doParams]] │ │
* ↑ (inward port from [[OutwardNode]]) │ │ │ │
* │ ┌─────────────────────────────────────────┤ │ │ │
* │ │ │ │ │ │
* │ │ │ │ │ │
* [[OutwardNode.accPO]] │ ↓ │ │ │
* (binding node when elaboration) │ [[InwardNode.diParams]]─────→[[MixedNode.mapParamsD]]────────────────────────────┘ │ │
* │ ↑ │ │
* │ └──────────────────────────────────────────────────────────────────────────────────────────┘ │
* └──────────────────────────────────────────────────────────────────────────────────────────────────────────┘
* }}}
*/
abstract class MixedNode[DI, UI, EI, BI <: Data, DO, UO, EO, BO <: Data](
val inner: InwardNodeImp[DI, UI, EI, BI],
val outer: OutwardNodeImp[DO, UO, EO, BO]
)(
implicit valName: ValName)
extends BaseNode
with NodeHandle[DI, UI, EI, BI, DO, UO, EO, BO]
with InwardNode[DI, UI, BI]
with OutwardNode[DO, UO, BO] {
// Generate a [[NodeHandle]] with inward and outward node are both this node.
val inward = this
val outward = this
/** Debug info of nodes binding. */
def bindingInfo: String = s"""$iBindingInfo
|$oBindingInfo
|""".stripMargin
/** Debug info of ports connecting. */
def connectedPortsInfo: String = s"""${oPorts.size} outward ports connected: [${oPorts.map(_._2.name).mkString(",")}]
|${iPorts.size} inward ports connected: [${iPorts.map(_._2.name).mkString(",")}]
|""".stripMargin
/** Debug info of parameters propagations. */
def parametersInfo: String = s"""${doParams.size} downstream outward parameters: [${doParams.mkString(",")}]
|${uoParams.size} upstream outward parameters: [${uoParams.mkString(",")}]
|${diParams.size} downstream inward parameters: [${diParams.mkString(",")}]
|${uiParams.size} upstream inward parameters: [${uiParams.mkString(",")}]
|""".stripMargin
/** For a given node, converts [[OutwardNode.accPO]] and [[InwardNode.accPI]] to [[MixedNode.oPortMapping]] and
* [[MixedNode.iPortMapping]].
*
* Given counts of known inward and outward binding and inward and outward star bindings, return the resolved inward
* stars and outward stars.
*
* This method will also validate the arguments and throw a runtime error if the values are unsuitable for this type
* of node.
*
* @param iKnown
* Number of known-size ([[BIND_ONCE]]) input bindings.
* @param oKnown
* Number of known-size ([[BIND_ONCE]]) output bindings.
* @param iStar
* Number of unknown size ([[BIND_STAR]]) input bindings.
* @param oStar
* Number of unknown size ([[BIND_STAR]]) output bindings.
* @return
* A Tuple of the resolved number of input and output connections.
*/
protected[diplomacy] def resolveStar(iKnown: Int, oKnown: Int, iStar: Int, oStar: Int): (Int, Int)
/** Function to generate downward-flowing outward params from the downward-flowing input params and the current output
* ports.
*
* @param n
* The size of the output sequence to generate.
* @param p
* Sequence of downward-flowing input parameters of this node.
* @return
* A `n`-sized sequence of downward-flowing output edge parameters.
*/
protected[diplomacy] def mapParamsD(n: Int, p: Seq[DI]): Seq[DO]
/** Function to generate upward-flowing input parameters from the upward-flowing output parameters [[uoParams]].
*
* @param n
* Size of the output sequence.
* @param p
* Upward-flowing output edge parameters.
* @return
* A n-sized sequence of upward-flowing input edge parameters.
*/
protected[diplomacy] def mapParamsU(n: Int, p: Seq[UO]): Seq[UI]
/** @return
* The sink cardinality of the node, the number of outputs bound with [[BIND_QUERY]] summed with inputs bound with
* [[BIND_STAR]].
*/
protected[diplomacy] lazy val sinkCard: Int = oBindings.count(_._3 == BIND_QUERY) + iBindings.count(_._3 == BIND_STAR)
/** @return
* The source cardinality of this node, the number of inputs bound with [[BIND_QUERY]] summed with the number of
* output bindings bound with [[BIND_STAR]].
*/
protected[diplomacy] lazy val sourceCard: Int =
iBindings.count(_._3 == BIND_QUERY) + oBindings.count(_._3 == BIND_STAR)
/** @return list of nodes involved in flex bindings with this node. */
protected[diplomacy] lazy val flexes: Seq[BaseNode] =
oBindings.filter(_._3 == BIND_FLEX).map(_._2) ++ iBindings.filter(_._3 == BIND_FLEX).map(_._2)
/** Resolves the flex to be either source or sink and returns the offset where the [[BIND_STAR]] operators begin
* greedily taking up the remaining connections.
*
* @return
* A value >= 0 if it is sink cardinality, a negative value for source cardinality. The magnitude of the return
* value is not relevant.
*/
protected[diplomacy] lazy val flexOffset: Int = {
/** Recursively performs a depth-first search of the [[flexes]], [[BaseNode]]s connected to this node with flex
* operators. The algorithm bottoms out when we either get to a node we have already visited or when we get to a
* connection that is not a flex and can set the direction for us. Otherwise, recurse by visiting the `flexes` of
* each node in the current set and decide whether they should be added to the set or not.
*
* @return
* the mapping of [[BaseNode]] indexed by their serial numbers.
*/
def DFS(v: BaseNode, visited: Map[Int, BaseNode]): Map[Int, BaseNode] = {
if (visited.contains(v.serial) || !v.flexibleArityDirection) {
visited
} else {
v.flexes.foldLeft(visited + (v.serial -> v))((sum, n) => DFS(n, sum))
}
}
/** Determine which [[BaseNode]]s are involved in resolving the flex connections to/from this node.
*
* @example
* {{{
* a :*=* b :*=* c
* d :*=* b
* e :*=* f
* }}}
*
* `flexSet` for `a`, `b`, `c`, or `d` will be `Set(a, b, c, d)` `flexSet` for `e` or `f` will be `Set(e,f)`
*/
val flexSet = DFS(this, Map()).values
/** The total number of :*= operators where we're on the left. */
val allSink = flexSet.map(_.sinkCard).sum
/** The total number of :=* operators used when we're on the right. */
val allSource = flexSet.map(_.sourceCard).sum
require(
allSink == 0 || allSource == 0,
s"The nodes ${flexSet.map(_.name)} which are inter-connected by :*=* have ${allSink} :*= operators and ${allSource} :=* operators connected to them, making it impossible to determine cardinality inference direction."
)
allSink - allSource
}
/** @return A value >= 0 if it is sink cardinality, a negative value for source cardinality. */
protected[diplomacy] def edgeArityDirection(n: BaseNode): Int = {
if (flexibleArityDirection) flexOffset
else if (n.flexibleArityDirection) n.flexOffset
else 0
}
/** For a node which is connected between two nodes, select the one that will influence the direction of the flex
* resolution.
*/
protected[diplomacy] def edgeAritySelect(n: BaseNode, l: => Int, r: => Int): Int = {
val dir = edgeArityDirection(n)
if (dir < 0) l
else if (dir > 0) r
else 1
}
/** Ensure that the same node is not visited twice in resolving `:*=`, etc operators. */
private var starCycleGuard = false
/** Resolve all the star operators into concrete indices. As connections are being made, some may be "star"
* connections which need to be resolved in some way to determine how many actual edges they correspond to. We also
* need to build up the ranges of edges which correspond to each binding operator, so that we can apply the correct
* edge parameters and later build up correct bundle connections.
*
* [[oPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that oPort (binding
* operator). [[iPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that iPort
* (binding operator). [[oStar]]: `Int` the value to return for this node `N` for any `N :*= foo` or `N :*=* foo :*=
* bar` [[iStar]]: `Int` the value to return for this node `N` for any `foo :=* N` or `bar :=* foo :*=* N`
*/
protected[diplomacy] lazy val (
oPortMapping: Seq[(Int, Int)],
iPortMapping: Seq[(Int, Int)],
oStar: Int,
iStar: Int
) = {
try {
if (starCycleGuard) throw StarCycleException()
starCycleGuard = true
// For a given node N...
// Number of foo :=* N
// + Number of bar :=* foo :*=* N
val oStars = oBindings.count { case (_, n, b, _, _) =>
b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) < 0)
}
// Number of N :*= foo
// + Number of N :*=* foo :*= bar
val iStars = iBindings.count { case (_, n, b, _, _) =>
b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) > 0)
}
// 1 for foo := N
// + bar.iStar for bar :*= foo :*=* N
// + foo.iStar for foo :*= N
// + 0 for foo :=* N
val oKnown = oBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, 0, n.iStar)
case BIND_QUERY => n.iStar
case BIND_STAR => 0
}
}.sum
// 1 for N := foo
// + bar.oStar for N :*=* foo :=* bar
// + foo.oStar for N :=* foo
// + 0 for N :*= foo
val iKnown = iBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, n.oStar, 0)
case BIND_QUERY => n.oStar
case BIND_STAR => 0
}
}.sum
// Resolve star depends on the node subclass to implement the algorithm for this.
val (iStar, oStar) = resolveStar(iKnown, oKnown, iStars, oStars)
// Cumulative list of resolved outward binding range starting points
val oSum = oBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, oStar, n.iStar)
case BIND_QUERY => n.iStar
case BIND_STAR => oStar
}
}.scanLeft(0)(_ + _)
// Cumulative list of resolved inward binding range starting points
val iSum = iBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, n.oStar, iStar)
case BIND_QUERY => n.oStar
case BIND_STAR => iStar
}
}.scanLeft(0)(_ + _)
// Create ranges for each binding based on the running sums and return
// those along with resolved values for the star operations.
(oSum.init.zip(oSum.tail), iSum.init.zip(iSum.tail), oStar, iStar)
} catch {
case c: StarCycleException => throw c.copy(loop = context +: c.loop)
}
}
/** Sequence of outward ports.
*
* This should be called after all star bindings are resolved.
*
* Each element is: `j` Port index of this binding in the Node's [[iPortMapping]] on the other side of the binding.
* `n` Instance of inward node. `p` View of [[Parameters]] where this connection was made. `s` Source info where this
* connection was made in the source code.
*/
protected[diplomacy] lazy val oDirectPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] =
oBindings.flatMap { case (i, n, _, p, s) =>
// for each binding operator in this node, look at what it connects to
val (start, end) = n.iPortMapping(i)
(start until end).map { j => (j, n, p, s) }
}
/** Sequence of inward ports.
*
* This should be called after all star bindings are resolved.
*
* `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding. `n` Instance of
* outward node. `p` View of [[Parameters]] where this connection was made. `s` [[SourceInfo]] where this connection
* was made in the source code.
*/
protected[diplomacy] lazy val iDirectPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] =
iBindings.flatMap { case (i, n, _, p, s) =>
// query this port index range of this node in the other side of node.
val (start, end) = n.oPortMapping(i)
(start until end).map { j => (j, n, p, s) }
}
// Ephemeral nodes (which have non-None iForward/oForward) have in_degree = out_degree
// Thus, there must exist an Eulerian path and the below algorithms terminate
@scala.annotation.tailrec
private def oTrace(
tuple: (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)
): (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo) = tuple match {
case (i, n, p, s) => n.iForward(i) match {
case None => (i, n, p, s)
case Some((j, m)) => oTrace((j, m, p, s))
}
}
@scala.annotation.tailrec
private def iTrace(
tuple: (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)
): (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo) = tuple match {
case (i, n, p, s) => n.oForward(i) match {
case None => (i, n, p, s)
case Some((j, m)) => iTrace((j, m, p, s))
}
}
/** Final output ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved.
*
* Each Port is a tuple of:
* - Numeric index of this binding in the [[InwardNode]] on the other end.
* - [[InwardNode]] on the other end of this binding.
* - A view of [[Parameters]] where the binding occurred.
* - [[SourceInfo]] for source-level error reporting.
*/
lazy val oPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] = oDirectPorts.map(oTrace)
/** Final input ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved.
*
* Each Port is a tuple of:
* - numeric index of this binding in [[OutwardNode]] on the other end.
* - [[OutwardNode]] on the other end of this binding.
* - a view of [[Parameters]] where the binding occurred.
* - [[SourceInfo]] for source-level error reporting.
*/
lazy val iPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] = iDirectPorts.map(iTrace)
private var oParamsCycleGuard = false
protected[diplomacy] lazy val diParams: Seq[DI] = iPorts.map { case (i, n, _, _) => n.doParams(i) }
protected[diplomacy] lazy val doParams: Seq[DO] = {
try {
if (oParamsCycleGuard) throw DownwardCycleException()
oParamsCycleGuard = true
val o = mapParamsD(oPorts.size, diParams)
require(
o.size == oPorts.size,
s"""Diplomacy has detected a problem with your graph:
|At the following node, the number of outward ports should equal the number of produced outward parameters.
|$context
|$connectedPortsInfo
|Downstreamed inward parameters: [${diParams.mkString(",")}]
|Produced outward parameters: [${o.mkString(",")}]
|""".stripMargin
)
o.map(outer.mixO(_, this))
} catch {
case c: DownwardCycleException => throw c.copy(loop = context +: c.loop)
}
}
private var iParamsCycleGuard = false
protected[diplomacy] lazy val uoParams: Seq[UO] = oPorts.map { case (o, n, _, _) => n.uiParams(o) }
protected[diplomacy] lazy val uiParams: Seq[UI] = {
try {
if (iParamsCycleGuard) throw UpwardCycleException()
iParamsCycleGuard = true
val i = mapParamsU(iPorts.size, uoParams)
require(
i.size == iPorts.size,
s"""Diplomacy has detected a problem with your graph:
|At the following node, the number of inward ports should equal the number of produced inward parameters.
|$context
|$connectedPortsInfo
|Upstreamed outward parameters: [${uoParams.mkString(",")}]
|Produced inward parameters: [${i.mkString(",")}]
|""".stripMargin
)
i.map(inner.mixI(_, this))
} catch {
case c: UpwardCycleException => throw c.copy(loop = context +: c.loop)
}
}
/** Outward edge parameters. */
protected[diplomacy] lazy val edgesOut: Seq[EO] =
(oPorts.zip(doParams)).map { case ((i, n, p, s), o) => outer.edgeO(o, n.uiParams(i), p, s) }
/** Inward edge parameters. */
protected[diplomacy] lazy val edgesIn: Seq[EI] =
(iPorts.zip(uiParams)).map { case ((o, n, p, s), i) => inner.edgeI(n.doParams(o), i, p, s) }
/** A tuple of the input edge parameters and output edge parameters for the edges bound to this node.
*
* If you need to access to the edges of a foreign Node, use this method (in/out create bundles).
*/
lazy val edges: Edges[EI, EO] = Edges(edgesIn, edgesOut)
/** Create actual Wires corresponding to the Bundles parameterized by the outward edges of this node. */
protected[diplomacy] lazy val bundleOut: Seq[BO] = edgesOut.map { e =>
val x = Wire(outer.bundleO(e)).suggestName(s"${valName.value}Out")
// TODO: Don't care unconnected forwarded diplomatic signals for compatibility issue,
// In the future, we should add an option to decide whether allowing unconnected in the LazyModule
x := DontCare
x
}
/** Create actual Wires corresponding to the Bundles parameterized by the inward edges of this node. */
protected[diplomacy] lazy val bundleIn: Seq[BI] = edgesIn.map { e =>
val x = Wire(inner.bundleI(e)).suggestName(s"${valName.value}In")
// TODO: Don't care unconnected forwarded diplomatic signals for compatibility issue,
// In the future, we should add an option to decide whether allowing unconnected in the LazyModule
x := DontCare
x
}
private def emptyDanglesOut: Seq[Dangle] = oPorts.zipWithIndex.map { case ((j, n, _, _), i) =>
Dangle(
source = HalfEdge(serial, i),
sink = HalfEdge(n.serial, j),
flipped = false,
name = wirePrefix + "out",
dataOpt = None
)
}
private def emptyDanglesIn: Seq[Dangle] = iPorts.zipWithIndex.map { case ((j, n, _, _), i) =>
Dangle(
source = HalfEdge(n.serial, j),
sink = HalfEdge(serial, i),
flipped = true,
name = wirePrefix + "in",
dataOpt = None
)
}
/** Create the [[Dangle]]s which describe the connections from this node output to other nodes inputs. */
protected[diplomacy] def danglesOut: Seq[Dangle] = emptyDanglesOut.zipWithIndex.map { case (d, i) =>
d.copy(dataOpt = Some(bundleOut(i)))
}
/** Create the [[Dangle]]s which describe the connections from this node input from other nodes outputs. */
protected[diplomacy] def danglesIn: Seq[Dangle] = emptyDanglesIn.zipWithIndex.map { case (d, i) =>
d.copy(dataOpt = Some(bundleIn(i)))
}
private[diplomacy] var instantiated = false
/** Gather Bundle and edge parameters of outward ports.
*
* Accessors to the result of negotiation to be used within [[LazyModuleImp]] Code. Should only be used within
* [[LazyModuleImp]] code or after its instantiation has completed.
*/
def out: Seq[(BO, EO)] = {
require(
instantiated,
s"$name.out should not be called until after instantiation of its parent LazyModule.module has begun"
)
bundleOut.zip(edgesOut)
}
/** Gather Bundle and edge parameters of inward ports.
*
* Accessors to the result of negotiation to be used within [[LazyModuleImp]] Code. Should only be used within
* [[LazyModuleImp]] code or after its instantiation has completed.
*/
def in: Seq[(BI, EI)] = {
require(
instantiated,
s"$name.in should not be called until after instantiation of its parent LazyModule.module has begun"
)
bundleIn.zip(edgesIn)
}
/** Actually instantiate this node during [[LazyModuleImp]] evaluation. Mark that it's safe to use the Bundle wires,
* instantiate monitors on all input ports if appropriate, and return all the dangles of this node.
*/
protected[diplomacy] def instantiate(): Seq[Dangle] = {
instantiated = true
if (!circuitIdentity) {
(iPorts.zip(in)).foreach { case ((_, _, p, _), (b, e)) => if (p(MonitorsEnabled)) inner.monitor(b, e) }
}
danglesOut ++ danglesIn
}
protected[diplomacy] def cloneDangles(): Seq[Dangle] = emptyDanglesOut ++ emptyDanglesIn
/** Connects the outward part of a node with the inward part of this node. */
protected[diplomacy] def bind(
h: OutwardNode[DI, UI, BI],
binding: NodeBinding
)(
implicit p: Parameters,
sourceInfo: SourceInfo
): Unit = {
val x = this // x := y
val y = h
sourceLine(sourceInfo, " at ", "")
val i = x.iPushed
val o = y.oPushed
y.oPush(
i,
x,
binding match {
case BIND_ONCE => BIND_ONCE
case BIND_FLEX => BIND_FLEX
case BIND_STAR => BIND_QUERY
case BIND_QUERY => BIND_STAR
}
)
x.iPush(o, y, binding)
}
/* Metadata for printing the node graph. */
def inputs: Seq[(OutwardNode[DI, UI, BI], RenderedEdge)] = (iPorts.zip(edgesIn)).map { case ((_, n, p, _), e) =>
val re = inner.render(e)
(n, re.copy(flipped = re.flipped != p(RenderFlipped)))
}
/** Metadata for printing the node graph */
def outputs: Seq[(InwardNode[DO, UO, BO], RenderedEdge)] = oPorts.map { case (i, n, _, _) => (n, n.inputs(i)._2) }
}
File Edges.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.util._
class TLEdge(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdgeParameters(client, manager, params, sourceInfo)
{
def isAligned(address: UInt, lgSize: UInt): Bool = {
if (maxLgSize == 0) true.B else {
val mask = UIntToOH1(lgSize, maxLgSize)
(address & mask) === 0.U
}
}
def mask(address: UInt, lgSize: UInt): UInt =
MaskGen(address, lgSize, manager.beatBytes)
def staticHasData(bundle: TLChannel): Option[Boolean] = {
bundle match {
case _:TLBundleA => {
// Do there exist A messages with Data?
val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial
// Do there exist A messages without Data?
val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint
// Statically optimize the case where hasData is a constant
if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None
}
case _:TLBundleB => {
// Do there exist B messages with Data?
val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial
// Do there exist B messages without Data?
val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint
// Statically optimize the case where hasData is a constant
if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None
}
case _:TLBundleC => {
// Do there exist C messages with Data?
val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe
// Do there exist C messages without Data?
val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe
if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None
}
case _:TLBundleD => {
// Do there exist D messages with Data?
val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB
// Do there exist D messages without Data?
val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT
if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None
}
case _:TLBundleE => Some(false)
}
}
def isRequest(x: TLChannel): Bool = {
x match {
case a: TLBundleA => true.B
case b: TLBundleB => true.B
case c: TLBundleC => c.opcode(2) && c.opcode(1)
// opcode === TLMessages.Release ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(2) && !d.opcode(1)
// opcode === TLMessages.Grant ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
}
def isResponse(x: TLChannel): Bool = {
x match {
case a: TLBundleA => false.B
case b: TLBundleB => false.B
case c: TLBundleC => !c.opcode(2) || !c.opcode(1)
// opcode =/= TLMessages.Release &&
// opcode =/= TLMessages.ReleaseData
case d: TLBundleD => true.B // Grant isResponse + isRequest
case e: TLBundleE => true.B
}
}
def hasData(x: TLChannel): Bool = {
val opdata = x match {
case a: TLBundleA => !a.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case b: TLBundleB => !b.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case c: TLBundleC => c.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.ProbeAckData ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
staticHasData(x).map(_.B).getOrElse(opdata)
}
def opcode(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.opcode
case b: TLBundleB => b.opcode
case c: TLBundleC => c.opcode
case d: TLBundleD => d.opcode
}
}
def param(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.param
case b: TLBundleB => b.param
case c: TLBundleC => c.param
case d: TLBundleD => d.param
}
}
def size(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.size
case b: TLBundleB => b.size
case c: TLBundleC => c.size
case d: TLBundleD => d.size
}
}
def data(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.data
case b: TLBundleB => b.data
case c: TLBundleC => c.data
case d: TLBundleD => d.data
}
}
def corrupt(x: TLDataChannel): Bool = {
x match {
case a: TLBundleA => a.corrupt
case b: TLBundleB => b.corrupt
case c: TLBundleC => c.corrupt
case d: TLBundleD => d.corrupt
}
}
def mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.mask
case b: TLBundleB => b.mask
case c: TLBundleC => mask(c.address, c.size)
}
}
def full_mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => mask(a.address, a.size)
case b: TLBundleB => mask(b.address, b.size)
case c: TLBundleC => mask(c.address, c.size)
}
}
def address(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.address
case b: TLBundleB => b.address
case c: TLBundleC => c.address
}
}
def source(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.source
case b: TLBundleB => b.source
case c: TLBundleC => c.source
case d: TLBundleD => d.source
}
}
def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes)
def addr_lo(x: UInt): UInt =
if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0)
def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x))
def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x))
def numBeats(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 1.U
case bundle: TLDataChannel => {
val hasData = this.hasData(bundle)
val size = this.size(bundle)
val cutoff = log2Ceil(manager.beatBytes)
val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U
val decode = UIntToOH(size, maxLgSize+1) >> cutoff
Mux(hasData, decode | small.asUInt, 1.U)
}
}
}
def numBeats1(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 0.U
case bundle: TLDataChannel => {
if (maxLgSize == 0) {
0.U
} else {
val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes)
Mux(hasData(bundle), decode, 0.U)
}
}
}
}
def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val beats1 = numBeats1(bits)
val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W))
val counter1 = counter - 1.U
val first = counter === 0.U
val last = counter === 1.U || beats1 === 0.U
val done = last && fire
val count = (beats1 & ~counter1)
when (fire) {
counter := Mux(first, beats1, counter1)
}
(first, last, done, count)
}
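  // A small software model of the beat arithmetic behind numBeats/numBeats1 above
  // (hypothetical helper, for illustration only): a data-carrying burst of 2^lgSize
  // bytes occupies max(1, 2^lgSize / beatBytes) beats; messages without data take one beat.
  private def beatsModelSketch(lgSize: Int, hasData: Boolean): Int = {
    val bytes = 1 << lgSize
    if (hasData) 1 max (bytes / manager.beatBytes) else 1
  }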
def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1
def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire)
def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid)
def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2
def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire)
def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid)
def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3
def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire)
def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid)
def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3)
}
def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire)
def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid)
def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4)
}
def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire)
def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid)
def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes))
}
def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire)
def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid)
// Does the request need T permissions to be executed?
def needT(a: TLBundleA): Bool = {
val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLPermissions.NtoB -> false.B,
TLPermissions.NtoT -> true.B,
TLPermissions.BtoT -> true.B))
MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array(
TLMessages.PutFullData -> true.B,
TLMessages.PutPartialData -> true.B,
TLMessages.ArithmeticData -> true.B,
TLMessages.LogicalData -> true.B,
TLMessages.Get -> false.B,
TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLHints.PREFETCH_READ -> false.B,
TLHints.PREFETCH_WRITE -> true.B)),
TLMessages.AcquireBlock -> acq_needT,
TLMessages.AcquirePerm -> acq_needT))
}
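  // An illustrative software model of the lookup above (hypothetical helper; the
  // numeric encodings are the standard TileLink A-channel opcodes and grow params):
  // a request needs T (write) permission iff it mutates data or acquires a writable copy.
  private def needTModelSketch(opcode: Int, param: Int): Option[Boolean] = opcode match {
    case 0 | 1 | 2 | 3 => Some(true)        // PutFullData, PutPartialData, ArithmeticData, LogicalData
    case 4             => Some(false)       // Get
    case 5             => Some(param == 1)  // Hint: PREFETCH_WRITE needs T, PREFETCH_READ does not
    case 6 | 7         => Some(param != 0)  // Acquire*: NtoT/BtoT need T, NtoB does not
    case _             => None              // not an A-channel opcode
  }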
// This is a very expensive circuit; use only if you really mean it!
def inFlight(x: TLBundle): (UInt, UInt) = {
val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W))
val bce = manager.anySupportAcquireB && client.anySupportProbe
val (a_first, a_last, _) = firstlast(x.a)
val (b_first, b_last, _) = firstlast(x.b)
val (c_first, c_last, _) = firstlast(x.c)
val (d_first, d_last, _) = firstlast(x.d)
val (e_first, e_last, _) = firstlast(x.e)
val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits))
val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits))
val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits))
val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits))
val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits))
val a_inc = x.a.fire && a_first && a_request
val b_inc = x.b.fire && b_first && b_request
val c_inc = x.c.fire && c_first && c_request
val d_inc = x.d.fire && d_first && d_request
val e_inc = x.e.fire && e_first && e_request
val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil))
val a_dec = x.a.fire && a_last && a_response
val b_dec = x.b.fire && b_last && b_response
val c_dec = x.c.fire && c_last && c_response
val d_dec = x.d.fire && d_last && d_response
val e_dec = x.e.fire && e_last && e_response
val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil))
val next_flight = flight + PopCount(inc) - PopCount(dec)
flight := next_flight
(flight, next_flight)
}
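  // A hedged usage sketch (hypothetical helper, not part of the original API):
  // bound the number of outstanding transactions observed on a monitored port
  // using the expensive counter above.
  def assertMaxInFlightSketch(x: TLBundle, maxOutstanding: Int): Unit = {
    val (inflight, _) = inFlight(x)
    assert(inflight <= maxOutstanding.U, "too many TileLink transactions in flight")
  }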
def prettySourceMapping(context: String): String = {
s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n"
}
}
class TLEdgeOut(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
// Transfers
def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquireBlock
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquirePerm
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.Release
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ReleaseData
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) =
Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B)
def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAck
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions, data)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAckData
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B)
def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink)
def GrantAck(toSink: UInt): TLBundleE = {
val e = Wire(new TLBundleE(bundle))
e.sink := toSink
e
}
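  // A minimal wiring sketch (hypothetical helper and names, not part of the original
  // API): the (legal, bits) pairs returned by the constructors above are typically
  // gated by the caller's own request condition and driven onto a Decoupled A channel.
  def driveAcquireSketch(a: DecoupledIO[TLBundleA], wantAcquire: Bool, fromSource: UInt, toAddress: UInt, lgSize: UInt): Unit = {
    val (legal, bits) = AcquireBlock(fromSource, toAddress, lgSize, TLPermissions.NtoT)
    a.valid := wantAcquire && legal
    a.bits  := bits
  }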
// Accesses
def Get(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsGetFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Get
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutFullFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutFullData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, mask, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutPartialFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutPartialData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = {
require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsArithmeticFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.ArithmeticData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsLogicalFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.LogicalData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = {
require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsHintFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Hint
a.param := param
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data)
def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAckData
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size)
def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.HintAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
}
class TLEdgeIn(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = {
val todo = x.filter(!_.isEmpty)
val heads = todo.map(_.head)
val tails = todo.map(_.tail)
if (todo.isEmpty) Nil else { heads +: myTranspose(tails) }
}
// Transfers
def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = {
require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}")
val legal = client.supportsProbe(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Probe
b.param := capPermissions
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.Grant
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.GrantData
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B)
def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.ReleaseAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
// Accesses
def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = {
require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsGet(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Get
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsPutFull(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutFullData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, mask, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsPutPartial(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutPartialData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsArithmetic(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.ArithmeticData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsLogical(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.LogicalData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = {
require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsHint(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Hint
b.param := param
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size)
def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied)
def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data)
def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAckData
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B)
def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied)
def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B)
def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.HintAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
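  // A hedged convenience sketch (hypothetical helper, not part of the original API):
  // build the D-channel response for a simple A-channel access using the constructors
  // above: Put-like requests (carrying data) get AccessAck, Gets get AccessAckData.
  def simpleResponseSketch(a: TLBundleA): TLBundleD =
    Mux(hasData(a), AccessAck(a), AccessAck(a, 0.U))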
}
File CLINT.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.devices.tilelink
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy.lazymodule._
import freechips.rocketchip.diplomacy.{AddressSet}
import freechips.rocketchip.resources.{Resource, SimpleDevice}
import freechips.rocketchip.interrupts.{IntNexusNode, IntSinkParameters, IntSinkPortParameters, IntSourceParameters, IntSourcePortParameters}
import freechips.rocketchip.regmapper.{RegField, RegFieldDesc, RegFieldGroup}
import freechips.rocketchip.subsystem.{BaseSubsystem, CBUS, TLBusWrapperLocation}
import freechips.rocketchip.tilelink.{TLFragmenter, TLRegisterNode}
import freechips.rocketchip.util.Annotated
object CLINTConsts
{
def msipOffset(hart: Int) = hart * msipBytes
def timecmpOffset(hart: Int) = 0x4000 + hart * timecmpBytes
def timeOffset = 0xbff8
def msipBytes = 4
def timecmpBytes = 8
def size = 0x10000
def timeWidth = 64
def ipiWidth = 32
def ints = 2
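  // Worked example (hypothetical helper; the values follow directly from the
  // definitions above and match the register map documented in the CLINT Impl):
  //   msipOffset(0)    = 0x0000, msipOffset(1)    = 0x0004
  //   timecmpOffset(0) = 0x4000, timecmpOffset(1) = 0x4008
  //   timeOffset       = 0xbff8
  def hartOffsets(hart: Int): Seq[Int] = Seq(msipOffset(hart), timecmpOffset(hart), timeOffset)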
}
case class CLINTParams(baseAddress: BigInt = 0x02000000, intStages: Int = 0)
{
def address = AddressSet(baseAddress, CLINTConsts.size-1)
}
case object CLINTKey extends Field[Option[CLINTParams]](None)
case class CLINTAttachParams(
slaveWhere: TLBusWrapperLocation = CBUS
)
case object CLINTAttachKey extends Field(CLINTAttachParams())
class CLINT(params: CLINTParams, beatBytes: Int)(implicit p: Parameters) extends LazyModule
{
import CLINTConsts._
// clint0 => at most 4095 devices
val device = new SimpleDevice("clint", Seq("riscv,clint0")) {
override val alwaysExtended = true
}
val node: TLRegisterNode = TLRegisterNode(
address = Seq(params.address),
device = device,
beatBytes = beatBytes)
val intnode : IntNexusNode = IntNexusNode(
sourceFn = { _ => IntSourcePortParameters(Seq(IntSourceParameters(ints, Seq(Resource(device, "int"))))) },
sinkFn = { _ => IntSinkPortParameters(Seq(IntSinkParameters())) },
outputRequiresInput = false)
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
Annotated.params(this, params)
require (intnode.edges.in.size == 0, "CLINT only produces interrupts; it does not accept them")
val io = IO(new Bundle {
val rtcTick = Input(Bool())
})
val time = RegInit(0.U(timeWidth.W))
when (io.rtcTick) { time := time + 1.U }
val nTiles = intnode.out.size
val timecmp = Seq.fill(nTiles) { Reg(UInt(timeWidth.W)) }
val ipi = Seq.fill(nTiles) { RegInit(0.U(1.W)) }
val (intnode_out, _) = intnode.out.unzip
intnode_out.zipWithIndex.foreach { case (int, i) =>
int(0) := ShiftRegister(ipi(i)(0), params.intStages) // msip
int(1) := ShiftRegister(time.asUInt >= timecmp(i).asUInt, params.intStages) // mtip
}
/* 0000 msip hart 0
* 0004 msip hart 1
* 4000 mtimecmp hart 0 lo
* 4004 mtimecmp hart 0 hi
* 4008 mtimecmp hart 1 lo
* 400c mtimecmp hart 1 hi
* bff8 mtime lo
* bffc mtime hi
*/
node.regmap(
0 -> RegFieldGroup ("msip", Some("MSIP Bits"), ipi.zipWithIndex.flatMap{ case (r, i) =>
RegField(1, r, RegFieldDesc(s"msip_$i", s"MSIP bit for Hart $i", reset=Some(0))) :: RegField(ipiWidth - 1) :: Nil }),
timecmpOffset(0) -> timecmp.zipWithIndex.flatMap{ case (t, i) => RegFieldGroup(s"mtimecmp_$i", Some(s"MTIMECMP for hart $i"),
RegField.bytes(t, Some(RegFieldDesc(s"mtimecmp_$i", "", reset=None))))},
timeOffset -> RegFieldGroup("mtime", Some("Timer Register"),
RegField.bytes(time, Some(RegFieldDesc("mtime", "", reset=Some(0), volatile=true))))
)
}
}
/** Trait that will connect a CLINT to a subsystem */
trait CanHavePeripheryCLINT { this: BaseSubsystem =>
val (clintOpt, clintDomainOpt, clintTickOpt) = p(CLINTKey).map { params =>
val tlbus = locateTLBusWrapper(p(CLINTAttachKey).slaveWhere)
val clintDomainWrapper = tlbus.generateSynchronousDomain("CLINT").suggestName("clint_domain")
val clint = clintDomainWrapper { LazyModule(new CLINT(params, tlbus.beatBytes)) }
clintDomainWrapper { clint.node := tlbus.coupleTo("clint") { TLFragmenter(tlbus, Some("CLINT")) := _ } }
val clintTick = clintDomainWrapper { InModuleBody {
val tick = IO(Input(Bool()))
clint.module.io.rtcTick := tick
tick
}}
(clint, clintDomainWrapper, clintTick)
}.unzip3
}
module CLINT( // @[CLINT.scala:65:9]
input clock, // @[CLINT.scala:65:9]
input reset, // @[CLINT.scala:65:9]
output auto_int_out_0, // @[LazyModuleImp.scala:107:25]
output auto_int_out_1, // @[LazyModuleImp.scala:107:25]
output auto_in_a_ready, // @[LazyModuleImp.scala:107:25]
input auto_in_a_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_a_bits_param, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_in_a_bits_size, // @[LazyModuleImp.scala:107:25]
input [13:0] auto_in_a_bits_source, // @[LazyModuleImp.scala:107:25]
input [25:0] auto_in_a_bits_address, // @[LazyModuleImp.scala:107:25]
input [7:0] auto_in_a_bits_mask, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_in_a_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_in_a_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input auto_in_d_ready, // @[LazyModuleImp.scala:107:25]
output auto_in_d_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_in_d_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_in_d_bits_size, // @[LazyModuleImp.scala:107:25]
output [13:0] auto_in_d_bits_source, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_in_d_bits_data, // @[LazyModuleImp.scala:107:25]
input io_rtcTick // @[CLINT.scala:69:16]
);
wire out_front_valid; // @[RegisterRouter.scala:87:24]
wire out_front_ready; // @[RegisterRouter.scala:87:24]
wire out_bits_read; // @[RegisterRouter.scala:87:24]
wire [13:0] out_bits_extra_tlrr_extra_source; // @[RegisterRouter.scala:87:24]
wire [12:0] in_bits_index; // @[RegisterRouter.scala:73:18]
wire in_bits_read; // @[RegisterRouter.scala:73:18]
wire auto_in_a_valid_0 = auto_in_a_valid; // @[CLINT.scala:65:9]
wire [2:0] auto_in_a_bits_opcode_0 = auto_in_a_bits_opcode; // @[CLINT.scala:65:9]
wire [2:0] auto_in_a_bits_param_0 = auto_in_a_bits_param; // @[CLINT.scala:65:9]
wire [1:0] auto_in_a_bits_size_0 = auto_in_a_bits_size; // @[CLINT.scala:65:9]
wire [13:0] auto_in_a_bits_source_0 = auto_in_a_bits_source; // @[CLINT.scala:65:9]
wire [25:0] auto_in_a_bits_address_0 = auto_in_a_bits_address; // @[CLINT.scala:65:9]
wire [7:0] auto_in_a_bits_mask_0 = auto_in_a_bits_mask; // @[CLINT.scala:65:9]
wire [63:0] auto_in_a_bits_data_0 = auto_in_a_bits_data; // @[CLINT.scala:65:9]
wire auto_in_a_bits_corrupt_0 = auto_in_a_bits_corrupt; // @[CLINT.scala:65:9]
wire auto_in_d_ready_0 = auto_in_d_ready; // @[CLINT.scala:65:9]
wire io_rtcTick_0 = io_rtcTick; // @[CLINT.scala:65:9]
wire [12:0] out_maskMatch = 13'h7FF; // @[RegisterRouter.scala:87:24]
wire [2:0] nodeIn_d_bits_d_opcode = 3'h0; // @[Edges.scala:792:17]
wire [63:0] _out_out_bits_data_WIRE_1_3 = 64'h0; // @[MuxLiteral.scala:49:48]
wire [63:0] nodeIn_d_bits_d_data = 64'h0; // @[Edges.scala:792:17]
wire auto_in_d_bits_sink = 1'h0; // @[CLINT.scala:65:9]
wire auto_in_d_bits_denied = 1'h0; // @[CLINT.scala:65:9]
wire auto_in_d_bits_corrupt = 1'h0; // @[CLINT.scala:65:9]
wire nodeIn_d_bits_sink = 1'h0; // @[MixedNode.scala:551:17]
wire nodeIn_d_bits_denied = 1'h0; // @[MixedNode.scala:551:17]
wire nodeIn_d_bits_corrupt = 1'h0; // @[MixedNode.scala:551:17]
wire _valids_WIRE_0 = 1'h0; // @[RegField.scala:153:53]
wire _valids_WIRE_1 = 1'h0; // @[RegField.scala:153:53]
wire _valids_WIRE_2 = 1'h0; // @[RegField.scala:153:53]
wire _valids_WIRE_3 = 1'h0; // @[RegField.scala:153:53]
wire _valids_WIRE_4 = 1'h0; // @[RegField.scala:153:53]
wire _valids_WIRE_5 = 1'h0; // @[RegField.scala:153:53]
wire _valids_WIRE_6 = 1'h0; // @[RegField.scala:153:53]
wire _valids_WIRE_7 = 1'h0; // @[RegField.scala:153:53]
wire _valids_WIRE_1_0 = 1'h0; // @[RegField.scala:153:53]
wire _valids_WIRE_1_1 = 1'h0; // @[RegField.scala:153:53]
wire _valids_WIRE_1_2 = 1'h0; // @[RegField.scala:153:53]
wire _valids_WIRE_1_3 = 1'h0; // @[RegField.scala:153:53]
wire _valids_WIRE_1_4 = 1'h0; // @[RegField.scala:153:53]
wire _valids_WIRE_1_5 = 1'h0; // @[RegField.scala:153:53]
wire _valids_WIRE_1_6 = 1'h0; // @[RegField.scala:153:53]
wire _valids_WIRE_1_7 = 1'h0; // @[RegField.scala:153:53]
wire _out_rifireMux_T_16 = 1'h0; // @[RegisterRouter.scala:87:24]
wire _out_rifireMux_T_18 = 1'h0; // @[MuxLiteral.scala:49:17]
wire _out_wifireMux_T_17 = 1'h0; // @[RegisterRouter.scala:87:24]
wire _out_wifireMux_T_19 = 1'h0; // @[MuxLiteral.scala:49:17]
wire _out_rofireMux_T_16 = 1'h0; // @[RegisterRouter.scala:87:24]
wire _out_rofireMux_T_18 = 1'h0; // @[MuxLiteral.scala:49:17]
wire _out_wofireMux_T_17 = 1'h0; // @[RegisterRouter.scala:87:24]
wire _out_wofireMux_T_19 = 1'h0; // @[MuxLiteral.scala:49:17]
wire _out_out_bits_data_T = 1'h0; // @[MuxLiteral.scala:49:17]
wire _out_out_bits_data_T_2 = 1'h0; // @[MuxLiteral.scala:49:17]
wire nodeIn_d_bits_d_sink = 1'h0; // @[Edges.scala:792:17]
wire nodeIn_d_bits_d_denied = 1'h0; // @[Edges.scala:792:17]
wire nodeIn_d_bits_d_corrupt = 1'h0; // @[Edges.scala:792:17]
wire [1:0] auto_in_d_bits_param = 2'h0; // @[CLINT.scala:65:9]
wire [1:0] nodeIn_d_bits_param = 2'h0; // @[MixedNode.scala:551:17]
wire [1:0] nodeIn_d_bits_d_param = 2'h0; // @[Edges.scala:792:17]
wire intnodeOut_0; // @[MixedNode.scala:542:17]
wire out_rifireMux_out = 1'h1; // @[RegisterRouter.scala:87:24]
wire _out_rifireMux_T_5 = 1'h1; // @[RegisterRouter.scala:87:24]
wire out_rifireMux_out_1 = 1'h1; // @[RegisterRouter.scala:87:24]
wire _out_rifireMux_T_9 = 1'h1; // @[RegisterRouter.scala:87:24]
wire out_rifireMux_out_2 = 1'h1; // @[RegisterRouter.scala:87:24]
wire _out_rifireMux_T_13 = 1'h1; // @[RegisterRouter.scala:87:24]
wire out_rifireMux_out_3 = 1'h1; // @[RegisterRouter.scala:87:24]
wire _out_rifireMux_T_17 = 1'h1; // @[RegisterRouter.scala:87:24]
wire _out_rifireMux_WIRE_0 = 1'h1; // @[MuxLiteral.scala:49:48]
wire _out_rifireMux_WIRE_1 = 1'h1; // @[MuxLiteral.scala:49:48]
wire _out_rifireMux_WIRE_2 = 1'h1; // @[MuxLiteral.scala:49:48]
wire _out_rifireMux_WIRE_3 = 1'h1; // @[MuxLiteral.scala:49:48]
wire out_rifireMux = 1'h1; // @[MuxLiteral.scala:49:10]
wire out_wifireMux_out = 1'h1; // @[RegisterRouter.scala:87:24]
wire _out_wifireMux_T_6 = 1'h1; // @[RegisterRouter.scala:87:24]
wire out_wifireMux_out_1 = 1'h1; // @[RegisterRouter.scala:87:24]
wire _out_wifireMux_T_10 = 1'h1; // @[RegisterRouter.scala:87:24]
wire out_wifireMux_out_2 = 1'h1; // @[RegisterRouter.scala:87:24]
wire _out_wifireMux_T_14 = 1'h1; // @[RegisterRouter.scala:87:24]
wire out_wifireMux_out_3 = 1'h1; // @[RegisterRouter.scala:87:24]
wire _out_wifireMux_T_18 = 1'h1; // @[RegisterRouter.scala:87:24]
wire _out_wifireMux_WIRE_0 = 1'h1; // @[MuxLiteral.scala:49:48]
wire _out_wifireMux_WIRE_1 = 1'h1; // @[MuxLiteral.scala:49:48]
wire _out_wifireMux_WIRE_2 = 1'h1; // @[MuxLiteral.scala:49:48]
wire _out_wifireMux_WIRE_3 = 1'h1; // @[MuxLiteral.scala:49:48]
wire out_wifireMux = 1'h1; // @[MuxLiteral.scala:49:10]
wire out_rofireMux_out = 1'h1; // @[RegisterRouter.scala:87:24]
wire _out_rofireMux_T_5 = 1'h1; // @[RegisterRouter.scala:87:24]
wire out_rofireMux_out_1 = 1'h1; // @[RegisterRouter.scala:87:24]
wire _out_rofireMux_T_9 = 1'h1; // @[RegisterRouter.scala:87:24]
wire out_rofireMux_out_2 = 1'h1; // @[RegisterRouter.scala:87:24]
wire _out_rofireMux_T_13 = 1'h1; // @[RegisterRouter.scala:87:24]
wire out_rofireMux_out_3 = 1'h1; // @[RegisterRouter.scala:87:24]
wire _out_rofireMux_T_17 = 1'h1; // @[RegisterRouter.scala:87:24]
wire _out_rofireMux_WIRE_0 = 1'h1; // @[MuxLiteral.scala:49:48]
wire _out_rofireMux_WIRE_1 = 1'h1; // @[MuxLiteral.scala:49:48]
wire _out_rofireMux_WIRE_2 = 1'h1; // @[MuxLiteral.scala:49:48]
wire _out_rofireMux_WIRE_3 = 1'h1; // @[MuxLiteral.scala:49:48]
wire out_rofireMux = 1'h1; // @[MuxLiteral.scala:49:10]
wire out_wofireMux_out = 1'h1; // @[RegisterRouter.scala:87:24]
wire _out_wofireMux_T_6 = 1'h1; // @[RegisterRouter.scala:87:24]
wire out_wofireMux_out_1 = 1'h1; // @[RegisterRouter.scala:87:24]
wire _out_wofireMux_T_10 = 1'h1; // @[RegisterRouter.scala:87:24]
wire out_wofireMux_out_2 = 1'h1; // @[RegisterRouter.scala:87:24]
wire _out_wofireMux_T_14 = 1'h1; // @[RegisterRouter.scala:87:24]
wire out_wofireMux_out_3 = 1'h1; // @[RegisterRouter.scala:87:24]
wire _out_wofireMux_T_18 = 1'h1; // @[RegisterRouter.scala:87:24]
wire _out_wofireMux_WIRE_0 = 1'h1; // @[MuxLiteral.scala:49:48]
wire _out_wofireMux_WIRE_1 = 1'h1; // @[MuxLiteral.scala:49:48]
wire _out_wofireMux_WIRE_2 = 1'h1; // @[MuxLiteral.scala:49:48]
wire _out_wofireMux_WIRE_3 = 1'h1; // @[MuxLiteral.scala:49:48]
wire out_wofireMux = 1'h1; // @[MuxLiteral.scala:49:10]
wire out_iready = 1'h1; // @[RegisterRouter.scala:87:24]
wire out_oready = 1'h1; // @[RegisterRouter.scala:87:24]
wire _out_out_bits_data_WIRE_3 = 1'h1; // @[MuxLiteral.scala:49:48]
wire intnodeOut_1; // @[MixedNode.scala:542:17]
wire nodeIn_a_ready; // @[MixedNode.scala:551:17]
wire nodeIn_a_valid = auto_in_a_valid_0; // @[CLINT.scala:65:9]
wire [2:0] nodeIn_a_bits_opcode = auto_in_a_bits_opcode_0; // @[CLINT.scala:65:9]
wire [2:0] nodeIn_a_bits_param = auto_in_a_bits_param_0; // @[CLINT.scala:65:9]
wire [1:0] nodeIn_a_bits_size = auto_in_a_bits_size_0; // @[CLINT.scala:65:9]
wire [13:0] nodeIn_a_bits_source = auto_in_a_bits_source_0; // @[CLINT.scala:65:9]
wire [25:0] nodeIn_a_bits_address = auto_in_a_bits_address_0; // @[CLINT.scala:65:9]
wire [7:0] nodeIn_a_bits_mask = auto_in_a_bits_mask_0; // @[CLINT.scala:65:9]
wire [63:0] nodeIn_a_bits_data = auto_in_a_bits_data_0; // @[CLINT.scala:65:9]
wire nodeIn_a_bits_corrupt = auto_in_a_bits_corrupt_0; // @[CLINT.scala:65:9]
wire nodeIn_d_ready = auto_in_d_ready_0; // @[CLINT.scala:65:9]
wire nodeIn_d_valid; // @[MixedNode.scala:551:17]
wire [2:0] nodeIn_d_bits_opcode; // @[MixedNode.scala:551:17]
wire [1:0] nodeIn_d_bits_size; // @[MixedNode.scala:551:17]
wire [13:0] nodeIn_d_bits_source; // @[MixedNode.scala:551:17]
wire [63:0] nodeIn_d_bits_data; // @[MixedNode.scala:551:17]
wire auto_int_out_0_0; // @[CLINT.scala:65:9]
wire auto_int_out_1_0; // @[CLINT.scala:65:9]
wire auto_in_a_ready_0; // @[CLINT.scala:65:9]
wire [2:0] auto_in_d_bits_opcode_0; // @[CLINT.scala:65:9]
wire [1:0] auto_in_d_bits_size_0; // @[CLINT.scala:65:9]
wire [13:0] auto_in_d_bits_source_0; // @[CLINT.scala:65:9]
wire [63:0] auto_in_d_bits_data_0; // @[CLINT.scala:65:9]
wire auto_in_d_valid_0; // @[CLINT.scala:65:9]
wire in_ready; // @[RegisterRouter.scala:73:18]
assign auto_in_a_ready_0 = nodeIn_a_ready; // @[CLINT.scala:65:9]
wire in_valid = nodeIn_a_valid; // @[RegisterRouter.scala:73:18]
wire [1:0] in_bits_extra_tlrr_extra_size = nodeIn_a_bits_size; // @[RegisterRouter.scala:73:18]
wire [13:0] in_bits_extra_tlrr_extra_source = nodeIn_a_bits_source; // @[RegisterRouter.scala:73:18]
wire [7:0] in_bits_mask = nodeIn_a_bits_mask; // @[RegisterRouter.scala:73:18]
wire [63:0] in_bits_data = nodeIn_a_bits_data; // @[RegisterRouter.scala:73:18]
wire out_ready = nodeIn_d_ready; // @[RegisterRouter.scala:87:24]
wire out_valid; // @[RegisterRouter.scala:87:24]
assign auto_in_d_valid_0 = nodeIn_d_valid; // @[CLINT.scala:65:9]
assign auto_in_d_bits_opcode_0 = nodeIn_d_bits_opcode; // @[CLINT.scala:65:9]
wire [1:0] nodeIn_d_bits_d_size; // @[Edges.scala:792:17]
assign auto_in_d_bits_size_0 = nodeIn_d_bits_size; // @[CLINT.scala:65:9]
wire [13:0] nodeIn_d_bits_d_source; // @[Edges.scala:792:17]
assign auto_in_d_bits_source_0 = nodeIn_d_bits_source; // @[CLINT.scala:65:9]
wire [63:0] out_bits_data; // @[RegisterRouter.scala:87:24]
assign auto_in_d_bits_data_0 = nodeIn_d_bits_data; // @[CLINT.scala:65:9]
wire _intnodeOut_0_T; // @[CLINT.scala:82:37]
assign auto_int_out_0_0 = intnodeOut_0; // @[CLINT.scala:65:9]
wire _intnodeOut_1_T; // @[CLINT.scala:83:43]
assign auto_int_out_1_0 = intnodeOut_1; // @[CLINT.scala:65:9]
reg [63:0] time_0; // @[CLINT.scala:73:23]
wire [63:0] pad_1 = time_0; // @[RegField.scala:150:19]
wire [64:0] _time_T = {1'h0, time_0} + 65'h1; // @[CLINT.scala:73:23, :74:38]
wire [63:0] _time_T_1 = _time_T[63:0]; // @[CLINT.scala:74:38]
reg [63:0] timecmp_0; // @[CLINT.scala:77:41]
wire [63:0] pad = timecmp_0; // @[RegField.scala:150:19]
reg ipi_0; // @[CLINT.scala:78:41]
assign _intnodeOut_0_T = ipi_0; // @[CLINT.scala:78:41, :82:37]
wire _out_T_15 = ipi_0; // @[RegisterRouter.scala:87:24]
assign intnodeOut_0 = _intnodeOut_0_T; // @[CLINT.scala:82:37]
assign _intnodeOut_1_T = time_0 >= timecmp_0; // @[CLINT.scala:73:23, :77:41, :83:43]
assign intnodeOut_1 = _intnodeOut_1_T; // @[CLINT.scala:83:43]
wire [7:0] _oldBytes_T = pad[7:0]; // @[RegField.scala:150:19, :151:57]
wire [7:0] oldBytes_0 = _oldBytes_T; // @[RegField.scala:151:{47,57}]
wire [7:0] _oldBytes_T_1 = pad[15:8]; // @[RegField.scala:150:19, :151:57]
wire [7:0] oldBytes_1 = _oldBytes_T_1; // @[RegField.scala:151:{47,57}]
wire [7:0] _oldBytes_T_2 = pad[23:16]; // @[RegField.scala:150:19, :151:57]
wire [7:0] oldBytes_2 = _oldBytes_T_2; // @[RegField.scala:151:{47,57}]
wire [7:0] _oldBytes_T_3 = pad[31:24]; // @[RegField.scala:150:19, :151:57]
wire [7:0] oldBytes_3 = _oldBytes_T_3; // @[RegField.scala:151:{47,57}]
wire [7:0] _oldBytes_T_4 = pad[39:32]; // @[RegField.scala:150:19, :151:57]
wire [7:0] oldBytes_4 = _oldBytes_T_4; // @[RegField.scala:151:{47,57}]
wire [7:0] _oldBytes_T_5 = pad[47:40]; // @[RegField.scala:150:19, :151:57]
wire [7:0] oldBytes_5 = _oldBytes_T_5; // @[RegField.scala:151:{47,57}]
wire [7:0] _oldBytes_T_6 = pad[55:48]; // @[RegField.scala:150:19, :151:57]
wire [7:0] oldBytes_6 = _oldBytes_T_6; // @[RegField.scala:151:{47,57}]
wire [7:0] _oldBytes_T_7 = pad[63:56]; // @[RegField.scala:150:19, :151:57]
wire [7:0] oldBytes_7 = _oldBytes_T_7; // @[RegField.scala:151:{47,57}]
wire [7:0] _out_T_123 = oldBytes_0; // @[RegisterRouter.scala:87:24]
wire [7:0] newBytes_0; // @[RegField.scala:152:31]
wire [7:0] newBytes_1; // @[RegField.scala:152:31]
wire [7:0] newBytes_2; // @[RegField.scala:152:31]
wire [7:0] newBytes_3; // @[RegField.scala:152:31]
wire [7:0] newBytes_4; // @[RegField.scala:152:31]
wire [7:0] newBytes_5; // @[RegField.scala:152:31]
wire [7:0] newBytes_6; // @[RegField.scala:152:31]
wire [7:0] newBytes_7; // @[RegField.scala:152:31]
wire out_f_woready_10; // @[RegisterRouter.scala:87:24]
wire out_f_woready_11; // @[RegisterRouter.scala:87:24]
wire out_f_woready_12; // @[RegisterRouter.scala:87:24]
wire out_f_woready_13; // @[RegisterRouter.scala:87:24]
wire out_f_woready_14; // @[RegisterRouter.scala:87:24]
wire out_f_woready_15; // @[RegisterRouter.scala:87:24]
wire out_f_woready_16; // @[RegisterRouter.scala:87:24]
wire out_f_woready_17; // @[RegisterRouter.scala:87:24]
wire valids_0; // @[RegField.scala:153:29]
wire valids_1; // @[RegField.scala:153:29]
wire valids_2; // @[RegField.scala:153:29]
wire valids_3; // @[RegField.scala:153:29]
wire valids_4; // @[RegField.scala:153:29]
wire valids_5; // @[RegField.scala:153:29]
wire valids_6; // @[RegField.scala:153:29]
wire valids_7; // @[RegField.scala:153:29]
wire [15:0] timecmp_0_lo_lo = {newBytes_1, newBytes_0}; // @[RegField.scala:152:31, :154:52]
wire [15:0] timecmp_0_lo_hi = {newBytes_3, newBytes_2}; // @[RegField.scala:152:31, :154:52]
wire [31:0] timecmp_0_lo = {timecmp_0_lo_hi, timecmp_0_lo_lo}; // @[RegField.scala:154:52]
wire [15:0] timecmp_0_hi_lo = {newBytes_5, newBytes_4}; // @[RegField.scala:152:31, :154:52]
wire [15:0] timecmp_0_hi_hi = {newBytes_7, newBytes_6}; // @[RegField.scala:152:31, :154:52]
wire [31:0] timecmp_0_hi = {timecmp_0_hi_hi, timecmp_0_hi_lo}; // @[RegField.scala:154:52]
wire [63:0] _timecmp_0_T = {timecmp_0_hi, timecmp_0_lo}; // @[RegField.scala:154:52]
wire [7:0] _oldBytes_T_8 = pad_1[7:0]; // @[RegField.scala:150:19, :151:57]
wire [7:0] oldBytes_1_0 = _oldBytes_T_8; // @[RegField.scala:151:{47,57}]
wire [7:0] _oldBytes_T_9 = pad_1[15:8]; // @[RegField.scala:150:19, :151:57]
wire [7:0] oldBytes_1_1 = _oldBytes_T_9; // @[RegField.scala:151:{47,57}]
wire [7:0] _oldBytes_T_10 = pad_1[23:16]; // @[RegField.scala:150:19, :151:57]
wire [7:0] oldBytes_1_2 = _oldBytes_T_10; // @[RegField.scala:151:{47,57}]
wire [7:0] _oldBytes_T_11 = pad_1[31:24]; // @[RegField.scala:150:19, :151:57]
wire [7:0] oldBytes_1_3 = _oldBytes_T_11; // @[RegField.scala:151:{47,57}]
wire [7:0] _oldBytes_T_12 = pad_1[39:32]; // @[RegField.scala:150:19, :151:57]
wire [7:0] oldBytes_1_4 = _oldBytes_T_12; // @[RegField.scala:151:{47,57}]
wire [7:0] _oldBytes_T_13 = pad_1[47:40]; // @[RegField.scala:150:19, :151:57]
wire [7:0] oldBytes_1_5 = _oldBytes_T_13; // @[RegField.scala:151:{47,57}]
wire [7:0] _oldBytes_T_14 = pad_1[55:48]; // @[RegField.scala:150:19, :151:57]
wire [7:0] oldBytes_1_6 = _oldBytes_T_14; // @[RegField.scala:151:{47,57}]
wire [7:0] _oldBytes_T_15 = pad_1[63:56]; // @[RegField.scala:150:19, :151:57]
wire [7:0] oldBytes_1_7 = _oldBytes_T_15; // @[RegField.scala:151:{47,57}]
wire [7:0] _out_T_35 = oldBytes_1_0; // @[RegisterRouter.scala:87:24]
wire [7:0] newBytes_1_0; // @[RegField.scala:152:31]
wire [7:0] newBytes_1_1; // @[RegField.scala:152:31]
wire [7:0] newBytes_1_2; // @[RegField.scala:152:31]
wire [7:0] newBytes_1_3; // @[RegField.scala:152:31]
wire [7:0] newBytes_1_4; // @[RegField.scala:152:31]
wire [7:0] newBytes_1_5; // @[RegField.scala:152:31]
wire [7:0] newBytes_1_6; // @[RegField.scala:152:31]
wire [7:0] newBytes_1_7; // @[RegField.scala:152:31]
wire out_f_woready_2; // @[RegisterRouter.scala:87:24]
wire out_f_woready_3; // @[RegisterRouter.scala:87:24]
wire out_f_woready_4; // @[RegisterRouter.scala:87:24]
wire out_f_woready_5; // @[RegisterRouter.scala:87:24]
wire out_f_woready_6; // @[RegisterRouter.scala:87:24]
wire out_f_woready_7; // @[RegisterRouter.scala:87:24]
wire out_f_woready_8; // @[RegisterRouter.scala:87:24]
wire out_f_woready_9; // @[RegisterRouter.scala:87:24]
wire valids_1_0; // @[RegField.scala:153:29]
wire valids_1_1; // @[RegField.scala:153:29]
wire valids_1_2; // @[RegField.scala:153:29]
wire valids_1_3; // @[RegField.scala:153:29]
wire valids_1_4; // @[RegField.scala:153:29]
wire valids_1_5; // @[RegField.scala:153:29]
wire valids_1_6; // @[RegField.scala:153:29]
wire valids_1_7; // @[RegField.scala:153:29]
wire [15:0] time_lo_lo = {newBytes_1_1, newBytes_1_0}; // @[RegField.scala:152:31, :154:52]
wire [15:0] time_lo_hi = {newBytes_1_3, newBytes_1_2}; // @[RegField.scala:152:31, :154:52]
wire [31:0] time_lo = {time_lo_hi, time_lo_lo}; // @[RegField.scala:154:52]
wire [15:0] time_hi_lo = {newBytes_1_5, newBytes_1_4}; // @[RegField.scala:152:31, :154:52]
wire [15:0] time_hi_hi = {newBytes_1_7, newBytes_1_6}; // @[RegField.scala:152:31, :154:52]
wire [31:0] time_hi = {time_hi_hi, time_hi_lo}; // @[RegField.scala:154:52]
wire [63:0] _time_T_2 = {time_hi, time_lo}; // @[RegField.scala:154:52]
wire _out_in_ready_T; // @[RegisterRouter.scala:87:24]
assign nodeIn_a_ready = in_ready; // @[RegisterRouter.scala:73:18]
wire _in_bits_read_T; // @[RegisterRouter.scala:74:36]
wire _out_front_valid_T = in_valid; // @[RegisterRouter.scala:73:18, :87:24]
wire out_front_bits_read = in_bits_read; // @[RegisterRouter.scala:73:18, :87:24]
wire [12:0] out_front_bits_index = in_bits_index; // @[RegisterRouter.scala:73:18, :87:24]
wire [63:0] out_front_bits_data = in_bits_data; // @[RegisterRouter.scala:73:18, :87:24]
wire [7:0] out_front_bits_mask = in_bits_mask; // @[RegisterRouter.scala:73:18, :87:24]
wire [13:0] out_front_bits_extra_tlrr_extra_source = in_bits_extra_tlrr_extra_source; // @[RegisterRouter.scala:73:18, :87:24]
wire [1:0] out_front_bits_extra_tlrr_extra_size = in_bits_extra_tlrr_extra_size; // @[RegisterRouter.scala:73:18, :87:24]
assign _in_bits_read_T = nodeIn_a_bits_opcode == 3'h4; // @[RegisterRouter.scala:74:36]
assign in_bits_read = _in_bits_read_T; // @[RegisterRouter.scala:73:18, :74:36]
wire [22:0] _in_bits_index_T = nodeIn_a_bits_address[25:3]; // @[Edges.scala:192:34]
assign in_bits_index = _in_bits_index_T[12:0]; // @[RegisterRouter.scala:73:18, :75:19]
wire _out_front_ready_T = out_ready; // @[RegisterRouter.scala:87:24]
wire _out_out_valid_T; // @[RegisterRouter.scala:87:24]
assign nodeIn_d_valid = out_valid; // @[RegisterRouter.scala:87:24]
wire [63:0] _out_out_bits_data_T_4; // @[RegisterRouter.scala:87:24]
wire _nodeIn_d_bits_opcode_T = out_bits_read; // @[RegisterRouter.scala:87:24, :105:25]
assign nodeIn_d_bits_data = out_bits_data; // @[RegisterRouter.scala:87:24]
assign nodeIn_d_bits_d_source = out_bits_extra_tlrr_extra_source; // @[RegisterRouter.scala:87:24]
wire [1:0] out_bits_extra_tlrr_extra_size; // @[RegisterRouter.scala:87:24]
assign nodeIn_d_bits_d_size = out_bits_extra_tlrr_extra_size; // @[RegisterRouter.scala:87:24]
assign _out_in_ready_T = out_front_ready; // @[RegisterRouter.scala:87:24]
assign _out_out_valid_T = out_front_valid; // @[RegisterRouter.scala:87:24]
assign out_bits_read = out_front_bits_read; // @[RegisterRouter.scala:87:24]
assign out_bits_extra_tlrr_extra_source = out_front_bits_extra_tlrr_extra_source; // @[RegisterRouter.scala:87:24]
assign out_bits_extra_tlrr_extra_size = out_front_bits_extra_tlrr_extra_size; // @[RegisterRouter.scala:87:24]
wire [12:0] _GEN = out_front_bits_index & 13'h7FF; // @[RegisterRouter.scala:87:24]
wire [12:0] out_findex; // @[RegisterRouter.scala:87:24]
assign out_findex = _GEN; // @[RegisterRouter.scala:87:24]
wire [12:0] out_bindex; // @[RegisterRouter.scala:87:24]
assign out_bindex = _GEN; // @[RegisterRouter.scala:87:24]
wire _GEN_0 = out_findex == 13'h0; // @[RegisterRouter.scala:87:24]
wire _out_T; // @[RegisterRouter.scala:87:24]
assign _out_T = _GEN_0; // @[RegisterRouter.scala:87:24]
wire _out_T_4; // @[RegisterRouter.scala:87:24]
assign _out_T_4 = _GEN_0; // @[RegisterRouter.scala:87:24]
wire _GEN_1 = out_bindex == 13'h0; // @[RegisterRouter.scala:87:24]
wire _out_T_1; // @[RegisterRouter.scala:87:24]
assign _out_T_1 = _GEN_1; // @[RegisterRouter.scala:87:24]
wire _out_T_5; // @[RegisterRouter.scala:87:24]
assign _out_T_5 = _GEN_1; // @[RegisterRouter.scala:87:24]
wire _out_out_bits_data_WIRE_0 = _out_T_1; // @[MuxLiteral.scala:49:48]
wire _out_T_2 = out_findex == 13'h7FF; // @[RegisterRouter.scala:87:24]
wire _out_T_3 = out_bindex == 13'h7FF; // @[RegisterRouter.scala:87:24]
wire _out_out_bits_data_WIRE_2 = _out_T_3; // @[MuxLiteral.scala:49:48]
wire _out_rifireMux_T_3; // @[RegisterRouter.scala:87:24]
wire _out_out_bits_data_WIRE_1 = _out_T_5; // @[MuxLiteral.scala:49:48]
wire _out_rifireMux_T_11; // @[RegisterRouter.scala:87:24]
wire _out_rifireMux_T_7; // @[RegisterRouter.scala:87:24]
wire out_rivalid_0; // @[RegisterRouter.scala:87:24]
wire out_rivalid_1; // @[RegisterRouter.scala:87:24]
wire out_rivalid_2; // @[RegisterRouter.scala:87:24]
wire out_rivalid_3; // @[RegisterRouter.scala:87:24]
wire out_rivalid_4; // @[RegisterRouter.scala:87:24]
wire out_rivalid_5; // @[RegisterRouter.scala:87:24]
wire out_rivalid_6; // @[RegisterRouter.scala:87:24]
wire out_rivalid_7; // @[RegisterRouter.scala:87:24]
wire out_rivalid_8; // @[RegisterRouter.scala:87:24]
wire out_rivalid_9; // @[RegisterRouter.scala:87:24]
wire out_rivalid_10; // @[RegisterRouter.scala:87:24]
wire out_rivalid_11; // @[RegisterRouter.scala:87:24]
wire out_rivalid_12; // @[RegisterRouter.scala:87:24]
wire out_rivalid_13; // @[RegisterRouter.scala:87:24]
wire out_rivalid_14; // @[RegisterRouter.scala:87:24]
wire out_rivalid_15; // @[RegisterRouter.scala:87:24]
wire out_rivalid_16; // @[RegisterRouter.scala:87:24]
wire out_rivalid_17; // @[RegisterRouter.scala:87:24]
wire _out_wifireMux_T_4; // @[RegisterRouter.scala:87:24]
wire _out_wifireMux_T_12; // @[RegisterRouter.scala:87:24]
wire _out_wifireMux_T_8; // @[RegisterRouter.scala:87:24]
wire out_wivalid_0; // @[RegisterRouter.scala:87:24]
wire out_wivalid_1; // @[RegisterRouter.scala:87:24]
wire out_wivalid_2; // @[RegisterRouter.scala:87:24]
wire out_wivalid_3; // @[RegisterRouter.scala:87:24]
wire out_wivalid_4; // @[RegisterRouter.scala:87:24]
wire out_wivalid_5; // @[RegisterRouter.scala:87:24]
wire out_wivalid_6; // @[RegisterRouter.scala:87:24]
wire out_wivalid_7; // @[RegisterRouter.scala:87:24]
wire out_wivalid_8; // @[RegisterRouter.scala:87:24]
wire out_wivalid_9; // @[RegisterRouter.scala:87:24]
wire out_wivalid_10; // @[RegisterRouter.scala:87:24]
wire out_wivalid_11; // @[RegisterRouter.scala:87:24]
wire out_wivalid_12; // @[RegisterRouter.scala:87:24]
wire out_wivalid_13; // @[RegisterRouter.scala:87:24]
wire out_wivalid_14; // @[RegisterRouter.scala:87:24]
wire out_wivalid_15; // @[RegisterRouter.scala:87:24]
wire out_wivalid_16; // @[RegisterRouter.scala:87:24]
wire out_wivalid_17; // @[RegisterRouter.scala:87:24]
wire _out_rofireMux_T_3; // @[RegisterRouter.scala:87:24]
wire _out_rofireMux_T_11; // @[RegisterRouter.scala:87:24]
wire _out_rofireMux_T_7; // @[RegisterRouter.scala:87:24]
wire out_roready_0; // @[RegisterRouter.scala:87:24]
wire out_roready_1; // @[RegisterRouter.scala:87:24]
wire out_roready_2; // @[RegisterRouter.scala:87:24]
wire out_roready_3; // @[RegisterRouter.scala:87:24]
wire out_roready_4; // @[RegisterRouter.scala:87:24]
wire out_roready_5; // @[RegisterRouter.scala:87:24]
wire out_roready_6; // @[RegisterRouter.scala:87:24]
wire out_roready_7; // @[RegisterRouter.scala:87:24]
wire out_roready_8; // @[RegisterRouter.scala:87:24]
wire out_roready_9; // @[RegisterRouter.scala:87:24]
wire out_roready_10; // @[RegisterRouter.scala:87:24]
wire out_roready_11; // @[RegisterRouter.scala:87:24]
wire out_roready_12; // @[RegisterRouter.scala:87:24]
wire out_roready_13; // @[RegisterRouter.scala:87:24]
wire out_roready_14; // @[RegisterRouter.scala:87:24]
wire out_roready_15; // @[RegisterRouter.scala:87:24]
wire out_roready_16; // @[RegisterRouter.scala:87:24]
wire out_roready_17; // @[RegisterRouter.scala:87:24]
wire _out_wofireMux_T_4; // @[RegisterRouter.scala:87:24]
wire _out_wofireMux_T_12; // @[RegisterRouter.scala:87:24]
wire _out_wofireMux_T_8; // @[RegisterRouter.scala:87:24]
wire out_woready_0; // @[RegisterRouter.scala:87:24]
wire out_woready_1; // @[RegisterRouter.scala:87:24]
wire out_woready_2; // @[RegisterRouter.scala:87:24]
wire out_woready_3; // @[RegisterRouter.scala:87:24]
wire out_woready_4; // @[RegisterRouter.scala:87:24]
wire out_woready_5; // @[RegisterRouter.scala:87:24]
wire out_woready_6; // @[RegisterRouter.scala:87:24]
wire out_woready_7; // @[RegisterRouter.scala:87:24]
wire out_woready_8; // @[RegisterRouter.scala:87:24]
wire out_woready_9; // @[RegisterRouter.scala:87:24]
wire out_woready_10; // @[RegisterRouter.scala:87:24]
wire out_woready_11; // @[RegisterRouter.scala:87:24]
wire out_woready_12; // @[RegisterRouter.scala:87:24]
wire out_woready_13; // @[RegisterRouter.scala:87:24]
wire out_woready_14; // @[RegisterRouter.scala:87:24]
wire out_woready_15; // @[RegisterRouter.scala:87:24]
wire out_woready_16; // @[RegisterRouter.scala:87:24]
wire out_woready_17; // @[RegisterRouter.scala:87:24]
wire _out_frontMask_T = out_front_bits_mask[0]; // @[RegisterRouter.scala:87:24]
wire _out_backMask_T = out_front_bits_mask[0]; // @[RegisterRouter.scala:87:24]
wire _out_frontMask_T_1 = out_front_bits_mask[1]; // @[RegisterRouter.scala:87:24]
wire _out_backMask_T_1 = out_front_bits_mask[1]; // @[RegisterRouter.scala:87:24]
wire _out_frontMask_T_2 = out_front_bits_mask[2]; // @[RegisterRouter.scala:87:24]
wire _out_backMask_T_2 = out_front_bits_mask[2]; // @[RegisterRouter.scala:87:24]
wire _out_frontMask_T_3 = out_front_bits_mask[3]; // @[RegisterRouter.scala:87:24]
wire _out_backMask_T_3 = out_front_bits_mask[3]; // @[RegisterRouter.scala:87:24]
wire _out_frontMask_T_4 = out_front_bits_mask[4]; // @[RegisterRouter.scala:87:24]
wire _out_backMask_T_4 = out_front_bits_mask[4]; // @[RegisterRouter.scala:87:24]
wire _out_frontMask_T_5 = out_front_bits_mask[5]; // @[RegisterRouter.scala:87:24]
wire _out_backMask_T_5 = out_front_bits_mask[5]; // @[RegisterRouter.scala:87:24]
wire _out_frontMask_T_6 = out_front_bits_mask[6]; // @[RegisterRouter.scala:87:24]
wire _out_backMask_T_6 = out_front_bits_mask[6]; // @[RegisterRouter.scala:87:24]
wire _out_frontMask_T_7 = out_front_bits_mask[7]; // @[RegisterRouter.scala:87:24]
wire _out_backMask_T_7 = out_front_bits_mask[7]; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_frontMask_T_8 = {8{_out_frontMask_T}}; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_frontMask_T_9 = {8{_out_frontMask_T_1}}; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_frontMask_T_10 = {8{_out_frontMask_T_2}}; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_frontMask_T_11 = {8{_out_frontMask_T_3}}; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_frontMask_T_12 = {8{_out_frontMask_T_4}}; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_frontMask_T_13 = {8{_out_frontMask_T_5}}; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_frontMask_T_14 = {8{_out_frontMask_T_6}}; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_frontMask_T_15 = {8{_out_frontMask_T_7}}; // @[RegisterRouter.scala:87:24]
wire [15:0] out_frontMask_lo_lo = {_out_frontMask_T_9, _out_frontMask_T_8}; // @[RegisterRouter.scala:87:24]
wire [15:0] out_frontMask_lo_hi = {_out_frontMask_T_11, _out_frontMask_T_10}; // @[RegisterRouter.scala:87:24]
wire [31:0] out_frontMask_lo = {out_frontMask_lo_hi, out_frontMask_lo_lo}; // @[RegisterRouter.scala:87:24]
wire [15:0] out_frontMask_hi_lo = {_out_frontMask_T_13, _out_frontMask_T_12}; // @[RegisterRouter.scala:87:24]
wire [15:0] out_frontMask_hi_hi = {_out_frontMask_T_15, _out_frontMask_T_14}; // @[RegisterRouter.scala:87:24]
wire [31:0] out_frontMask_hi = {out_frontMask_hi_hi, out_frontMask_hi_lo}; // @[RegisterRouter.scala:87:24]
wire [63:0] out_frontMask = {out_frontMask_hi, out_frontMask_lo}; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_backMask_T_8 = {8{_out_backMask_T}}; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_backMask_T_9 = {8{_out_backMask_T_1}}; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_backMask_T_10 = {8{_out_backMask_T_2}}; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_backMask_T_11 = {8{_out_backMask_T_3}}; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_backMask_T_12 = {8{_out_backMask_T_4}}; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_backMask_T_13 = {8{_out_backMask_T_5}}; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_backMask_T_14 = {8{_out_backMask_T_6}}; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_backMask_T_15 = {8{_out_backMask_T_7}}; // @[RegisterRouter.scala:87:24]
wire [15:0] out_backMask_lo_lo = {_out_backMask_T_9, _out_backMask_T_8}; // @[RegisterRouter.scala:87:24]
wire [15:0] out_backMask_lo_hi = {_out_backMask_T_11, _out_backMask_T_10}; // @[RegisterRouter.scala:87:24]
wire [31:0] out_backMask_lo = {out_backMask_lo_hi, out_backMask_lo_lo}; // @[RegisterRouter.scala:87:24]
wire [15:0] out_backMask_hi_lo = {_out_backMask_T_13, _out_backMask_T_12}; // @[RegisterRouter.scala:87:24]
wire [15:0] out_backMask_hi_hi = {_out_backMask_T_15, _out_backMask_T_14}; // @[RegisterRouter.scala:87:24]
wire [31:0] out_backMask_hi = {out_backMask_hi_hi, out_backMask_hi_lo}; // @[RegisterRouter.scala:87:24]
wire [63:0] out_backMask = {out_backMask_hi, out_backMask_lo}; // @[RegisterRouter.scala:87:24]
wire _out_rimask_T = out_frontMask[0]; // @[RegisterRouter.scala:87:24]
wire _out_wimask_T = out_frontMask[0]; // @[RegisterRouter.scala:87:24]
wire out_rimask = _out_rimask_T; // @[RegisterRouter.scala:87:24]
wire out_wimask = _out_wimask_T; // @[RegisterRouter.scala:87:24]
wire _out_romask_T = out_backMask[0]; // @[RegisterRouter.scala:87:24]
wire _out_womask_T = out_backMask[0]; // @[RegisterRouter.scala:87:24]
wire out_romask = _out_romask_T; // @[RegisterRouter.scala:87:24]
wire out_womask = _out_womask_T; // @[RegisterRouter.scala:87:24]
wire out_f_rivalid = out_rivalid_0 & out_rimask; // @[RegisterRouter.scala:87:24]
wire _out_T_7 = out_f_rivalid; // @[RegisterRouter.scala:87:24]
wire out_f_roready = out_roready_0 & out_romask; // @[RegisterRouter.scala:87:24]
wire _out_T_8 = out_f_roready; // @[RegisterRouter.scala:87:24]
wire out_f_wivalid = out_wivalid_0 & out_wimask; // @[RegisterRouter.scala:87:24]
wire _out_T_9 = out_f_wivalid; // @[RegisterRouter.scala:87:24]
wire out_f_woready = out_woready_0 & out_womask; // @[RegisterRouter.scala:87:24]
wire _out_T_10 = out_f_woready; // @[RegisterRouter.scala:87:24]
wire _out_T_6 = out_front_bits_data[0]; // @[RegisterRouter.scala:87:24]
wire _out_T_11 = ~out_rimask; // @[RegisterRouter.scala:87:24]
wire _out_T_12 = ~out_wimask; // @[RegisterRouter.scala:87:24]
wire _out_T_13 = ~out_romask; // @[RegisterRouter.scala:87:24]
wire _out_T_14 = ~out_womask; // @[RegisterRouter.scala:87:24]
wire _out_T_16 = _out_T_15; // @[RegisterRouter.scala:87:24]
wire _out_prepend_T = _out_T_16; // @[RegisterRouter.scala:87:24]
wire [30:0] _out_rimask_T_1 = out_frontMask[31:1]; // @[RegisterRouter.scala:87:24]
wire [30:0] _out_wimask_T_1 = out_frontMask[31:1]; // @[RegisterRouter.scala:87:24]
wire out_rimask_1 = |_out_rimask_T_1; // @[RegisterRouter.scala:87:24]
wire out_wimask_1 = &_out_wimask_T_1; // @[RegisterRouter.scala:87:24]
wire [30:0] _out_romask_T_1 = out_backMask[31:1]; // @[RegisterRouter.scala:87:24]
wire [30:0] _out_womask_T_1 = out_backMask[31:1]; // @[RegisterRouter.scala:87:24]
wire out_romask_1 = |_out_romask_T_1; // @[RegisterRouter.scala:87:24]
wire out_womask_1 = &_out_womask_T_1; // @[RegisterRouter.scala:87:24]
wire out_f_rivalid_1 = out_rivalid_1 & out_rimask_1; // @[RegisterRouter.scala:87:24]
wire _out_T_18 = out_f_rivalid_1; // @[RegisterRouter.scala:87:24]
wire out_f_roready_1 = out_roready_1 & out_romask_1; // @[RegisterRouter.scala:87:24]
wire _out_T_19 = out_f_roready_1; // @[RegisterRouter.scala:87:24]
wire out_f_wivalid_1 = out_wivalid_1 & out_wimask_1; // @[RegisterRouter.scala:87:24]
wire out_f_woready_1 = out_woready_1 & out_womask_1; // @[RegisterRouter.scala:87:24]
wire [30:0] _out_T_17 = out_front_bits_data[31:1]; // @[RegisterRouter.scala:87:24]
wire _out_T_20 = ~out_rimask_1; // @[RegisterRouter.scala:87:24]
wire _out_T_21 = ~out_wimask_1; // @[RegisterRouter.scala:87:24]
wire _out_T_22 = ~out_romask_1; // @[RegisterRouter.scala:87:24]
wire _out_T_23 = ~out_womask_1; // @[RegisterRouter.scala:87:24]
wire [1:0] out_prepend = {1'h0, _out_prepend_T}; // @[RegisterRouter.scala:87:24]
wire [31:0] _out_T_24 = {30'h0, out_prepend}; // @[RegisterRouter.scala:87:24]
wire [31:0] _out_T_25 = _out_T_24; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_rimask_T_2 = out_frontMask[7:0]; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_wimask_T_2 = out_frontMask[7:0]; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_rimask_T_10 = out_frontMask[7:0]; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_wimask_T_10 = out_frontMask[7:0]; // @[RegisterRouter.scala:87:24]
wire out_rimask_2 = |_out_rimask_T_2; // @[RegisterRouter.scala:87:24]
wire out_wimask_2 = &_out_wimask_T_2; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_romask_T_2 = out_backMask[7:0]; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_womask_T_2 = out_backMask[7:0]; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_romask_T_10 = out_backMask[7:0]; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_womask_T_10 = out_backMask[7:0]; // @[RegisterRouter.scala:87:24]
wire out_romask_2 = |_out_romask_T_2; // @[RegisterRouter.scala:87:24]
wire out_womask_2 = &_out_womask_T_2; // @[RegisterRouter.scala:87:24]
wire out_f_rivalid_2 = out_rivalid_2 & out_rimask_2; // @[RegisterRouter.scala:87:24]
wire _out_T_27 = out_f_rivalid_2; // @[RegisterRouter.scala:87:24]
wire out_f_roready_2 = out_roready_2 & out_romask_2; // @[RegisterRouter.scala:87:24]
wire _out_T_28 = out_f_roready_2; // @[RegisterRouter.scala:87:24]
wire out_f_wivalid_2 = out_wivalid_2 & out_wimask_2; // @[RegisterRouter.scala:87:24]
wire _out_T_29 = out_f_wivalid_2; // @[RegisterRouter.scala:87:24]
assign out_f_woready_2 = out_woready_2 & out_womask_2; // @[RegisterRouter.scala:87:24]
assign valids_1_0 = out_f_woready_2; // @[RegisterRouter.scala:87:24]
wire _out_T_30 = out_f_woready_2; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_T_26 = out_front_bits_data[7:0]; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_T_114 = out_front_bits_data[7:0]; // @[RegisterRouter.scala:87:24]
assign newBytes_1_0 = out_f_woready_2 ? _out_T_26 : oldBytes_1_0; // @[RegisterRouter.scala:87:24]
wire _out_T_31 = ~out_rimask_2; // @[RegisterRouter.scala:87:24]
wire _out_T_32 = ~out_wimask_2; // @[RegisterRouter.scala:87:24]
wire _out_T_33 = ~out_romask_2; // @[RegisterRouter.scala:87:24]
wire _out_T_34 = ~out_womask_2; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_T_36 = _out_T_35; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_prepend_T_1 = _out_T_36; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_rimask_T_3 = out_frontMask[15:8]; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_wimask_T_3 = out_frontMask[15:8]; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_rimask_T_11 = out_frontMask[15:8]; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_wimask_T_11 = out_frontMask[15:8]; // @[RegisterRouter.scala:87:24]
wire out_rimask_3 = |_out_rimask_T_3; // @[RegisterRouter.scala:87:24]
wire out_wimask_3 = &_out_wimask_T_3; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_romask_T_3 = out_backMask[15:8]; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_womask_T_3 = out_backMask[15:8]; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_romask_T_11 = out_backMask[15:8]; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_womask_T_11 = out_backMask[15:8]; // @[RegisterRouter.scala:87:24]
wire out_romask_3 = |_out_romask_T_3; // @[RegisterRouter.scala:87:24]
wire out_womask_3 = &_out_womask_T_3; // @[RegisterRouter.scala:87:24]
wire out_f_rivalid_3 = out_rivalid_3 & out_rimask_3; // @[RegisterRouter.scala:87:24]
wire _out_T_38 = out_f_rivalid_3; // @[RegisterRouter.scala:87:24]
wire out_f_roready_3 = out_roready_3 & out_romask_3; // @[RegisterRouter.scala:87:24]
wire _out_T_39 = out_f_roready_3; // @[RegisterRouter.scala:87:24]
wire out_f_wivalid_3 = out_wivalid_3 & out_wimask_3; // @[RegisterRouter.scala:87:24]
wire _out_T_40 = out_f_wivalid_3; // @[RegisterRouter.scala:87:24]
assign out_f_woready_3 = out_woready_3 & out_womask_3; // @[RegisterRouter.scala:87:24]
assign valids_1_1 = out_f_woready_3; // @[RegisterRouter.scala:87:24]
wire _out_T_41 = out_f_woready_3; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_T_37 = out_front_bits_data[15:8]; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_T_125 = out_front_bits_data[15:8]; // @[RegisterRouter.scala:87:24]
assign newBytes_1_1 = out_f_woready_3 ? _out_T_37 : oldBytes_1_1; // @[RegisterRouter.scala:87:24]
wire _out_T_42 = ~out_rimask_3; // @[RegisterRouter.scala:87:24]
wire _out_T_43 = ~out_wimask_3; // @[RegisterRouter.scala:87:24]
wire _out_T_44 = ~out_romask_3; // @[RegisterRouter.scala:87:24]
wire _out_T_45 = ~out_womask_3; // @[RegisterRouter.scala:87:24]
wire [15:0] out_prepend_1 = {oldBytes_1_1, _out_prepend_T_1}; // @[RegisterRouter.scala:87:24]
wire [15:0] _out_T_46 = out_prepend_1; // @[RegisterRouter.scala:87:24]
wire [15:0] _out_T_47 = _out_T_46; // @[RegisterRouter.scala:87:24]
wire [15:0] _out_prepend_T_2 = _out_T_47; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_rimask_T_4 = out_frontMask[23:16]; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_wimask_T_4 = out_frontMask[23:16]; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_rimask_T_12 = out_frontMask[23:16]; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_wimask_T_12 = out_frontMask[23:16]; // @[RegisterRouter.scala:87:24]
wire out_rimask_4 = |_out_rimask_T_4; // @[RegisterRouter.scala:87:24]
wire out_wimask_4 = &_out_wimask_T_4; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_romask_T_4 = out_backMask[23:16]; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_womask_T_4 = out_backMask[23:16]; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_romask_T_12 = out_backMask[23:16]; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_womask_T_12 = out_backMask[23:16]; // @[RegisterRouter.scala:87:24]
wire out_romask_4 = |_out_romask_T_4; // @[RegisterRouter.scala:87:24]
wire out_womask_4 = &_out_womask_T_4; // @[RegisterRouter.scala:87:24]
wire out_f_rivalid_4 = out_rivalid_4 & out_rimask_4; // @[RegisterRouter.scala:87:24]
wire _out_T_49 = out_f_rivalid_4; // @[RegisterRouter.scala:87:24]
wire out_f_roready_4 = out_roready_4 & out_romask_4; // @[RegisterRouter.scala:87:24]
wire _out_T_50 = out_f_roready_4; // @[RegisterRouter.scala:87:24]
wire out_f_wivalid_4 = out_wivalid_4 & out_wimask_4; // @[RegisterRouter.scala:87:24]
wire _out_T_51 = out_f_wivalid_4; // @[RegisterRouter.scala:87:24]
assign out_f_woready_4 = out_woready_4 & out_womask_4; // @[RegisterRouter.scala:87:24]
assign valids_1_2 = out_f_woready_4; // @[RegisterRouter.scala:87:24]
wire _out_T_52 = out_f_woready_4; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_T_48 = out_front_bits_data[23:16]; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_T_136 = out_front_bits_data[23:16]; // @[RegisterRouter.scala:87:24]
assign newBytes_1_2 = out_f_woready_4 ? _out_T_48 : oldBytes_1_2; // @[RegisterRouter.scala:87:24]
wire _out_T_53 = ~out_rimask_4; // @[RegisterRouter.scala:87:24]
wire _out_T_54 = ~out_wimask_4; // @[RegisterRouter.scala:87:24]
wire _out_T_55 = ~out_romask_4; // @[RegisterRouter.scala:87:24]
wire _out_T_56 = ~out_womask_4; // @[RegisterRouter.scala:87:24]
wire [23:0] out_prepend_2 = {oldBytes_1_2, _out_prepend_T_2}; // @[RegisterRouter.scala:87:24]
wire [23:0] _out_T_57 = out_prepend_2; // @[RegisterRouter.scala:87:24]
wire [23:0] _out_T_58 = _out_T_57; // @[RegisterRouter.scala:87:24]
wire [23:0] _out_prepend_T_3 = _out_T_58; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_rimask_T_5 = out_frontMask[31:24]; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_wimask_T_5 = out_frontMask[31:24]; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_rimask_T_13 = out_frontMask[31:24]; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_wimask_T_13 = out_frontMask[31:24]; // @[RegisterRouter.scala:87:24]
wire out_rimask_5 = |_out_rimask_T_5; // @[RegisterRouter.scala:87:24]
wire out_wimask_5 = &_out_wimask_T_5; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_romask_T_5 = out_backMask[31:24]; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_womask_T_5 = out_backMask[31:24]; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_romask_T_13 = out_backMask[31:24]; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_womask_T_13 = out_backMask[31:24]; // @[RegisterRouter.scala:87:24]
wire out_romask_5 = |_out_romask_T_5; // @[RegisterRouter.scala:87:24]
wire out_womask_5 = &_out_womask_T_5; // @[RegisterRouter.scala:87:24]
wire out_f_rivalid_5 = out_rivalid_5 & out_rimask_5; // @[RegisterRouter.scala:87:24]
wire _out_T_60 = out_f_rivalid_5; // @[RegisterRouter.scala:87:24]
wire out_f_roready_5 = out_roready_5 & out_romask_5; // @[RegisterRouter.scala:87:24]
wire _out_T_61 = out_f_roready_5; // @[RegisterRouter.scala:87:24]
wire out_f_wivalid_5 = out_wivalid_5 & out_wimask_5; // @[RegisterRouter.scala:87:24]
wire _out_T_62 = out_f_wivalid_5; // @[RegisterRouter.scala:87:24]
assign out_f_woready_5 = out_woready_5 & out_womask_5; // @[RegisterRouter.scala:87:24]
assign valids_1_3 = out_f_woready_5; // @[RegisterRouter.scala:87:24]
wire _out_T_63 = out_f_woready_5; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_T_59 = out_front_bits_data[31:24]; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_T_147 = out_front_bits_data[31:24]; // @[RegisterRouter.scala:87:24]
assign newBytes_1_3 = out_f_woready_5 ? _out_T_59 : oldBytes_1_3; // @[RegisterRouter.scala:87:24]
wire _out_T_64 = ~out_rimask_5; // @[RegisterRouter.scala:87:24]
wire _out_T_65 = ~out_wimask_5; // @[RegisterRouter.scala:87:24]
wire _out_T_66 = ~out_romask_5; // @[RegisterRouter.scala:87:24]
wire _out_T_67 = ~out_womask_5; // @[RegisterRouter.scala:87:24]
wire [31:0] out_prepend_3 = {oldBytes_1_3, _out_prepend_T_3}; // @[RegisterRouter.scala:87:24]
wire [31:0] _out_T_68 = out_prepend_3; // @[RegisterRouter.scala:87:24]
wire [31:0] _out_T_69 = _out_T_68; // @[RegisterRouter.scala:87:24]
wire [31:0] _out_prepend_T_4 = _out_T_69; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_rimask_T_6 = out_frontMask[39:32]; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_wimask_T_6 = out_frontMask[39:32]; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_rimask_T_14 = out_frontMask[39:32]; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_wimask_T_14 = out_frontMask[39:32]; // @[RegisterRouter.scala:87:24]
wire out_rimask_6 = |_out_rimask_T_6; // @[RegisterRouter.scala:87:24]
wire out_wimask_6 = &_out_wimask_T_6; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_romask_T_6 = out_backMask[39:32]; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_womask_T_6 = out_backMask[39:32]; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_romask_T_14 = out_backMask[39:32]; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_womask_T_14 = out_backMask[39:32]; // @[RegisterRouter.scala:87:24]
wire out_romask_6 = |_out_romask_T_6; // @[RegisterRouter.scala:87:24]
wire out_womask_6 = &_out_womask_T_6; // @[RegisterRouter.scala:87:24]
wire out_f_rivalid_6 = out_rivalid_6 & out_rimask_6; // @[RegisterRouter.scala:87:24]
wire _out_T_71 = out_f_rivalid_6; // @[RegisterRouter.scala:87:24]
wire out_f_roready_6 = out_roready_6 & out_romask_6; // @[RegisterRouter.scala:87:24]
wire _out_T_72 = out_f_roready_6; // @[RegisterRouter.scala:87:24]
wire out_f_wivalid_6 = out_wivalid_6 & out_wimask_6; // @[RegisterRouter.scala:87:24]
wire _out_T_73 = out_f_wivalid_6; // @[RegisterRouter.scala:87:24]
assign out_f_woready_6 = out_woready_6 & out_womask_6; // @[RegisterRouter.scala:87:24]
assign valids_1_4 = out_f_woready_6; // @[RegisterRouter.scala:87:24]
wire _out_T_74 = out_f_woready_6; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_T_70 = out_front_bits_data[39:32]; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_T_158 = out_front_bits_data[39:32]; // @[RegisterRouter.scala:87:24]
assign newBytes_1_4 = out_f_woready_6 ? _out_T_70 : oldBytes_1_4; // @[RegisterRouter.scala:87:24]
wire _out_T_75 = ~out_rimask_6; // @[RegisterRouter.scala:87:24]
wire _out_T_76 = ~out_wimask_6; // @[RegisterRouter.scala:87:24]
wire _out_T_77 = ~out_romask_6; // @[RegisterRouter.scala:87:24]
wire _out_T_78 = ~out_womask_6; // @[RegisterRouter.scala:87:24]
wire [39:0] out_prepend_4 = {oldBytes_1_4, _out_prepend_T_4}; // @[RegisterRouter.scala:87:24]
wire [39:0] _out_T_79 = out_prepend_4; // @[RegisterRouter.scala:87:24]
wire [39:0] _out_T_80 = _out_T_79; // @[RegisterRouter.scala:87:24]
wire [39:0] _out_prepend_T_5 = _out_T_80; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_rimask_T_7 = out_frontMask[47:40]; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_wimask_T_7 = out_frontMask[47:40]; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_rimask_T_15 = out_frontMask[47:40]; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_wimask_T_15 = out_frontMask[47:40]; // @[RegisterRouter.scala:87:24]
wire out_rimask_7 = |_out_rimask_T_7; // @[RegisterRouter.scala:87:24]
wire out_wimask_7 = &_out_wimask_T_7; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_romask_T_7 = out_backMask[47:40]; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_womask_T_7 = out_backMask[47:40]; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_romask_T_15 = out_backMask[47:40]; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_womask_T_15 = out_backMask[47:40]; // @[RegisterRouter.scala:87:24]
wire out_romask_7 = |_out_romask_T_7; // @[RegisterRouter.scala:87:24]
wire out_womask_7 = &_out_womask_T_7; // @[RegisterRouter.scala:87:24]
wire out_f_rivalid_7 = out_rivalid_7 & out_rimask_7; // @[RegisterRouter.scala:87:24]
wire _out_T_82 = out_f_rivalid_7; // @[RegisterRouter.scala:87:24]
wire out_f_roready_7 = out_roready_7 & out_romask_7; // @[RegisterRouter.scala:87:24]
wire _out_T_83 = out_f_roready_7; // @[RegisterRouter.scala:87:24]
wire out_f_wivalid_7 = out_wivalid_7 & out_wimask_7; // @[RegisterRouter.scala:87:24]
wire _out_T_84 = out_f_wivalid_7; // @[RegisterRouter.scala:87:24]
assign out_f_woready_7 = out_woready_7 & out_womask_7; // @[RegisterRouter.scala:87:24]
assign valids_1_5 = out_f_woready_7; // @[RegisterRouter.scala:87:24]
wire _out_T_85 = out_f_woready_7; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_T_81 = out_front_bits_data[47:40]; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_T_169 = out_front_bits_data[47:40]; // @[RegisterRouter.scala:87:24]
assign newBytes_1_5 = out_f_woready_7 ? _out_T_81 : oldBytes_1_5; // @[RegisterRouter.scala:87:24]
wire _out_T_86 = ~out_rimask_7; // @[RegisterRouter.scala:87:24]
wire _out_T_87 = ~out_wimask_7; // @[RegisterRouter.scala:87:24]
wire _out_T_88 = ~out_romask_7; // @[RegisterRouter.scala:87:24]
wire _out_T_89 = ~out_womask_7; // @[RegisterRouter.scala:87:24]
wire [47:0] out_prepend_5 = {oldBytes_1_5, _out_prepend_T_5}; // @[RegisterRouter.scala:87:24]
wire [47:0] _out_T_90 = out_prepend_5; // @[RegisterRouter.scala:87:24]
wire [47:0] _out_T_91 = _out_T_90; // @[RegisterRouter.scala:87:24]
wire [47:0] _out_prepend_T_6 = _out_T_91; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_rimask_T_8 = out_frontMask[55:48]; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_wimask_T_8 = out_frontMask[55:48]; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_rimask_T_16 = out_frontMask[55:48]; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_wimask_T_16 = out_frontMask[55:48]; // @[RegisterRouter.scala:87:24]
wire out_rimask_8 = |_out_rimask_T_8; // @[RegisterRouter.scala:87:24]
wire out_wimask_8 = &_out_wimask_T_8; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_romask_T_8 = out_backMask[55:48]; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_womask_T_8 = out_backMask[55:48]; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_romask_T_16 = out_backMask[55:48]; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_womask_T_16 = out_backMask[55:48]; // @[RegisterRouter.scala:87:24]
wire out_romask_8 = |_out_romask_T_8; // @[RegisterRouter.scala:87:24]
wire out_womask_8 = &_out_womask_T_8; // @[RegisterRouter.scala:87:24]
wire out_f_rivalid_8 = out_rivalid_8 & out_rimask_8; // @[RegisterRouter.scala:87:24]
wire _out_T_93 = out_f_rivalid_8; // @[RegisterRouter.scala:87:24]
wire out_f_roready_8 = out_roready_8 & out_romask_8; // @[RegisterRouter.scala:87:24]
wire _out_T_94 = out_f_roready_8; // @[RegisterRouter.scala:87:24]
wire out_f_wivalid_8 = out_wivalid_8 & out_wimask_8; // @[RegisterRouter.scala:87:24]
wire _out_T_95 = out_f_wivalid_8; // @[RegisterRouter.scala:87:24]
assign out_f_woready_8 = out_woready_8 & out_womask_8; // @[RegisterRouter.scala:87:24]
assign valids_1_6 = out_f_woready_8; // @[RegisterRouter.scala:87:24]
wire _out_T_96 = out_f_woready_8; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_T_92 = out_front_bits_data[55:48]; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_T_180 = out_front_bits_data[55:48]; // @[RegisterRouter.scala:87:24]
assign newBytes_1_6 = out_f_woready_8 ? _out_T_92 : oldBytes_1_6; // @[RegisterRouter.scala:87:24]
wire _out_T_97 = ~out_rimask_8; // @[RegisterRouter.scala:87:24]
wire _out_T_98 = ~out_wimask_8; // @[RegisterRouter.scala:87:24]
wire _out_T_99 = ~out_romask_8; // @[RegisterRouter.scala:87:24]
wire _out_T_100 = ~out_womask_8; // @[RegisterRouter.scala:87:24]
wire [55:0] out_prepend_6 = {oldBytes_1_6, _out_prepend_T_6}; // @[RegisterRouter.scala:87:24]
wire [55:0] _out_T_101 = out_prepend_6; // @[RegisterRouter.scala:87:24]
wire [55:0] _out_T_102 = _out_T_101; // @[RegisterRouter.scala:87:24]
wire [55:0] _out_prepend_T_7 = _out_T_102; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_rimask_T_9 = out_frontMask[63:56]; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_wimask_T_9 = out_frontMask[63:56]; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_rimask_T_17 = out_frontMask[63:56]; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_wimask_T_17 = out_frontMask[63:56]; // @[RegisterRouter.scala:87:24]
wire out_rimask_9 = |_out_rimask_T_9; // @[RegisterRouter.scala:87:24]
wire out_wimask_9 = &_out_wimask_T_9; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_romask_T_9 = out_backMask[63:56]; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_womask_T_9 = out_backMask[63:56]; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_romask_T_17 = out_backMask[63:56]; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_womask_T_17 = out_backMask[63:56]; // @[RegisterRouter.scala:87:24]
wire out_romask_9 = |_out_romask_T_9; // @[RegisterRouter.scala:87:24]
wire out_womask_9 = &_out_womask_T_9; // @[RegisterRouter.scala:87:24]
wire out_f_rivalid_9 = out_rivalid_9 & out_rimask_9; // @[RegisterRouter.scala:87:24]
wire _out_T_104 = out_f_rivalid_9; // @[RegisterRouter.scala:87:24]
wire out_f_roready_9 = out_roready_9 & out_romask_9; // @[RegisterRouter.scala:87:24]
wire _out_T_105 = out_f_roready_9; // @[RegisterRouter.scala:87:24]
wire out_f_wivalid_9 = out_wivalid_9 & out_wimask_9; // @[RegisterRouter.scala:87:24]
wire _out_T_106 = out_f_wivalid_9; // @[RegisterRouter.scala:87:24]
assign out_f_woready_9 = out_woready_9 & out_womask_9; // @[RegisterRouter.scala:87:24]
assign valids_1_7 = out_f_woready_9; // @[RegisterRouter.scala:87:24]
wire _out_T_107 = out_f_woready_9; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_T_103 = out_front_bits_data[63:56]; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_T_191 = out_front_bits_data[63:56]; // @[RegisterRouter.scala:87:24]
assign newBytes_1_7 = out_f_woready_9 ? _out_T_103 : oldBytes_1_7; // @[RegisterRouter.scala:87:24]
wire _out_T_108 = ~out_rimask_9; // @[RegisterRouter.scala:87:24]
wire _out_T_109 = ~out_wimask_9; // @[RegisterRouter.scala:87:24]
wire _out_T_110 = ~out_romask_9; // @[RegisterRouter.scala:87:24]
wire _out_T_111 = ~out_womask_9; // @[RegisterRouter.scala:87:24]
wire [63:0] out_prepend_7 = {oldBytes_1_7, _out_prepend_T_7}; // @[RegisterRouter.scala:87:24]
wire [63:0] _out_T_112 = out_prepend_7; // @[RegisterRouter.scala:87:24]
wire [63:0] _out_T_113 = _out_T_112; // @[RegisterRouter.scala:87:24]
wire [63:0] _out_out_bits_data_WIRE_1_2 = _out_T_113; // @[MuxLiteral.scala:49:48]
wire out_rimask_10 = |_out_rimask_T_10; // @[RegisterRouter.scala:87:24]
wire out_wimask_10 = &_out_wimask_T_10; // @[RegisterRouter.scala:87:24]
wire out_romask_10 = |_out_romask_T_10; // @[RegisterRouter.scala:87:24]
wire out_womask_10 = &_out_womask_T_10; // @[RegisterRouter.scala:87:24]
wire out_f_rivalid_10 = out_rivalid_10 & out_rimask_10; // @[RegisterRouter.scala:87:24]
wire _out_T_115 = out_f_rivalid_10; // @[RegisterRouter.scala:87:24]
wire out_f_roready_10 = out_roready_10 & out_romask_10; // @[RegisterRouter.scala:87:24]
wire _out_T_116 = out_f_roready_10; // @[RegisterRouter.scala:87:24]
wire out_f_wivalid_10 = out_wivalid_10 & out_wimask_10; // @[RegisterRouter.scala:87:24]
wire _out_T_117 = out_f_wivalid_10; // @[RegisterRouter.scala:87:24]
assign out_f_woready_10 = out_woready_10 & out_womask_10; // @[RegisterRouter.scala:87:24]
assign valids_0 = out_f_woready_10; // @[RegisterRouter.scala:87:24]
wire _out_T_118 = out_f_woready_10; // @[RegisterRouter.scala:87:24]
assign newBytes_0 = out_f_woready_10 ? _out_T_114 : oldBytes_0; // @[RegisterRouter.scala:87:24]
wire _out_T_119 = ~out_rimask_10; // @[RegisterRouter.scala:87:24]
wire _out_T_120 = ~out_wimask_10; // @[RegisterRouter.scala:87:24]
wire _out_T_121 = ~out_romask_10; // @[RegisterRouter.scala:87:24]
wire _out_T_122 = ~out_womask_10; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_T_124 = _out_T_123; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_prepend_T_8 = _out_T_124; // @[RegisterRouter.scala:87:24]
wire out_rimask_11 = |_out_rimask_T_11; // @[RegisterRouter.scala:87:24]
wire out_wimask_11 = &_out_wimask_T_11; // @[RegisterRouter.scala:87:24]
wire out_romask_11 = |_out_romask_T_11; // @[RegisterRouter.scala:87:24]
wire out_womask_11 = &_out_womask_T_11; // @[RegisterRouter.scala:87:24]
wire out_f_rivalid_11 = out_rivalid_11 & out_rimask_11; // @[RegisterRouter.scala:87:24]
wire _out_T_126 = out_f_rivalid_11; // @[RegisterRouter.scala:87:24]
wire out_f_roready_11 = out_roready_11 & out_romask_11; // @[RegisterRouter.scala:87:24]
wire _out_T_127 = out_f_roready_11; // @[RegisterRouter.scala:87:24]
wire out_f_wivalid_11 = out_wivalid_11 & out_wimask_11; // @[RegisterRouter.scala:87:24]
wire _out_T_128 = out_f_wivalid_11; // @[RegisterRouter.scala:87:24]
assign out_f_woready_11 = out_woready_11 & out_womask_11; // @[RegisterRouter.scala:87:24]
assign valids_1 = out_f_woready_11; // @[RegisterRouter.scala:87:24]
wire _out_T_129 = out_f_woready_11; // @[RegisterRouter.scala:87:24]
assign newBytes_1 = out_f_woready_11 ? _out_T_125 : oldBytes_1; // @[RegisterRouter.scala:87:24]
wire _out_T_130 = ~out_rimask_11; // @[RegisterRouter.scala:87:24]
wire _out_T_131 = ~out_wimask_11; // @[RegisterRouter.scala:87:24]
wire _out_T_132 = ~out_romask_11; // @[RegisterRouter.scala:87:24]
wire _out_T_133 = ~out_womask_11; // @[RegisterRouter.scala:87:24]
wire [15:0] out_prepend_8 = {oldBytes_1, _out_prepend_T_8}; // @[RegisterRouter.scala:87:24]
wire [15:0] _out_T_134 = out_prepend_8; // @[RegisterRouter.scala:87:24]
wire [15:0] _out_T_135 = _out_T_134; // @[RegisterRouter.scala:87:24]
wire [15:0] _out_prepend_T_9 = _out_T_135; // @[RegisterRouter.scala:87:24]
wire out_rimask_12 = |_out_rimask_T_12; // @[RegisterRouter.scala:87:24]
wire out_wimask_12 = &_out_wimask_T_12; // @[RegisterRouter.scala:87:24]
wire out_romask_12 = |_out_romask_T_12; // @[RegisterRouter.scala:87:24]
wire out_womask_12 = &_out_womask_T_12; // @[RegisterRouter.scala:87:24]
wire out_f_rivalid_12 = out_rivalid_12 & out_rimask_12; // @[RegisterRouter.scala:87:24]
wire _out_T_137 = out_f_rivalid_12; // @[RegisterRouter.scala:87:24]
wire out_f_roready_12 = out_roready_12 & out_romask_12; // @[RegisterRouter.scala:87:24]
wire _out_T_138 = out_f_roready_12; // @[RegisterRouter.scala:87:24]
wire out_f_wivalid_12 = out_wivalid_12 & out_wimask_12; // @[RegisterRouter.scala:87:24]
wire _out_T_139 = out_f_wivalid_12; // @[RegisterRouter.scala:87:24]
assign out_f_woready_12 = out_woready_12 & out_womask_12; // @[RegisterRouter.scala:87:24]
assign valids_2 = out_f_woready_12; // @[RegisterRouter.scala:87:24]
wire _out_T_140 = out_f_woready_12; // @[RegisterRouter.scala:87:24]
assign newBytes_2 = out_f_woready_12 ? _out_T_136 : oldBytes_2; // @[RegisterRouter.scala:87:24]
wire _out_T_141 = ~out_rimask_12; // @[RegisterRouter.scala:87:24]
wire _out_T_142 = ~out_wimask_12; // @[RegisterRouter.scala:87:24]
wire _out_T_143 = ~out_romask_12; // @[RegisterRouter.scala:87:24]
wire _out_T_144 = ~out_womask_12; // @[RegisterRouter.scala:87:24]
wire [23:0] out_prepend_9 = {oldBytes_2, _out_prepend_T_9}; // @[RegisterRouter.scala:87:24]
wire [23:0] _out_T_145 = out_prepend_9; // @[RegisterRouter.scala:87:24]
wire [23:0] _out_T_146 = _out_T_145; // @[RegisterRouter.scala:87:24]
wire [23:0] _out_prepend_T_10 = _out_T_146; // @[RegisterRouter.scala:87:24]
wire out_rimask_13 = |_out_rimask_T_13; // @[RegisterRouter.scala:87:24]
wire out_wimask_13 = &_out_wimask_T_13; // @[RegisterRouter.scala:87:24]
wire out_romask_13 = |_out_romask_T_13; // @[RegisterRouter.scala:87:24]
wire out_womask_13 = &_out_womask_T_13; // @[RegisterRouter.scala:87:24]
wire out_f_rivalid_13 = out_rivalid_13 & out_rimask_13; // @[RegisterRouter.scala:87:24]
wire _out_T_148 = out_f_rivalid_13; // @[RegisterRouter.scala:87:24]
wire out_f_roready_13 = out_roready_13 & out_romask_13; // @[RegisterRouter.scala:87:24]
wire _out_T_149 = out_f_roready_13; // @[RegisterRouter.scala:87:24]
wire out_f_wivalid_13 = out_wivalid_13 & out_wimask_13; // @[RegisterRouter.scala:87:24]
wire _out_T_150 = out_f_wivalid_13; // @[RegisterRouter.scala:87:24]
assign out_f_woready_13 = out_woready_13 & out_womask_13; // @[RegisterRouter.scala:87:24]
assign valids_3 = out_f_woready_13; // @[RegisterRouter.scala:87:24]
wire _out_T_151 = out_f_woready_13; // @[RegisterRouter.scala:87:24]
assign newBytes_3 = out_f_woready_13 ? _out_T_147 : oldBytes_3; // @[RegisterRouter.scala:87:24]
wire _out_T_152 = ~out_rimask_13; // @[RegisterRouter.scala:87:24]
wire _out_T_153 = ~out_wimask_13; // @[RegisterRouter.scala:87:24]
wire _out_T_154 = ~out_romask_13; // @[RegisterRouter.scala:87:24]
wire _out_T_155 = ~out_womask_13; // @[RegisterRouter.scala:87:24]
wire [31:0] out_prepend_10 = {oldBytes_3, _out_prepend_T_10}; // @[RegisterRouter.scala:87:24]
wire [31:0] _out_T_156 = out_prepend_10; // @[RegisterRouter.scala:87:24]
wire [31:0] _out_T_157 = _out_T_156; // @[RegisterRouter.scala:87:24]
wire [31:0] _out_prepend_T_11 = _out_T_157; // @[RegisterRouter.scala:87:24]
wire out_rimask_14 = |_out_rimask_T_14; // @[RegisterRouter.scala:87:24]
wire out_wimask_14 = &_out_wimask_T_14; // @[RegisterRouter.scala:87:24]
wire out_romask_14 = |_out_romask_T_14; // @[RegisterRouter.scala:87:24]
wire out_womask_14 = &_out_womask_T_14; // @[RegisterRouter.scala:87:24]
wire out_f_rivalid_14 = out_rivalid_14 & out_rimask_14; // @[RegisterRouter.scala:87:24]
wire _out_T_159 = out_f_rivalid_14; // @[RegisterRouter.scala:87:24]
wire out_f_roready_14 = out_roready_14 & out_romask_14; // @[RegisterRouter.scala:87:24]
wire _out_T_160 = out_f_roready_14; // @[RegisterRouter.scala:87:24]
wire out_f_wivalid_14 = out_wivalid_14 & out_wimask_14; // @[RegisterRouter.scala:87:24]
wire _out_T_161 = out_f_wivalid_14; // @[RegisterRouter.scala:87:24]
assign out_f_woready_14 = out_woready_14 & out_womask_14; // @[RegisterRouter.scala:87:24]
assign valids_4 = out_f_woready_14; // @[RegisterRouter.scala:87:24]
wire _out_T_162 = out_f_woready_14; // @[RegisterRouter.scala:87:24]
assign newBytes_4 = out_f_woready_14 ? _out_T_158 : oldBytes_4; // @[RegisterRouter.scala:87:24]
wire _out_T_163 = ~out_rimask_14; // @[RegisterRouter.scala:87:24]
wire _out_T_164 = ~out_wimask_14; // @[RegisterRouter.scala:87:24]
wire _out_T_165 = ~out_romask_14; // @[RegisterRouter.scala:87:24]
wire _out_T_166 = ~out_womask_14; // @[RegisterRouter.scala:87:24]
wire [39:0] out_prepend_11 = {oldBytes_4, _out_prepend_T_11}; // @[RegisterRouter.scala:87:24]
wire [39:0] _out_T_167 = out_prepend_11; // @[RegisterRouter.scala:87:24]
wire [39:0] _out_T_168 = _out_T_167; // @[RegisterRouter.scala:87:24]
wire [39:0] _out_prepend_T_12 = _out_T_168; // @[RegisterRouter.scala:87:24]
wire out_rimask_15 = |_out_rimask_T_15; // @[RegisterRouter.scala:87:24]
wire out_wimask_15 = &_out_wimask_T_15; // @[RegisterRouter.scala:87:24]
wire out_romask_15 = |_out_romask_T_15; // @[RegisterRouter.scala:87:24]
wire out_womask_15 = &_out_womask_T_15; // @[RegisterRouter.scala:87:24]
wire out_f_rivalid_15 = out_rivalid_15 & out_rimask_15; // @[RegisterRouter.scala:87:24]
wire _out_T_170 = out_f_rivalid_15; // @[RegisterRouter.scala:87:24]
wire out_f_roready_15 = out_roready_15 & out_romask_15; // @[RegisterRouter.scala:87:24]
wire _out_T_171 = out_f_roready_15; // @[RegisterRouter.scala:87:24]
wire out_f_wivalid_15 = out_wivalid_15 & out_wimask_15; // @[RegisterRouter.scala:87:24]
wire _out_T_172 = out_f_wivalid_15; // @[RegisterRouter.scala:87:24]
assign out_f_woready_15 = out_woready_15 & out_womask_15; // @[RegisterRouter.scala:87:24]
assign valids_5 = out_f_woready_15; // @[RegisterRouter.scala:87:24]
wire _out_T_173 = out_f_woready_15; // @[RegisterRouter.scala:87:24]
assign newBytes_5 = out_f_woready_15 ? _out_T_169 : oldBytes_5; // @[RegisterRouter.scala:87:24]
wire _out_T_174 = ~out_rimask_15; // @[RegisterRouter.scala:87:24]
wire _out_T_175 = ~out_wimask_15; // @[RegisterRouter.scala:87:24]
wire _out_T_176 = ~out_romask_15; // @[RegisterRouter.scala:87:24]
wire _out_T_177 = ~out_womask_15; // @[RegisterRouter.scala:87:24]
wire [47:0] out_prepend_12 = {oldBytes_5, _out_prepend_T_12}; // @[RegisterRouter.scala:87:24]
wire [47:0] _out_T_178 = out_prepend_12; // @[RegisterRouter.scala:87:24]
wire [47:0] _out_T_179 = _out_T_178; // @[RegisterRouter.scala:87:24]
wire [47:0] _out_prepend_T_13 = _out_T_179; // @[RegisterRouter.scala:87:24]
wire out_rimask_16 = |_out_rimask_T_16; // @[RegisterRouter.scala:87:24]
wire out_wimask_16 = &_out_wimask_T_16; // @[RegisterRouter.scala:87:24]
wire out_romask_16 = |_out_romask_T_16; // @[RegisterRouter.scala:87:24]
wire out_womask_16 = &_out_womask_T_16; // @[RegisterRouter.scala:87:24]
wire out_f_rivalid_16 = out_rivalid_16 & out_rimask_16; // @[RegisterRouter.scala:87:24]
wire _out_T_181 = out_f_rivalid_16; // @[RegisterRouter.scala:87:24]
wire out_f_roready_16 = out_roready_16 & out_romask_16; // @[RegisterRouter.scala:87:24]
wire _out_T_182 = out_f_roready_16; // @[RegisterRouter.scala:87:24]
wire out_f_wivalid_16 = out_wivalid_16 & out_wimask_16; // @[RegisterRouter.scala:87:24]
wire _out_T_183 = out_f_wivalid_16; // @[RegisterRouter.scala:87:24]
assign out_f_woready_16 = out_woready_16 & out_womask_16; // @[RegisterRouter.scala:87:24]
assign valids_6 = out_f_woready_16; // @[RegisterRouter.scala:87:24]
wire _out_T_184 = out_f_woready_16; // @[RegisterRouter.scala:87:24]
assign newBytes_6 = out_f_woready_16 ? _out_T_180 : oldBytes_6; // @[RegisterRouter.scala:87:24]
wire _out_T_185 = ~out_rimask_16; // @[RegisterRouter.scala:87:24]
wire _out_T_186 = ~out_wimask_16; // @[RegisterRouter.scala:87:24]
wire _out_T_187 = ~out_romask_16; // @[RegisterRouter.scala:87:24]
wire _out_T_188 = ~out_womask_16; // @[RegisterRouter.scala:87:24]
wire [55:0] out_prepend_13 = {oldBytes_6, _out_prepend_T_13}; // @[RegisterRouter.scala:87:24]
wire [55:0] _out_T_189 = out_prepend_13; // @[RegisterRouter.scala:87:24]
wire [55:0] _out_T_190 = _out_T_189; // @[RegisterRouter.scala:87:24]
wire [55:0] _out_prepend_T_14 = _out_T_190; // @[RegisterRouter.scala:87:24]
wire out_rimask_17 = |_out_rimask_T_17; // @[RegisterRouter.scala:87:24]
wire out_wimask_17 = &_out_wimask_T_17; // @[RegisterRouter.scala:87:24]
wire out_romask_17 = |_out_romask_T_17; // @[RegisterRouter.scala:87:24]
wire out_womask_17 = &_out_womask_T_17; // @[RegisterRouter.scala:87:24]
wire out_f_rivalid_17 = out_rivalid_17 & out_rimask_17; // @[RegisterRouter.scala:87:24]
wire _out_T_192 = out_f_rivalid_17; // @[RegisterRouter.scala:87:24]
wire out_f_roready_17 = out_roready_17 & out_romask_17; // @[RegisterRouter.scala:87:24]
wire _out_T_193 = out_f_roready_17; // @[RegisterRouter.scala:87:24]
wire out_f_wivalid_17 = out_wivalid_17 & out_wimask_17; // @[RegisterRouter.scala:87:24]
wire _out_T_194 = out_f_wivalid_17; // @[RegisterRouter.scala:87:24]
assign out_f_woready_17 = out_woready_17 & out_womask_17; // @[RegisterRouter.scala:87:24]
assign valids_7 = out_f_woready_17; // @[RegisterRouter.scala:87:24]
wire _out_T_195 = out_f_woready_17; // @[RegisterRouter.scala:87:24]
assign newBytes_7 = out_f_woready_17 ? _out_T_191 : oldBytes_7; // @[RegisterRouter.scala:87:24]
wire _out_T_196 = ~out_rimask_17; // @[RegisterRouter.scala:87:24]
wire _out_T_197 = ~out_wimask_17; // @[RegisterRouter.scala:87:24]
wire _out_T_198 = ~out_romask_17; // @[RegisterRouter.scala:87:24]
wire _out_T_199 = ~out_womask_17; // @[RegisterRouter.scala:87:24]
wire [63:0] out_prepend_14 = {oldBytes_7, _out_prepend_T_14}; // @[RegisterRouter.scala:87:24]
wire [63:0] _out_T_200 = out_prepend_14; // @[RegisterRouter.scala:87:24]
wire [63:0] _out_T_201 = _out_T_200; // @[RegisterRouter.scala:87:24]
wire [63:0] _out_out_bits_data_WIRE_1_1 = _out_T_201; // @[MuxLiteral.scala:49:48]
wire _out_iindex_T = out_front_bits_index[0]; // @[RegisterRouter.scala:87:24]
wire _out_oindex_T = out_front_bits_index[0]; // @[RegisterRouter.scala:87:24]
wire _out_iindex_T_1 = out_front_bits_index[1]; // @[RegisterRouter.scala:87:24]
wire _out_oindex_T_1 = out_front_bits_index[1]; // @[RegisterRouter.scala:87:24]
wire _out_iindex_T_2 = out_front_bits_index[2]; // @[RegisterRouter.scala:87:24]
wire _out_oindex_T_2 = out_front_bits_index[2]; // @[RegisterRouter.scala:87:24]
wire _out_iindex_T_3 = out_front_bits_index[3]; // @[RegisterRouter.scala:87:24]
wire _out_oindex_T_3 = out_front_bits_index[3]; // @[RegisterRouter.scala:87:24]
wire _out_iindex_T_4 = out_front_bits_index[4]; // @[RegisterRouter.scala:87:24]
wire _out_oindex_T_4 = out_front_bits_index[4]; // @[RegisterRouter.scala:87:24]
wire _out_iindex_T_5 = out_front_bits_index[5]; // @[RegisterRouter.scala:87:24]
wire _out_oindex_T_5 = out_front_bits_index[5]; // @[RegisterRouter.scala:87:24]
wire _out_iindex_T_6 = out_front_bits_index[6]; // @[RegisterRouter.scala:87:24]
wire _out_oindex_T_6 = out_front_bits_index[6]; // @[RegisterRouter.scala:87:24]
wire _out_iindex_T_7 = out_front_bits_index[7]; // @[RegisterRouter.scala:87:24]
wire _out_oindex_T_7 = out_front_bits_index[7]; // @[RegisterRouter.scala:87:24]
wire _out_iindex_T_8 = out_front_bits_index[8]; // @[RegisterRouter.scala:87:24]
wire _out_oindex_T_8 = out_front_bits_index[8]; // @[RegisterRouter.scala:87:24]
wire _out_iindex_T_9 = out_front_bits_index[9]; // @[RegisterRouter.scala:87:24]
wire _out_oindex_T_9 = out_front_bits_index[9]; // @[RegisterRouter.scala:87:24]
wire _out_iindex_T_10 = out_front_bits_index[10]; // @[RegisterRouter.scala:87:24]
wire _out_oindex_T_10 = out_front_bits_index[10]; // @[RegisterRouter.scala:87:24]
wire _out_iindex_T_11 = out_front_bits_index[11]; // @[RegisterRouter.scala:87:24]
wire _out_oindex_T_11 = out_front_bits_index[11]; // @[RegisterRouter.scala:87:24]
wire _out_iindex_T_12 = out_front_bits_index[12]; // @[RegisterRouter.scala:87:24]
wire _out_oindex_T_12 = out_front_bits_index[12]; // @[RegisterRouter.scala:87:24]
wire [1:0] out_iindex = {_out_iindex_T_12, _out_iindex_T_11}; // @[RegisterRouter.scala:87:24]
wire [1:0] out_oindex = {_out_oindex_T_12, _out_oindex_T_11}; // @[RegisterRouter.scala:87:24]
wire [3:0] _out_frontSel_T = 4'h1 << out_iindex; // @[OneHot.scala:58:35]
wire out_frontSel_0 = _out_frontSel_T[0]; // @[OneHot.scala:58:35]
wire out_frontSel_1 = _out_frontSel_T[1]; // @[OneHot.scala:58:35]
wire out_frontSel_2 = _out_frontSel_T[2]; // @[OneHot.scala:58:35]
wire out_frontSel_3 = _out_frontSel_T[3]; // @[OneHot.scala:58:35]
wire [3:0] _out_backSel_T = 4'h1 << out_oindex; // @[OneHot.scala:58:35]
wire out_backSel_0 = _out_backSel_T[0]; // @[OneHot.scala:58:35]
wire out_backSel_1 = _out_backSel_T[1]; // @[OneHot.scala:58:35]
wire out_backSel_2 = _out_backSel_T[2]; // @[OneHot.scala:58:35]
wire out_backSel_3 = _out_backSel_T[3]; // @[OneHot.scala:58:35]
wire _GEN_2 = in_valid & out_front_ready; // @[RegisterRouter.scala:73:18, :87:24]
wire _out_rifireMux_T; // @[RegisterRouter.scala:87:24]
assign _out_rifireMux_T = _GEN_2; // @[RegisterRouter.scala:87:24]
wire _out_wifireMux_T; // @[RegisterRouter.scala:87:24]
assign _out_wifireMux_T = _GEN_2; // @[RegisterRouter.scala:87:24]
wire _out_rifireMux_T_1 = _out_rifireMux_T & out_front_bits_read; // @[RegisterRouter.scala:87:24]
wire _out_rifireMux_T_2 = _out_rifireMux_T_1 & out_frontSel_0; // @[RegisterRouter.scala:87:24]
assign _out_rifireMux_T_3 = _out_rifireMux_T_2 & _out_T; // @[RegisterRouter.scala:87:24]
assign out_rivalid_0 = _out_rifireMux_T_3; // @[RegisterRouter.scala:87:24]
assign out_rivalid_1 = _out_rifireMux_T_3; // @[RegisterRouter.scala:87:24]
wire _out_rifireMux_T_4 = ~_out_T; // @[RegisterRouter.scala:87:24]
wire _out_rifireMux_T_6 = _out_rifireMux_T_1 & out_frontSel_1; // @[RegisterRouter.scala:87:24]
assign _out_rifireMux_T_7 = _out_rifireMux_T_6 & _out_T_4; // @[RegisterRouter.scala:87:24]
assign out_rivalid_10 = _out_rifireMux_T_7; // @[RegisterRouter.scala:87:24]
assign out_rivalid_11 = _out_rifireMux_T_7; // @[RegisterRouter.scala:87:24]
assign out_rivalid_12 = _out_rifireMux_T_7; // @[RegisterRouter.scala:87:24]
assign out_rivalid_13 = _out_rifireMux_T_7; // @[RegisterRouter.scala:87:24]
assign out_rivalid_14 = _out_rifireMux_T_7; // @[RegisterRouter.scala:87:24]
assign out_rivalid_15 = _out_rifireMux_T_7; // @[RegisterRouter.scala:87:24]
assign out_rivalid_16 = _out_rifireMux_T_7; // @[RegisterRouter.scala:87:24]
assign out_rivalid_17 = _out_rifireMux_T_7; // @[RegisterRouter.scala:87:24]
wire _out_rifireMux_T_8 = ~_out_T_4; // @[RegisterRouter.scala:87:24]
wire _out_rifireMux_T_10 = _out_rifireMux_T_1 & out_frontSel_2; // @[RegisterRouter.scala:87:24]
assign _out_rifireMux_T_11 = _out_rifireMux_T_10 & _out_T_2; // @[RegisterRouter.scala:87:24]
assign out_rivalid_2 = _out_rifireMux_T_11; // @[RegisterRouter.scala:87:24]
assign out_rivalid_3 = _out_rifireMux_T_11; // @[RegisterRouter.scala:87:24]
assign out_rivalid_4 = _out_rifireMux_T_11; // @[RegisterRouter.scala:87:24]
assign out_rivalid_5 = _out_rifireMux_T_11; // @[RegisterRouter.scala:87:24]
assign out_rivalid_6 = _out_rifireMux_T_11; // @[RegisterRouter.scala:87:24]
assign out_rivalid_7 = _out_rifireMux_T_11; // @[RegisterRouter.scala:87:24]
assign out_rivalid_8 = _out_rifireMux_T_11; // @[RegisterRouter.scala:87:24]
assign out_rivalid_9 = _out_rifireMux_T_11; // @[RegisterRouter.scala:87:24]
wire _out_rifireMux_T_12 = ~_out_T_2; // @[RegisterRouter.scala:87:24]
wire _out_rifireMux_T_14 = _out_rifireMux_T_1 & out_frontSel_3; // @[RegisterRouter.scala:87:24]
wire _out_rifireMux_T_15 = _out_rifireMux_T_14; // @[RegisterRouter.scala:87:24]
wire _out_wifireMux_T_1 = ~out_front_bits_read; // @[RegisterRouter.scala:87:24]
wire _out_wifireMux_T_2 = _out_wifireMux_T & _out_wifireMux_T_1; // @[RegisterRouter.scala:87:24]
wire _out_wifireMux_T_3 = _out_wifireMux_T_2 & out_frontSel_0; // @[RegisterRouter.scala:87:24]
assign _out_wifireMux_T_4 = _out_wifireMux_T_3 & _out_T; // @[RegisterRouter.scala:87:24]
assign out_wivalid_0 = _out_wifireMux_T_4; // @[RegisterRouter.scala:87:24]
assign out_wivalid_1 = _out_wifireMux_T_4; // @[RegisterRouter.scala:87:24]
wire _out_wifireMux_T_5 = ~_out_T; // @[RegisterRouter.scala:87:24]
wire _out_wifireMux_T_7 = _out_wifireMux_T_2 & out_frontSel_1; // @[RegisterRouter.scala:87:24]
assign _out_wifireMux_T_8 = _out_wifireMux_T_7 & _out_T_4; // @[RegisterRouter.scala:87:24]
assign out_wivalid_10 = _out_wifireMux_T_8; // @[RegisterRouter.scala:87:24]
assign out_wivalid_11 = _out_wifireMux_T_8; // @[RegisterRouter.scala:87:24]
assign out_wivalid_12 = _out_wifireMux_T_8; // @[RegisterRouter.scala:87:24]
assign out_wivalid_13 = _out_wifireMux_T_8; // @[RegisterRouter.scala:87:24]
assign out_wivalid_14 = _out_wifireMux_T_8; // @[RegisterRouter.scala:87:24]
assign out_wivalid_15 = _out_wifireMux_T_8; // @[RegisterRouter.scala:87:24]
assign out_wivalid_16 = _out_wifireMux_T_8; // @[RegisterRouter.scala:87:24]
assign out_wivalid_17 = _out_wifireMux_T_8; // @[RegisterRouter.scala:87:24]
wire _out_wifireMux_T_9 = ~_out_T_4; // @[RegisterRouter.scala:87:24]
wire _out_wifireMux_T_11 = _out_wifireMux_T_2 & out_frontSel_2; // @[RegisterRouter.scala:87:24]
assign _out_wifireMux_T_12 = _out_wifireMux_T_11 & _out_T_2; // @[RegisterRouter.scala:87:24]
assign out_wivalid_2 = _out_wifireMux_T_12; // @[RegisterRouter.scala:87:24]
assign out_wivalid_3 = _out_wifireMux_T_12; // @[RegisterRouter.scala:87:24]
assign out_wivalid_4 = _out_wifireMux_T_12; // @[RegisterRouter.scala:87:24]
assign out_wivalid_5 = _out_wifireMux_T_12; // @[RegisterRouter.scala:87:24]
assign out_wivalid_6 = _out_wifireMux_T_12; // @[RegisterRouter.scala:87:24]
assign out_wivalid_7 = _out_wifireMux_T_12; // @[RegisterRouter.scala:87:24]
assign out_wivalid_8 = _out_wifireMux_T_12; // @[RegisterRouter.scala:87:24]
assign out_wivalid_9 = _out_wifireMux_T_12; // @[RegisterRouter.scala:87:24]
wire _out_wifireMux_T_13 = ~_out_T_2; // @[RegisterRouter.scala:87:24]
wire _out_wifireMux_T_15 = _out_wifireMux_T_2 & out_frontSel_3; // @[RegisterRouter.scala:87:24]
wire _out_wifireMux_T_16 = _out_wifireMux_T_15; // @[RegisterRouter.scala:87:24]
wire _GEN_3 = out_front_valid & out_ready; // @[RegisterRouter.scala:87:24]
wire _out_rofireMux_T; // @[RegisterRouter.scala:87:24]
assign _out_rofireMux_T = _GEN_3; // @[RegisterRouter.scala:87:24]
wire _out_wofireMux_T; // @[RegisterRouter.scala:87:24]
assign _out_wofireMux_T = _GEN_3; // @[RegisterRouter.scala:87:24]
wire _out_rofireMux_T_1 = _out_rofireMux_T & out_front_bits_read; // @[RegisterRouter.scala:87:24]
wire _out_rofireMux_T_2 = _out_rofireMux_T_1 & out_backSel_0; // @[RegisterRouter.scala:87:24]
assign _out_rofireMux_T_3 = _out_rofireMux_T_2 & _out_T_1; // @[RegisterRouter.scala:87:24]
assign out_roready_0 = _out_rofireMux_T_3; // @[RegisterRouter.scala:87:24]
assign out_roready_1 = _out_rofireMux_T_3; // @[RegisterRouter.scala:87:24]
wire _out_rofireMux_T_4 = ~_out_T_1; // @[RegisterRouter.scala:87:24]
wire _out_rofireMux_T_6 = _out_rofireMux_T_1 & out_backSel_1; // @[RegisterRouter.scala:87:24]
assign _out_rofireMux_T_7 = _out_rofireMux_T_6 & _out_T_5; // @[RegisterRouter.scala:87:24]
assign out_roready_10 = _out_rofireMux_T_7; // @[RegisterRouter.scala:87:24]
assign out_roready_11 = _out_rofireMux_T_7; // @[RegisterRouter.scala:87:24]
assign out_roready_12 = _out_rofireMux_T_7; // @[RegisterRouter.scala:87:24]
assign out_roready_13 = _out_rofireMux_T_7; // @[RegisterRouter.scala:87:24]
assign out_roready_14 = _out_rofireMux_T_7; // @[RegisterRouter.scala:87:24]
assign out_roready_15 = _out_rofireMux_T_7; // @[RegisterRouter.scala:87:24]
assign out_roready_16 = _out_rofireMux_T_7; // @[RegisterRouter.scala:87:24]
assign out_roready_17 = _out_rofireMux_T_7; // @[RegisterRouter.scala:87:24]
wire _out_rofireMux_T_8 = ~_out_T_5; // @[RegisterRouter.scala:87:24]
wire _out_rofireMux_T_10 = _out_rofireMux_T_1 & out_backSel_2; // @[RegisterRouter.scala:87:24]
assign _out_rofireMux_T_11 = _out_rofireMux_T_10 & _out_T_3; // @[RegisterRouter.scala:87:24]
assign out_roready_2 = _out_rofireMux_T_11; // @[RegisterRouter.scala:87:24]
assign out_roready_3 = _out_rofireMux_T_11; // @[RegisterRouter.scala:87:24]
assign out_roready_4 = _out_rofireMux_T_11; // @[RegisterRouter.scala:87:24]
assign out_roready_5 = _out_rofireMux_T_11; // @[RegisterRouter.scala:87:24]
assign out_roready_6 = _out_rofireMux_T_11; // @[RegisterRouter.scala:87:24]
assign out_roready_7 = _out_rofireMux_T_11; // @[RegisterRouter.scala:87:24]
assign out_roready_8 = _out_rofireMux_T_11; // @[RegisterRouter.scala:87:24]
assign out_roready_9 = _out_rofireMux_T_11; // @[RegisterRouter.scala:87:24]
wire _out_rofireMux_T_12 = ~_out_T_3; // @[RegisterRouter.scala:87:24]
wire _out_rofireMux_T_14 = _out_rofireMux_T_1 & out_backSel_3; // @[RegisterRouter.scala:87:24]
wire _out_rofireMux_T_15 = _out_rofireMux_T_14; // @[RegisterRouter.scala:87:24]
wire _out_wofireMux_T_1 = ~out_front_bits_read; // @[RegisterRouter.scala:87:24]
wire _out_wofireMux_T_2 = _out_wofireMux_T & _out_wofireMux_T_1; // @[RegisterRouter.scala:87:24]
wire _out_wofireMux_T_3 = _out_wofireMux_T_2 & out_backSel_0; // @[RegisterRouter.scala:87:24]
assign _out_wofireMux_T_4 = _out_wofireMux_T_3 & _out_T_1; // @[RegisterRouter.scala:87:24]
assign out_woready_0 = _out_wofireMux_T_4; // @[RegisterRouter.scala:87:24]
assign out_woready_1 = _out_wofireMux_T_4; // @[RegisterRouter.scala:87:24]
wire _out_wofireMux_T_5 = ~_out_T_1; // @[RegisterRouter.scala:87:24]
wire _out_wofireMux_T_7 = _out_wofireMux_T_2 & out_backSel_1; // @[RegisterRouter.scala:87:24]
assign _out_wofireMux_T_8 = _out_wofireMux_T_7 & _out_T_5; // @[RegisterRouter.scala:87:24]
assign out_woready_10 = _out_wofireMux_T_8; // @[RegisterRouter.scala:87:24]
assign out_woready_11 = _out_wofireMux_T_8; // @[RegisterRouter.scala:87:24]
assign out_woready_12 = _out_wofireMux_T_8; // @[RegisterRouter.scala:87:24]
assign out_woready_13 = _out_wofireMux_T_8; // @[RegisterRouter.scala:87:24]
assign out_woready_14 = _out_wofireMux_T_8; // @[RegisterRouter.scala:87:24]
assign out_woready_15 = _out_wofireMux_T_8; // @[RegisterRouter.scala:87:24]
assign out_woready_16 = _out_wofireMux_T_8; // @[RegisterRouter.scala:87:24]
assign out_woready_17 = _out_wofireMux_T_8; // @[RegisterRouter.scala:87:24]
wire _out_wofireMux_T_9 = ~_out_T_5; // @[RegisterRouter.scala:87:24]
wire _out_wofireMux_T_11 = _out_wofireMux_T_2 & out_backSel_2; // @[RegisterRouter.scala:87:24]
assign _out_wofireMux_T_12 = _out_wofireMux_T_11 & _out_T_3; // @[RegisterRouter.scala:87:24]
assign out_woready_2 = _out_wofireMux_T_12; // @[RegisterRouter.scala:87:24]
assign out_woready_3 = _out_wofireMux_T_12; // @[RegisterRouter.scala:87:24]
assign out_woready_4 = _out_wofireMux_T_12; // @[RegisterRouter.scala:87:24]
assign out_woready_5 = _out_wofireMux_T_12; // @[RegisterRouter.scala:87:24]
assign out_woready_6 = _out_wofireMux_T_12; // @[RegisterRouter.scala:87:24]
assign out_woready_7 = _out_wofireMux_T_12; // @[RegisterRouter.scala:87:24]
assign out_woready_8 = _out_wofireMux_T_12; // @[RegisterRouter.scala:87:24]
assign out_woready_9 = _out_wofireMux_T_12; // @[RegisterRouter.scala:87:24]
wire _out_wofireMux_T_13 = ~_out_T_3; // @[RegisterRouter.scala:87:24]
wire _out_wofireMux_T_15 = _out_wofireMux_T_2 & out_backSel_3; // @[RegisterRouter.scala:87:24]
wire _out_wofireMux_T_16 = _out_wofireMux_T_15; // @[RegisterRouter.scala:87:24]
assign in_ready = _out_in_ready_T; // @[RegisterRouter.scala:73:18, :87:24]
assign out_front_valid = _out_front_valid_T; // @[RegisterRouter.scala:87:24]
assign out_front_ready = _out_front_ready_T; // @[RegisterRouter.scala:87:24]
assign out_valid = _out_out_valid_T; // @[RegisterRouter.scala:87:24]
wire [3:0] _GEN_4 = {{1'h1}, {_out_out_bits_data_WIRE_2}, {_out_out_bits_data_WIRE_1}, {_out_out_bits_data_WIRE_0}}; // @[MuxLiteral.scala:49:{10,48}]
wire _out_out_bits_data_T_1 = _GEN_4[out_oindex]; // @[MuxLiteral.scala:49:10]
wire [63:0] _out_out_bits_data_WIRE_1_0 = {32'h0, _out_T_25}; // @[MuxLiteral.scala:49:48]
wire [3:0][63:0] _GEN_5 = {{64'h0}, {_out_out_bits_data_WIRE_1_2}, {_out_out_bits_data_WIRE_1_1}, {_out_out_bits_data_WIRE_1_0}}; // @[MuxLiteral.scala:49:{10,48}]
wire [63:0] _out_out_bits_data_T_3 = _GEN_5[out_oindex]; // @[MuxLiteral.scala:49:10]
assign _out_out_bits_data_T_4 = _out_out_bits_data_T_1 ? _out_out_bits_data_T_3 : 64'h0; // @[MuxLiteral.scala:49:10]
assign out_bits_data = _out_out_bits_data_T_4; // @[RegisterRouter.scala:87:24]
assign nodeIn_d_bits_size = nodeIn_d_bits_d_size; // @[Edges.scala:792:17]
assign nodeIn_d_bits_source = nodeIn_d_bits_d_source; // @[Edges.scala:792:17]
assign nodeIn_d_bits_opcode = {2'h0, _nodeIn_d_bits_opcode_T}; // @[RegisterRouter.scala:105:{19,25}]
always @(posedge clock) begin // @[CLINT.scala:65:9]
if (reset) begin // @[CLINT.scala:65:9]
time_0 <= 64'h0; // @[CLINT.scala:73:23]
ipi_0 <= 1'h0; // @[CLINT.scala:78:41]
end
else begin // @[CLINT.scala:65:9]
if (valids_1_0 | valids_1_1 | valids_1_2 | valids_1_3 | valids_1_4 | valids_1_5 | valids_1_6 | valids_1_7) // @[RegField.scala:153:29, :154:27]
time_0 <= _time_T_2; // @[RegField.scala:154:52]
else if (io_rtcTick_0) // @[CLINT.scala:65:9]
time_0 <= _time_T_1; // @[CLINT.scala:73:23, :74:38]
if (out_f_woready) // @[RegisterRouter.scala:87:24]
ipi_0 <= _out_T_6; // @[RegisterRouter.scala:87:24]
end
if (valids_0 | valids_1 | valids_2 | valids_3 | valids_4 | valids_5 | valids_6 | valids_7) // @[RegField.scala:153:29, :154:27]
timecmp_0 <= _timecmp_0_T; // @[RegField.scala:154:52]
  end // @[CLINT.scala:65:9]
TLMonitor_95 monitor ( // @[Nodes.scala:27:25]
.clock (clock),
.reset (reset),
.io_in_a_ready (nodeIn_a_ready), // @[MixedNode.scala:551:17]
.io_in_a_valid (nodeIn_a_valid), // @[MixedNode.scala:551:17]
.io_in_a_bits_opcode (nodeIn_a_bits_opcode), // @[MixedNode.scala:551:17]
.io_in_a_bits_param (nodeIn_a_bits_param), // @[MixedNode.scala:551:17]
.io_in_a_bits_size (nodeIn_a_bits_size), // @[MixedNode.scala:551:17]
.io_in_a_bits_source (nodeIn_a_bits_source), // @[MixedNode.scala:551:17]
.io_in_a_bits_address (nodeIn_a_bits_address), // @[MixedNode.scala:551:17]
.io_in_a_bits_mask (nodeIn_a_bits_mask), // @[MixedNode.scala:551:17]
.io_in_a_bits_data (nodeIn_a_bits_data), // @[MixedNode.scala:551:17]
.io_in_a_bits_corrupt (nodeIn_a_bits_corrupt), // @[MixedNode.scala:551:17]
.io_in_d_ready (nodeIn_d_ready), // @[MixedNode.scala:551:17]
.io_in_d_valid (nodeIn_d_valid), // @[MixedNode.scala:551:17]
.io_in_d_bits_opcode (nodeIn_d_bits_opcode), // @[MixedNode.scala:551:17]
.io_in_d_bits_size (nodeIn_d_bits_size), // @[MixedNode.scala:551:17]
.io_in_d_bits_source (nodeIn_d_bits_source), // @[MixedNode.scala:551:17]
.io_in_d_bits_data (nodeIn_d_bits_data) // @[MixedNode.scala:551:17]
); // @[Nodes.scala:27:25]
assign auto_int_out_0 = auto_int_out_0_0; // @[CLINT.scala:65:9]
assign auto_int_out_1 = auto_int_out_1_0; // @[CLINT.scala:65:9]
assign auto_in_a_ready = auto_in_a_ready_0; // @[CLINT.scala:65:9]
assign auto_in_d_valid = auto_in_d_valid_0; // @[CLINT.scala:65:9]
assign auto_in_d_bits_opcode = auto_in_d_bits_opcode_0; // @[CLINT.scala:65:9]
assign auto_in_d_bits_size = auto_in_d_bits_size_0; // @[CLINT.scala:65:9]
assign auto_in_d_bits_source = auto_in_d_bits_source_0; // @[CLINT.scala:65:9]
assign auto_in_d_bits_data = auto_in_d_bits_data_0; // @[CLINT.scala:65:9]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File ShiftRegisterPriorityQueue.scala:
package compressacc
import chisel3._
import chisel3.util._
// TODO : support enq & deq at the same cycle
class PriorityQueueStageIO(keyWidth: Int, value: ValueInfo) extends Bundle {
val output_prev = KeyValue(keyWidth, value)
val output_nxt = KeyValue(keyWidth, value)
val input_prev = Flipped(KeyValue(keyWidth, value))
val input_nxt = Flipped(KeyValue(keyWidth, value))
val cmd = Flipped(Valid(UInt(1.W)))
val insert_here = Input(Bool())
val cur_input_keyval = Flipped(KeyValue(keyWidth, value))
val cur_output_keyval = KeyValue(keyWidth, value)
}
class PriorityQueueStage(keyWidth: Int, value: ValueInfo) extends Module {
val io = IO(new PriorityQueueStageIO(keyWidth, value))
dontTouch(io)
val CMD_DEQ = 0.U
val CMD_ENQ = 1.U
val MAX_VALUE = (1 << keyWidth) - 1
val key_reg = RegInit(MAX_VALUE.U(keyWidth.W))
val value_reg = Reg(value)
io.output_prev.key := key_reg
io.output_prev.value := value_reg
io.output_nxt.key := key_reg
io.output_nxt.value := value_reg
io.cur_output_keyval.key := key_reg
io.cur_output_keyval.value := value_reg
when (io.cmd.valid) {
switch (io.cmd.bits) {
is (CMD_DEQ) {
key_reg := io.input_nxt.key
value_reg := io.input_nxt.value
}
is (CMD_ENQ) {
when (io.insert_here) {
key_reg := io.cur_input_keyval.key
value_reg := io.cur_input_keyval.value
} .elsewhen (key_reg >= io.cur_input_keyval.key) {
key_reg := io.input_prev.key
value_reg := io.input_prev.value
} .otherwise {
// do nothing
}
}
}
}
}
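// Worked example of the stage behaviour (illustrative only, for a 4-stage queue
// holding keys 2, 5, 9, MAX from head to tail): enqueueing key 4 asserts
// insert_here on the stage holding 5, which captures the new key/value, while the
// stages holding 5 and 9 see key_reg >= 4 and take their predecessor's old entry,
// leaving 2, 4, 5, 9. A dequeue shifts the other way: every stage copies
// input_nxt from its successor and the tail stage refills with MAX_VALUE.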
object PriorityQueueStage {
def apply(keyWidth: Int, v: ValueInfo): PriorityQueueStage = new PriorityQueueStage(keyWidth, v)
}
// TODO
// - This design is not scalable as the enqueued keyval is broadcast to all the stages
// - Add pipeline registers later
class PriorityQueueIO(queSize: Int, keyWidth: Int, value: ValueInfo) extends Bundle {
val cnt_bits = log2Ceil(queSize+1)
val counter = Output(UInt(cnt_bits.W))
val enq = Flipped(Decoupled(KeyValue(keyWidth, value)))
val deq = Decoupled(KeyValue(keyWidth, value))
}
class PriorityQueue(queSize: Int, keyWidth: Int, value: ValueInfo) extends Module {
val keyWidthInternal = keyWidth + 1
val CMD_DEQ = 0.U
val CMD_ENQ = 1.U
val io = IO(new PriorityQueueIO(queSize, keyWidthInternal, value))
dontTouch(io)
val MAX_VALUE = ((1 << keyWidthInternal) - 1).U
val cnt_bits = log2Ceil(queSize+1)
  // do not consider cases where we are inserting more entries than the queSize
val counter = RegInit(0.U(cnt_bits.W))
io.counter := counter
val full = (counter === queSize.U)
val empty = (counter === 0.U)
io.deq.valid := !empty
io.enq.ready := !full
when (io.enq.fire) {
counter := counter + 1.U
}
when (io.deq.fire) {
counter := counter - 1.U
}
val cmd_valid = io.enq.valid || io.deq.ready
val cmd = Mux(io.enq.valid, CMD_ENQ, CMD_DEQ)
assert(!(io.enq.valid && io.deq.ready))
val stages = Seq.fill(queSize)(Module(new PriorityQueueStage(keyWidthInternal, value)))
for (i <- 0 until (queSize - 1)) {
stages(i+1).io.input_prev <> stages(i).io.output_nxt
stages(i).io.input_nxt <> stages(i+1).io.output_prev
}
stages(queSize-1).io.input_nxt.key := MAX_VALUE
// stages(queSize-1).io.input_nxt.value :=
stages(queSize-1).io.input_nxt.value.symbol := 0.U
// stages(queSize-1).io.input_nxt.value.child(0) := 0.U
// stages(queSize-1).io.input_nxt.value.child(1) := 0.U
stages(0).io.input_prev.key := io.enq.bits.key
stages(0).io.input_prev.value <> io.enq.bits.value
for (i <- 0 until queSize) {
stages(i).io.cmd.valid := cmd_valid
stages(i).io.cmd.bits := cmd
stages(i).io.cur_input_keyval <> io.enq.bits
}
val is_large_or_equal = WireInit(VecInit(Seq.fill(queSize)(false.B)))
for (i <- 0 until queSize) {
is_large_or_equal(i) := (stages(i).io.cur_output_keyval.key >= io.enq.bits.key)
}
val is_large_or_equal_cat = Wire(UInt(queSize.W))
is_large_or_equal_cat := Cat(is_large_or_equal.reverse)
val insert_here_idx = PriorityEncoder(is_large_or_equal_cat)
for (i <- 0 until queSize) {
when (i.U === insert_here_idx) {
stages(i).io.insert_here := true.B
} .otherwise {
stages(i).io.insert_here := false.B
}
}
io.deq.bits <> stages(0).io.output_prev
}
| module PriorityQueueStage_108( // @[ShiftRegisterPriorityQueue.scala:21:7]
input clock, // @[ShiftRegisterPriorityQueue.scala:21:7]
input reset, // @[ShiftRegisterPriorityQueue.scala:21:7]
output [30:0] io_output_prev_key, // @[ShiftRegisterPriorityQueue.scala:22:14]
output [9:0] io_output_prev_value_symbol, // @[ShiftRegisterPriorityQueue.scala:22:14]
output [30:0] io_output_nxt_key, // @[ShiftRegisterPriorityQueue.scala:22:14]
output [9:0] io_output_nxt_value_symbol, // @[ShiftRegisterPriorityQueue.scala:22:14]
input [30:0] io_input_prev_key, // @[ShiftRegisterPriorityQueue.scala:22:14]
input [9:0] io_input_prev_value_symbol, // @[ShiftRegisterPriorityQueue.scala:22:14]
input [30:0] io_input_nxt_key, // @[ShiftRegisterPriorityQueue.scala:22:14]
input [9:0] io_input_nxt_value_symbol, // @[ShiftRegisterPriorityQueue.scala:22:14]
input io_cmd_valid, // @[ShiftRegisterPriorityQueue.scala:22:14]
input io_cmd_bits, // @[ShiftRegisterPriorityQueue.scala:22:14]
input io_insert_here, // @[ShiftRegisterPriorityQueue.scala:22:14]
input [30:0] io_cur_input_keyval_key, // @[ShiftRegisterPriorityQueue.scala:22:14]
input [9:0] io_cur_input_keyval_value_symbol, // @[ShiftRegisterPriorityQueue.scala:22:14]
output [30:0] io_cur_output_keyval_key, // @[ShiftRegisterPriorityQueue.scala:22:14]
output [9:0] io_cur_output_keyval_value_symbol // @[ShiftRegisterPriorityQueue.scala:22:14]
);
wire [30:0] io_input_prev_key_0 = io_input_prev_key; // @[ShiftRegisterPriorityQueue.scala:21:7]
wire [9:0] io_input_prev_value_symbol_0 = io_input_prev_value_symbol; // @[ShiftRegisterPriorityQueue.scala:21:7]
wire [30:0] io_input_nxt_key_0 = io_input_nxt_key; // @[ShiftRegisterPriorityQueue.scala:21:7]
wire [9:0] io_input_nxt_value_symbol_0 = io_input_nxt_value_symbol; // @[ShiftRegisterPriorityQueue.scala:21:7]
wire io_cmd_valid_0 = io_cmd_valid; // @[ShiftRegisterPriorityQueue.scala:21:7]
wire io_cmd_bits_0 = io_cmd_bits; // @[ShiftRegisterPriorityQueue.scala:21:7]
wire io_insert_here_0 = io_insert_here; // @[ShiftRegisterPriorityQueue.scala:21:7]
wire [30:0] io_cur_input_keyval_key_0 = io_cur_input_keyval_key; // @[ShiftRegisterPriorityQueue.scala:21:7]
wire [9:0] io_cur_input_keyval_value_symbol_0 = io_cur_input_keyval_value_symbol; // @[ShiftRegisterPriorityQueue.scala:21:7]
wire [9:0] io_output_prev_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7]
wire [30:0] io_output_prev_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7]
wire [9:0] io_output_nxt_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7]
wire [30:0] io_output_nxt_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7]
wire [9:0] io_cur_output_keyval_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7]
wire [30:0] io_cur_output_keyval_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7]
reg [30:0] key_reg; // @[ShiftRegisterPriorityQueue.scala:30:24]
assign io_output_prev_key_0 = key_reg; // @[ShiftRegisterPriorityQueue.scala:21:7, :30:24]
assign io_output_nxt_key_0 = key_reg; // @[ShiftRegisterPriorityQueue.scala:21:7, :30:24]
assign io_cur_output_keyval_key_0 = key_reg; // @[ShiftRegisterPriorityQueue.scala:21:7, :30:24]
reg [9:0] value_reg_symbol; // @[ShiftRegisterPriorityQueue.scala:31:22]
assign io_output_prev_value_symbol_0 = value_reg_symbol; // @[ShiftRegisterPriorityQueue.scala:21:7, :31:22]
assign io_output_nxt_value_symbol_0 = value_reg_symbol; // @[ShiftRegisterPriorityQueue.scala:21:7, :31:22]
assign io_cur_output_keyval_value_symbol_0 = value_reg_symbol; // @[ShiftRegisterPriorityQueue.scala:21:7, :31:22]
wire _T_2 = key_reg >= io_cur_input_keyval_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7, :30:24, :52:30]
always @(posedge clock) begin // @[ShiftRegisterPriorityQueue.scala:21:7]
if (reset) // @[ShiftRegisterPriorityQueue.scala:21:7]
key_reg <= 31'h7FFFFFFF; // @[ShiftRegisterPriorityQueue.scala:30:24]
else if (io_cmd_valid_0) begin // @[ShiftRegisterPriorityQueue.scala:21:7]
if (io_cmd_bits_0) begin // @[ShiftRegisterPriorityQueue.scala:21:7]
if (io_insert_here_0) // @[ShiftRegisterPriorityQueue.scala:21:7]
key_reg <= io_cur_input_keyval_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7, :30:24]
else if (_T_2) // @[ShiftRegisterPriorityQueue.scala:52:30]
key_reg <= io_input_prev_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7, :30:24]
end
else // @[ShiftRegisterPriorityQueue.scala:21:7]
key_reg <= io_input_nxt_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7, :30:24]
end
if (io_cmd_valid_0) begin // @[ShiftRegisterPriorityQueue.scala:21:7]
if (io_cmd_bits_0) begin // @[ShiftRegisterPriorityQueue.scala:21:7]
if (io_insert_here_0) // @[ShiftRegisterPriorityQueue.scala:21:7]
value_reg_symbol <= io_cur_input_keyval_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7, :31:22]
else if (_T_2) // @[ShiftRegisterPriorityQueue.scala:52:30]
value_reg_symbol <= io_input_prev_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7, :31:22]
end
else // @[ShiftRegisterPriorityQueue.scala:21:7]
value_reg_symbol <= io_input_nxt_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7, :31:22]
end
  end // always @(posedge clock)
assign io_output_prev_key = io_output_prev_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7]
assign io_output_prev_value_symbol = io_output_prev_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7]
assign io_output_nxt_key = io_output_nxt_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7]
assign io_output_nxt_value_symbol = io_output_nxt_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7]
assign io_cur_output_keyval_key = io_cur_output_keyval_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7]
assign io_cur_output_keyval_value_symbol = io_cur_output_keyval_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7]
endmodule |
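The two classes above implement a sorted shift-register queue: on an enqueue the first stage whose key is greater than or equal to the new key captures it and every later stage shifts one slot toward the tail, while on a dequeue every stage shifts one slot toward the head and the tail refills with the MAX_VALUE sentinel. A minimal, software-only sketch of that behaviour follows; the PQModel object and the Entry case class are illustrative assumptions, and one enqueue or one dequeue per step is assumed, as the RTL asserts.

// Software model of the shift-register priority queue (sketch only, not part of compressacc).
object PQModel {
  case class Entry(key: Long, symbol: Int)
  val MaxKey: Long = (1L << 31) - 1            // empty-slot marker, like key_reg's reset value

  // Enqueue: the first stage whose key >= the new key takes it (insert_here_idx);
  // every later stage shifts toward the tail. Assumes the queue is not full.
  def enq(q: Vector[Entry], in: Entry): Vector[Entry] = {
    val idx = q.indexWhere(_.key >= in.key)
    q.patch(idx, Seq(in), 0).init
  }

  // Dequeue: stage 0 is popped, every stage shifts toward the head, the tail refills with MaxKey.
  def deq(q: Vector[Entry]): (Entry, Vector[Entry]) =
    (q.head, q.tail :+ Entry(MaxKey, 0))

  def main(args: Array[String]): Unit = {
    var q = Vector.fill(4)(Entry(MaxKey, 0))
    q = enq(q, Entry(5, 1)); q = enq(q, Entry(3, 2)); q = enq(q, Entry(7, 3))
    println(q.map(_.key))                      // Vector(3, 5, 7, 2147483647)
  }
}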
Generate the Verilog code corresponding to the following Chisel files.
File Tile.scala:
// See README.md for license details.
package gemmini
import chisel3._
import chisel3.util._
import Util._
/**
* A Tile is a purely combinational 2D array of passThrough PEs.
  * a, b, d, and control are broadcast across the entire array and are passed through to the Tile's outputs
* @param width The data width of each PE in bits
* @param rows Number of PEs on each row
* @param columns Number of PEs on each column
*/
class Tile[T <: Data](inputType: T, outputType: T, accType: T, df: Dataflow.Value, tree_reduction: Boolean, max_simultaneous_matmuls: Int, val rows: Int, val columns: Int)(implicit ev: Arithmetic[T]) extends Module {
val io = IO(new Bundle {
val in_a = Input(Vec(rows, inputType))
val in_b = Input(Vec(columns, outputType)) // This is the output of the tile next to it
val in_d = Input(Vec(columns, outputType))
val in_control = Input(Vec(columns, new PEControl(accType)))
val in_id = Input(Vec(columns, UInt(log2Up(max_simultaneous_matmuls).W)))
val in_last = Input(Vec(columns, Bool()))
val out_a = Output(Vec(rows, inputType))
val out_c = Output(Vec(columns, outputType))
val out_b = Output(Vec(columns, outputType))
val out_control = Output(Vec(columns, new PEControl(accType)))
val out_id = Output(Vec(columns, UInt(log2Up(max_simultaneous_matmuls).W)))
val out_last = Output(Vec(columns, Bool()))
val in_valid = Input(Vec(columns, Bool()))
val out_valid = Output(Vec(columns, Bool()))
val bad_dataflow = Output(Bool())
})
import ev._
val tile = Seq.fill(rows, columns)(Module(new PE(inputType, outputType, accType, df, max_simultaneous_matmuls)))
val tileT = tile.transpose
// TODO: abstract hori/vert broadcast, all these connections look the same
// Broadcast 'a' horizontally across the Tile
for (r <- 0 until rows) {
tile(r).foldLeft(io.in_a(r)) {
case (in_a, pe) =>
pe.io.in_a := in_a
pe.io.out_a
}
}
// Broadcast 'b' vertically across the Tile
for (c <- 0 until columns) {
tileT(c).foldLeft(io.in_b(c)) {
case (in_b, pe) =>
pe.io.in_b := (if (tree_reduction) in_b.zero else in_b)
pe.io.out_b
}
}
// Broadcast 'd' vertically across the Tile
for (c <- 0 until columns) {
tileT(c).foldLeft(io.in_d(c)) {
case (in_d, pe) =>
pe.io.in_d := in_d
pe.io.out_c
}
}
// Broadcast 'control' vertically across the Tile
for (c <- 0 until columns) {
tileT(c).foldLeft(io.in_control(c)) {
case (in_ctrl, pe) =>
pe.io.in_control := in_ctrl
pe.io.out_control
}
}
// Broadcast 'garbage' vertically across the Tile
for (c <- 0 until columns) {
tileT(c).foldLeft(io.in_valid(c)) {
case (v, pe) =>
pe.io.in_valid := v
pe.io.out_valid
}
}
// Broadcast 'id' vertically across the Tile
for (c <- 0 until columns) {
tileT(c).foldLeft(io.in_id(c)) {
case (id, pe) =>
pe.io.in_id := id
pe.io.out_id
}
}
// Broadcast 'last' vertically across the Tile
for (c <- 0 until columns) {
tileT(c).foldLeft(io.in_last(c)) {
case (last, pe) =>
pe.io.in_last := last
pe.io.out_last
}
}
// Drive the Tile's bottom IO
for (c <- 0 until columns) {
io.out_c(c) := tile(rows-1)(c).io.out_c
io.out_control(c) := tile(rows-1)(c).io.out_control
io.out_id(c) := tile(rows-1)(c).io.out_id
io.out_last(c) := tile(rows-1)(c).io.out_last
io.out_valid(c) := tile(rows-1)(c).io.out_valid
io.out_b(c) := {
if (tree_reduction) {
val prods = tileT(c).map(_.io.out_b)
accumulateTree(prods :+ io.in_b(c))
} else {
tile(rows - 1)(c).io.out_b
}
}
}
io.bad_dataflow := tile.map(_.map(_.io.bad_dataflow).reduce(_||_)).reduce(_||_)
// Drive the Tile's right IO
for (r <- 0 until rows) {
io.out_a(r) := tile(r)(columns-1).io.out_a
}
}
| module Tile_151( // @[Tile.scala:16:7]
input clock, // @[Tile.scala:16:7]
input reset, // @[Tile.scala:16:7]
input [7:0] io_in_a_0, // @[Tile.scala:17:14]
input [19:0] io_in_b_0, // @[Tile.scala:17:14]
input [19:0] io_in_d_0, // @[Tile.scala:17:14]
input io_in_control_0_dataflow, // @[Tile.scala:17:14]
input io_in_control_0_propagate, // @[Tile.scala:17:14]
input [4:0] io_in_control_0_shift, // @[Tile.scala:17:14]
input [2:0] io_in_id_0, // @[Tile.scala:17:14]
input io_in_last_0, // @[Tile.scala:17:14]
output [7:0] io_out_a_0, // @[Tile.scala:17:14]
output [19:0] io_out_c_0, // @[Tile.scala:17:14]
output [19:0] io_out_b_0, // @[Tile.scala:17:14]
output io_out_control_0_dataflow, // @[Tile.scala:17:14]
output io_out_control_0_propagate, // @[Tile.scala:17:14]
output [4:0] io_out_control_0_shift, // @[Tile.scala:17:14]
output [2:0] io_out_id_0, // @[Tile.scala:17:14]
output io_out_last_0, // @[Tile.scala:17:14]
input io_in_valid_0, // @[Tile.scala:17:14]
output io_out_valid_0, // @[Tile.scala:17:14]
output io_bad_dataflow // @[Tile.scala:17:14]
);
wire [7:0] io_in_a_0_0 = io_in_a_0; // @[Tile.scala:16:7]
wire [19:0] io_in_b_0_0 = io_in_b_0; // @[Tile.scala:16:7]
wire [19:0] io_in_d_0_0 = io_in_d_0; // @[Tile.scala:16:7]
wire io_in_control_0_dataflow_0 = io_in_control_0_dataflow; // @[Tile.scala:16:7]
wire io_in_control_0_propagate_0 = io_in_control_0_propagate; // @[Tile.scala:16:7]
wire [4:0] io_in_control_0_shift_0 = io_in_control_0_shift; // @[Tile.scala:16:7]
wire [2:0] io_in_id_0_0 = io_in_id_0; // @[Tile.scala:16:7]
wire io_in_last_0_0 = io_in_last_0; // @[Tile.scala:16:7]
wire io_in_valid_0_0 = io_in_valid_0; // @[Tile.scala:16:7]
wire [7:0] io_out_a_0_0; // @[Tile.scala:16:7]
wire [19:0] io_out_c_0_0; // @[Tile.scala:16:7]
wire [19:0] io_out_b_0_0; // @[Tile.scala:16:7]
wire io_out_control_0_dataflow_0; // @[Tile.scala:16:7]
wire io_out_control_0_propagate_0; // @[Tile.scala:16:7]
wire [4:0] io_out_control_0_shift_0; // @[Tile.scala:16:7]
wire [2:0] io_out_id_0_0; // @[Tile.scala:16:7]
wire io_out_last_0_0; // @[Tile.scala:16:7]
wire io_out_valid_0_0; // @[Tile.scala:16:7]
wire io_bad_dataflow_0; // @[Tile.scala:16:7]
PE_407 tile_0_0 ( // @[Tile.scala:42:44]
.clock (clock),
.reset (reset),
.io_in_a (io_in_a_0_0), // @[Tile.scala:16:7]
.io_in_b (io_in_b_0_0), // @[Tile.scala:16:7]
.io_in_d (io_in_d_0_0), // @[Tile.scala:16:7]
.io_out_a (io_out_a_0_0),
.io_out_b (io_out_b_0_0),
.io_out_c (io_out_c_0_0),
.io_in_control_dataflow (io_in_control_0_dataflow_0), // @[Tile.scala:16:7]
.io_in_control_propagate (io_in_control_0_propagate_0), // @[Tile.scala:16:7]
.io_in_control_shift (io_in_control_0_shift_0), // @[Tile.scala:16:7]
.io_out_control_dataflow (io_out_control_0_dataflow_0),
.io_out_control_propagate (io_out_control_0_propagate_0),
.io_out_control_shift (io_out_control_0_shift_0),
.io_in_id (io_in_id_0_0), // @[Tile.scala:16:7]
.io_out_id (io_out_id_0_0),
.io_in_last (io_in_last_0_0), // @[Tile.scala:16:7]
.io_out_last (io_out_last_0_0),
.io_in_valid (io_in_valid_0_0), // @[Tile.scala:16:7]
.io_out_valid (io_out_valid_0_0),
.io_bad_dataflow (io_bad_dataflow_0)
); // @[Tile.scala:42:44]
assign io_out_a_0 = io_out_a_0_0; // @[Tile.scala:16:7]
assign io_out_c_0 = io_out_c_0_0; // @[Tile.scala:16:7]
assign io_out_b_0 = io_out_b_0_0; // @[Tile.scala:16:7]
assign io_out_control_0_dataflow = io_out_control_0_dataflow_0; // @[Tile.scala:16:7]
assign io_out_control_0_propagate = io_out_control_0_propagate_0; // @[Tile.scala:16:7]
assign io_out_control_0_shift = io_out_control_0_shift_0; // @[Tile.scala:16:7]
assign io_out_id_0 = io_out_id_0_0; // @[Tile.scala:16:7]
assign io_out_last_0 = io_out_last_0_0; // @[Tile.scala:16:7]
assign io_out_valid_0 = io_out_valid_0_0; // @[Tile.scala:16:7]
assign io_bad_dataflow = io_bad_dataflow_0; // @[Tile.scala:16:7]
endmodule |
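The Tile wiring above leans on one idiom: foldLeft threads a signal through a row or column of PEs, with each PE consuming the value carried so far and returning the value the next PE should see. A small pure-Scala sketch of that chaining shape is below; FakePE and the +1 pass-through are illustrative stand-ins, not gemmini code (the real PEs forward a unchanged).

// foldLeft chaining sketch (illustrative only).
case class FakePE(id: Int) {
  var inA: Int = 0
  def outA: Int = inA + 1        // stand-in for the PE's pass-through output
}

object FoldWiring {
  def main(args: Array[String]): Unit = {
    val row = Seq(FakePE(0), FakePE(1), FakePE(2))
    val lastOut = row.foldLeft(10) { case (prev, pe) =>
      pe.inA = prev              // corresponds to pe.io.in_a := in_a
      pe.outA                    // corresponds to returning pe.io.out_a for the next PE
    }
    println(lastOut)             // 13: the value after passing through three stages
  }
}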
Generate the Verilog code corresponding to the following Chisel files.
File ShiftReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
// Similar to the Chisel ShiftRegister but allows the user to suggest a
// name to the registers that get instantiated, and
// to provide a reset value.
object ShiftRegInit {
def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T =
(0 until n).foldRight(in) {
case (i, next) => {
val r = RegNext(next, init)
name.foreach { na => r.suggestName(s"${na}_${i}") }
r
}
}
}
/** These wrap behavioral
* shift registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
* The different types vary in their reset behavior:
* AsyncResetShiftReg -- Asynchronously reset register array
* A W(width) x D(depth) sized array is constructed from D instantiations of a
* W-wide register vector. Functionally identical to AsyncResetSyncrhonizerShiftReg,
* but only used for timing applications
*/
abstract class AbstractPipelineReg(w: Int = 1) extends Module {
val io = IO(new Bundle {
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
}
)
}
object AbstractPipelineReg {
def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = {
val chain = Module(gen)
name.foreach{ chain.suggestName(_) }
chain.io.d := in.asUInt
chain.io.q.asTypeOf(in)
}
}
class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) {
require(depth > 0, "Depth must be greater than 0.")
override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}"
val chain = List.tabulate(depth) { i =>
Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}")
}
chain.last.io.d := io.d
chain.last.io.en := true.B
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink.io.d := source.io.q
sink.io.en := true.B
}
io.q := chain.head.io.q
}
object AsyncResetShiftReg {
def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name)
def apply [T <: Data](in: T, depth: Int, name: Option[String]): T =
apply(in, depth, 0, name)
def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T =
apply(in, depth, init.litValue.toInt, name)
def apply [T <: Data](in: T, depth: Int, init: T): T =
apply (in, depth, init.litValue.toInt, None)
}
File AsyncQueue.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util._
case class AsyncQueueParams(
depth: Int = 8,
sync: Int = 3,
safe: Boolean = true,
// If safe is true, then effort is made to resynchronize the crossing indices when either side is reset.
// This makes it safe/possible to reset one side of the crossing (but not the other) when the queue is empty.
narrow: Boolean = false)
// If narrow is true then the read mux is moved to the source side of the crossing.
// This reduces the number of level shifters in the case where the clock crossing is also a voltage crossing,
// at the expense of a combinational path from the sink to the source and back to the sink.
{
require (depth > 0 && isPow2(depth))
require (sync >= 2)
val bits = log2Ceil(depth)
val wires = if (narrow) 1 else depth
}
object AsyncQueueParams {
// When there is only one entry, we don't need narrow.
def singleton(sync: Int = 3, safe: Boolean = true) = AsyncQueueParams(1, sync, safe, false)
}
class AsyncBundleSafety extends Bundle {
val ridx_valid = Input (Bool())
val widx_valid = Output(Bool())
val source_reset_n = Output(Bool())
val sink_reset_n = Input (Bool())
}
class AsyncBundle[T <: Data](private val gen: T, val params: AsyncQueueParams = AsyncQueueParams()) extends Bundle {
// Data-path synchronization
val mem = Output(Vec(params.wires, gen))
val ridx = Input (UInt((params.bits+1).W))
val widx = Output(UInt((params.bits+1).W))
val index = params.narrow.option(Input(UInt(params.bits.W)))
// Signals used to self-stabilize a safe AsyncQueue
val safe = params.safe.option(new AsyncBundleSafety)
}
object GrayCounter {
def apply(bits: Int, increment: Bool = true.B, clear: Bool = false.B, name: String = "binary"): UInt = {
val incremented = Wire(UInt(bits.W))
val binary = RegNext(next=incremented, init=0.U).suggestName(name)
incremented := Mux(clear, 0.U, binary + increment.asUInt)
incremented ^ (incremented >> 1)
}
}
class AsyncValidSync(sync: Int, desc: String) extends RawModule {
val io = IO(new Bundle {
val in = Input(Bool())
val out = Output(Bool())
})
val clock = IO(Input(Clock()))
val reset = IO(Input(AsyncReset()))
withClockAndReset(clock, reset){
io.out := AsyncResetSynchronizerShiftReg(io.in, sync, Some(desc))
}
}
class AsyncQueueSource[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module {
override def desiredName = s"AsyncQueueSource_${gen.typeName}"
val io = IO(new Bundle {
// These come from the source domain
val enq = Flipped(Decoupled(gen))
// These cross to the sink clock domain
val async = new AsyncBundle(gen, params)
})
val bits = params.bits
val sink_ready = WireInit(true.B)
val mem = Reg(Vec(params.depth, gen)) // This does NOT need to be reset at all.
val widx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.enq.fire, !sink_ready, "widx_bin"))
val ridx = AsyncResetSynchronizerShiftReg(io.async.ridx, params.sync, Some("ridx_gray"))
val ready = sink_ready && widx =/= (ridx ^ (params.depth | params.depth >> 1).U)
val index = if (bits == 0) 0.U else io.async.widx(bits-1, 0) ^ (io.async.widx(bits, bits) << (bits-1))
when (io.enq.fire) { mem(index) := io.enq.bits }
val ready_reg = withReset(reset.asAsyncReset)(RegNext(next=ready, init=false.B).suggestName("ready_reg"))
io.enq.ready := ready_reg && sink_ready
val widx_reg = withReset(reset.asAsyncReset)(RegNext(next=widx, init=0.U).suggestName("widx_gray"))
io.async.widx := widx_reg
io.async.index match {
case Some(index) => io.async.mem(0) := mem(index)
case None => io.async.mem := mem
}
io.async.safe.foreach { sio =>
val source_valid_0 = Module(new AsyncValidSync(params.sync, "source_valid_0"))
val source_valid_1 = Module(new AsyncValidSync(params.sync, "source_valid_1"))
val sink_extend = Module(new AsyncValidSync(params.sync, "sink_extend"))
val sink_valid = Module(new AsyncValidSync(params.sync, "sink_valid"))
source_valid_0.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset
source_valid_1.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset
sink_extend .reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset
sink_valid .reset := reset.asAsyncReset
source_valid_0.clock := clock
source_valid_1.clock := clock
sink_extend .clock := clock
sink_valid .clock := clock
source_valid_0.io.in := true.B
source_valid_1.io.in := source_valid_0.io.out
sio.widx_valid := source_valid_1.io.out
sink_extend.io.in := sio.ridx_valid
sink_valid.io.in := sink_extend.io.out
sink_ready := sink_valid.io.out
sio.source_reset_n := !reset.asBool
// Assert that if there is stuff in the queue, then reset cannot happen
// Impossible to write because dequeue can occur on the receiving side,
// then reset allowed to happen, but write side cannot know that dequeue
// occurred.
// TODO: write some sort of sanity check assertion for users
// that denote don't reset when there is activity
// assert (!(reset || !sio.sink_reset_n) || !io.enq.valid, "Enqueue while sink is reset and AsyncQueueSource is unprotected")
// assert (!reset_rise || prev_idx_match.asBool, "Sink reset while AsyncQueueSource not empty")
}
}
class AsyncQueueSink[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module {
override def desiredName = s"AsyncQueueSink_${gen.typeName}"
val io = IO(new Bundle {
// These come from the sink domain
val deq = Decoupled(gen)
// These cross to the source clock domain
val async = Flipped(new AsyncBundle(gen, params))
})
val bits = params.bits
val source_ready = WireInit(true.B)
val ridx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.deq.fire, !source_ready, "ridx_bin"))
val widx = AsyncResetSynchronizerShiftReg(io.async.widx, params.sync, Some("widx_gray"))
val valid = source_ready && ridx =/= widx
// The mux is safe because timing analysis ensures ridx has reached the register
// On an ASIC, changes to the unread location cannot affect the selected value
// On an FPGA, only one input changes at a time => mem updates don't cause glitches
// The register only latches when the selected valued is not being written
val index = if (bits == 0) 0.U else ridx(bits-1, 0) ^ (ridx(bits, bits) << (bits-1))
io.async.index.foreach { _ := index }
// This register does not NEED to be reset, as its contents will not
// be considered unless the asynchronously reset deq valid register is set.
// It is possible that bits latches when the source domain is reset / has power cut
// This is safe, because isolation gates brought mem low before the zeroed widx reached us
val deq_bits_nxt = io.async.mem(if (params.narrow) 0.U else index)
io.deq.bits := ClockCrossingReg(deq_bits_nxt, en = valid, doInit = false, name = Some("deq_bits_reg"))
val valid_reg = withReset(reset.asAsyncReset)(RegNext(next=valid, init=false.B).suggestName("valid_reg"))
io.deq.valid := valid_reg && source_ready
val ridx_reg = withReset(reset.asAsyncReset)(RegNext(next=ridx, init=0.U).suggestName("ridx_gray"))
io.async.ridx := ridx_reg
io.async.safe.foreach { sio =>
val sink_valid_0 = Module(new AsyncValidSync(params.sync, "sink_valid_0"))
val sink_valid_1 = Module(new AsyncValidSync(params.sync, "sink_valid_1"))
val source_extend = Module(new AsyncValidSync(params.sync, "source_extend"))
val source_valid = Module(new AsyncValidSync(params.sync, "source_valid"))
sink_valid_0 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset
sink_valid_1 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset
source_extend.reset := (reset.asBool || !sio.source_reset_n).asAsyncReset
source_valid .reset := reset.asAsyncReset
sink_valid_0 .clock := clock
sink_valid_1 .clock := clock
source_extend.clock := clock
source_valid .clock := clock
sink_valid_0.io.in := true.B
sink_valid_1.io.in := sink_valid_0.io.out
sio.ridx_valid := sink_valid_1.io.out
source_extend.io.in := sio.widx_valid
source_valid.io.in := source_extend.io.out
source_ready := source_valid.io.out
sio.sink_reset_n := !reset.asBool
// TODO: write some sort of sanity check assertion for users
// that denote don't reset when there is activity
//
// val reset_and_extend = !source_ready || !sio.source_reset_n || reset.asBool
// val reset_and_extend_prev = RegNext(reset_and_extend, true.B)
// val reset_rise = !reset_and_extend_prev && reset_and_extend
// val prev_idx_match = AsyncResetReg(updateData=(io.async.widx===io.async.ridx), resetData=0)
// assert (!reset_rise || prev_idx_match.asBool, "Source reset while AsyncQueueSink not empty")
}
}
object FromAsyncBundle
{
// Sometimes it makes sense for the sink to have different sync than the source
def apply[T <: Data](x: AsyncBundle[T]): DecoupledIO[T] = apply(x, x.params.sync)
def apply[T <: Data](x: AsyncBundle[T], sync: Int): DecoupledIO[T] = {
val sink = Module(new AsyncQueueSink(chiselTypeOf(x.mem(0)), x.params.copy(sync = sync)))
sink.io.async <> x
sink.io.deq
}
}
object ToAsyncBundle
{
def apply[T <: Data](x: ReadyValidIO[T], params: AsyncQueueParams = AsyncQueueParams()): AsyncBundle[T] = {
val source = Module(new AsyncQueueSource(chiselTypeOf(x.bits), params))
source.io.enq <> x
source.io.async
}
}
class AsyncQueue[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Crossing[T] {
val io = IO(new CrossingIO(gen))
val source = withClockAndReset(io.enq_clock, io.enq_reset) { Module(new AsyncQueueSource(gen, params)) }
val sink = withClockAndReset(io.deq_clock, io.deq_reset) { Module(new AsyncQueueSink (gen, params)) }
source.io.enq <> io.enq
io.deq <> sink.io.deq
sink.io.async <> source.io.async
}
| module AsyncValidSync_141( // @[AsyncQueue.scala:58:7]
input io_in, // @[AsyncQueue.scala:59:14]
output io_out, // @[AsyncQueue.scala:59:14]
input clock, // @[AsyncQueue.scala:63:17]
input reset // @[AsyncQueue.scala:64:17]
);
wire io_in_0 = io_in; // @[AsyncQueue.scala:58:7]
wire _io_out_WIRE; // @[ShiftReg.scala:48:24]
wire io_out_0; // @[AsyncQueue.scala:58:7]
assign io_out_0 = _io_out_WIRE; // @[ShiftReg.scala:48:24]
AsyncResetSynchronizerShiftReg_w1_d3_i0_155 io_out_sink_valid_1 ( // @[ShiftReg.scala:45:23]
.clock (clock),
.reset (reset),
.io_d (io_in_0), // @[AsyncQueue.scala:58:7]
.io_q (_io_out_WIRE)
); // @[ShiftReg.scala:45:23]
assign io_out = io_out_0; // @[AsyncQueue.scala:58:7]
endmodule |
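The AsyncQueue source and sink compare Gray-coded pointers that carry one extra wrap bit: the queue is empty when the two Gray pointers are equal, and full when the write pointer equals the read pointer with its two most-significant bits inverted, which is what widx =/= (ridx ^ (params.depth | params.depth >> 1).U) tests. A quick software check of that identity, assuming depth = 8, is sketched below; the GrayCheck object is illustrative, not part of rocket-chip.

// Gray-pointer full-condition check (sketch only).
object GrayCheck {
  def toGray(b: Int): Int = b ^ (b >> 1)       // same expression as GrayCounter uses
  def main(args: Array[String]): Unit = {
    val depth = 8                              // pointers are log2(depth) + 1 = 4 bits
    val mask  = 2 * depth - 1
    for (rBin <- 0 until 2 * depth) {
      val wBin = (rBin + depth) & mask         // writer exactly `depth` entries ahead
      // full <=> Gray(wBin) equals Gray(rBin) with its top two bits inverted
      assert(toGray(wBin) == (toGray(rBin) ^ (depth | depth >> 1)))
    }
    println("full condition holds for every read-pointer position")
  }
}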
Generate the Verilog code corresponding to the following Chisel files.
File ClockDomain.scala:
package freechips.rocketchip.prci
import chisel3._
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy.lazymodule._
abstract class Domain(implicit p: Parameters) extends LazyModule with HasDomainCrossing
{
def clockBundle: ClockBundle
lazy val module = new Impl
class Impl extends LazyRawModuleImp(this) {
childClock := clockBundle.clock
childReset := clockBundle.reset
override def provideImplicitClockToLazyChildren = true
// these are just for backwards compatibility with external devices
// that were manually wiring themselves to the domain's clock/reset input:
val clock = IO(Output(chiselTypeOf(clockBundle.clock)))
val reset = IO(Output(chiselTypeOf(clockBundle.reset)))
clock := clockBundle.clock
reset := clockBundle.reset
}
}
abstract class ClockDomain(implicit p: Parameters) extends Domain with HasClockDomainCrossing
class ClockSinkDomain(val clockSinkParams: ClockSinkParameters)(implicit p: Parameters) extends ClockDomain
{
def this(take: Option[ClockParameters] = None, name: Option[String] = None)(implicit p: Parameters) = this(ClockSinkParameters(take = take, name = name))
val clockNode = ClockSinkNode(Seq(clockSinkParams))
def clockBundle = clockNode.in.head._1
override lazy val desiredName = (clockSinkParams.name.toSeq :+ "ClockSinkDomain").mkString
}
class ClockSourceDomain(val clockSourceParams: ClockSourceParameters)(implicit p: Parameters) extends ClockDomain
{
def this(give: Option[ClockParameters] = None, name: Option[String] = None)(implicit p: Parameters) = this(ClockSourceParameters(give = give, name = name))
val clockNode = ClockSourceNode(Seq(clockSourceParams))
def clockBundle = clockNode.out.head._1
override lazy val desiredName = (clockSourceParams.name.toSeq :+ "ClockSourceDomain").mkString
}
abstract class ResetDomain(implicit p: Parameters) extends Domain with HasResetDomainCrossing
File LazyModuleImp.scala:
package org.chipsalliance.diplomacy.lazymodule
import chisel3.{withClockAndReset, Module, RawModule, Reset, _}
import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo}
import firrtl.passes.InlineAnnotation
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.nodes.Dangle
import scala.collection.immutable.SortedMap
/** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]].
*
* This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy.
*/
sealed trait LazyModuleImpLike extends RawModule {
/** [[LazyModule]] that contains this instance. */
val wrapper: LazyModule
/** IOs that will be automatically "punched" for this instance. */
val auto: AutoBundle
/** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */
protected[diplomacy] val dangles: Seq[Dangle]
// [[wrapper.module]] had better not be accessed while LazyModules are still being built!
require(
LazyModule.scope.isEmpty,
s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}"
)
/** Set module name. Defaults to the containing LazyModule's desiredName. */
override def desiredName: String = wrapper.desiredName
suggestName(wrapper.suggestedName)
/** [[Parameters]] for chisel [[Module]]s. */
implicit val p: Parameters = wrapper.p
/** instantiate this [[LazyModule]], return [[AutoBundle]] and a unconnected [[Dangle]]s from this module and
* submodules.
*/
protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = {
// 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]],
// 2. return [[Dangle]]s from each module.
val childDangles = wrapper.children.reverse.flatMap { c =>
implicit val sourceInfo: SourceInfo = c.info
c.cloneProto.map { cp =>
// If the child is a clone, then recursively set cloneProto of its children as well
def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = {
require(bases.size == clones.size)
(bases.zip(clones)).map { case (l, r) =>
require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}")
l.cloneProto = Some(r)
assignCloneProtos(l.children, r.children)
}
}
assignCloneProtos(c.children, cp.children)
// Clone the child module as a record, and get its [[AutoBundle]]
val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName)
val clonedAuto = clone("auto").asInstanceOf[AutoBundle]
// Get the empty [[Dangle]]'s of the cloned child
val rawDangles = c.cloneDangles()
require(rawDangles.size == clonedAuto.elements.size)
// Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s
val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) }
dangles
}.getOrElse {
// For non-clones, instantiate the child module
val mod = try {
Module(c.module)
} catch {
case e: ChiselException => {
println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}")
throw e
}
}
mod.dangles
}
}
// Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]].
// This will result in a sequence of [[Dangle]] from these [[BaseNode]]s.
val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate())
// Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]]
val allDangles = nodeDangles ++ childDangles
// Group [[allDangles]] by their [[source]].
val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*)
// For each [[source]] set of [[Dangle]]s of size 2, ensure that these
// can be connected as a source-sink pair (have opposite flipped value).
// Make the connection and mark them as [[done]].
val done = Set() ++ pairing.values.filter(_.size == 2).map {
case Seq(a, b) =>
require(a.flipped != b.flipped)
// @todo <> in chisel3 makes directionless connection.
if (a.flipped) {
a.data <> b.data
} else {
b.data <> a.data
}
a.source
case _ => None
}
// Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module.
val forward = allDangles.filter(d => !done(d.source))
// Generate [[AutoBundle]] IO from [[forward]].
val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*))
// Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]]
val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) =>
if (d.flipped) {
d.data <> io
} else {
io <> d.data
}
d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name)
}
// Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]].
wrapper.inModuleBody.reverse.foreach {
_()
}
if (wrapper.shouldBeInlined) {
chisel3.experimental.annotate(new ChiselAnnotation {
def toFirrtl = InlineAnnotation(toNamed)
})
}
// Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]].
(auto, dangles)
}
}
/** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike {
/** Instantiate hardware of this `Module`. */
val (auto, dangles) = instantiate()
}
/** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike {
// These wires are the default clock+reset for all LazyModule children.
// It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the
// [[LazyRawModuleImp]] children.
// Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly.
/** drive clock explicitly. */
val childClock: Clock = Wire(Clock())
/** drive reset explicitly. */
val childReset: Reset = Wire(Reset())
// the default is that these are disabled
childClock := false.B.asClock
childReset := chisel3.DontCare
def provideImplicitClockToLazyChildren: Boolean = false
val (auto, dangles) =
if (provideImplicitClockToLazyChildren) {
withClockAndReset(childClock, childReset) { instantiate() }
} else {
instantiate()
}
}
File NoC.scala:
package constellation.noc
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.{Field, Parameters}
import freechips.rocketchip.diplomacy.{LazyModule, LazyModuleImp, BundleBridgeSink, InModuleBody}
import freechips.rocketchip.util.ElaborationArtefacts
import freechips.rocketchip.prci._
import constellation.router._
import constellation.channel._
import constellation.routing.{RoutingRelation, ChannelRoutingInfo}
import constellation.topology.{PhysicalTopology, UnidirectionalLine}
class NoCTerminalIO(
val ingressParams: Seq[IngressChannelParams],
val egressParams: Seq[EgressChannelParams])(implicit val p: Parameters) extends Bundle {
val ingress = MixedVec(ingressParams.map { u => Flipped(new IngressChannel(u)) })
val egress = MixedVec(egressParams.map { u => new EgressChannel(u) })
}
class NoC(nocParams: NoCParams)(implicit p: Parameters) extends LazyModule {
override def shouldBeInlined = nocParams.inlineNoC
val internalParams = InternalNoCParams(nocParams)
val allChannelParams = internalParams.channelParams
val allIngressParams = internalParams.ingressParams
val allEgressParams = internalParams.egressParams
val allRouterParams = internalParams.routerParams
val iP = p.alterPartial({ case InternalNoCKey => internalParams })
val nNodes = nocParams.topology.nNodes
val nocName = nocParams.nocName
val skipValidationChecks = nocParams.skipValidationChecks
val clockSourceNodes = Seq.tabulate(nNodes) { i => ClockSourceNode(Seq(ClockSourceParameters())) }
val router_sink_domains = Seq.tabulate(nNodes) { i =>
val router_sink_domain = LazyModule(new ClockSinkDomain(ClockSinkParameters(
name = Some(s"${nocName}_router_$i")
)))
router_sink_domain.clockNode := clockSourceNodes(i)
router_sink_domain
}
val routers = Seq.tabulate(nNodes) { i => router_sink_domains(i) {
val inParams = allChannelParams.filter(_.destId == i).map(
_.copy(payloadBits=allRouterParams(i).user.payloadBits)
)
val outParams = allChannelParams.filter(_.srcId == i).map(
_.copy(payloadBits=allRouterParams(i).user.payloadBits)
)
val ingressParams = allIngressParams.filter(_.destId == i).map(
_.copy(payloadBits=allRouterParams(i).user.payloadBits)
)
val egressParams = allEgressParams.filter(_.srcId == i).map(
_.copy(payloadBits=allRouterParams(i).user.payloadBits)
)
val noIn = inParams.size + ingressParams.size == 0
val noOut = outParams.size + egressParams.size == 0
if (noIn || noOut) {
println(s"Constellation WARNING: $nocName router $i seems to be unused, it will not be generated")
None
} else {
Some(LazyModule(new Router(
routerParams = allRouterParams(i),
preDiplomaticInParams = inParams,
preDiplomaticIngressParams = ingressParams,
outDests = outParams.map(_.destId),
egressIds = egressParams.map(_.egressId)
)(iP)))
}
}}.flatten
val ingressNodes = allIngressParams.map { u => IngressChannelSourceNode(u.destId) }
val egressNodes = allEgressParams.map { u => EgressChannelDestNode(u) }
// Generate channels between routers diplomatically
Seq.tabulate(nNodes, nNodes) { case (i, j) => if (i != j) {
val routerI = routers.find(_.nodeId == i)
val routerJ = routers.find(_.nodeId == j)
if (routerI.isDefined && routerJ.isDefined) {
val sourceNodes: Seq[ChannelSourceNode] = routerI.get.sourceNodes.filter(_.destId == j)
val destNodes: Seq[ChannelDestNode] = routerJ.get.destNodes.filter(_.destParams.srcId == i)
require (sourceNodes.size == destNodes.size)
(sourceNodes zip destNodes).foreach { case (src, dst) =>
val channelParam = allChannelParams.find(c => c.srcId == i && c.destId == j).get
router_sink_domains(j) {
implicit val p: Parameters = iP
(dst
:= ChannelWidthWidget(routerJ.get.payloadBits, routerI.get.payloadBits)
:= channelParam.channelGen(p)(src)
)
}
}
}
}}
// Generate terminal channels diplomatically
routers.foreach { dst => router_sink_domains(dst.nodeId) {
implicit val p: Parameters = iP
dst.ingressNodes.foreach(n => {
val ingressId = n.destParams.ingressId
require(dst.payloadBits <= allIngressParams(ingressId).payloadBits)
(n
:= IngressWidthWidget(dst.payloadBits, allIngressParams(ingressId).payloadBits)
:= ingressNodes(ingressId)
)
})
dst.egressNodes.foreach(n => {
val egressId = n.egressId
require(dst.payloadBits <= allEgressParams(egressId).payloadBits)
(egressNodes(egressId)
:= EgressWidthWidget(allEgressParams(egressId).payloadBits, dst.payloadBits)
:= n
)
})
}}
val debugNodes = routers.map { r =>
val sink = BundleBridgeSink[DebugBundle]()
sink := r.debugNode
sink
}
val ctrlNodes = if (nocParams.hasCtrl) {
(0 until nNodes).map { i =>
routers.find(_.nodeId == i).map { r =>
val sink = BundleBridgeSink[RouterCtrlBundle]()
sink := r.ctrlNode.get
sink
}
}
} else {
Nil
}
println(s"Constellation: $nocName Finished parameter validation")
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
println(s"Constellation: $nocName Starting NoC RTL generation")
val io = IO(new NoCTerminalIO(allIngressParams, allEgressParams)(iP) {
val router_clocks = Vec(nNodes, Input(new ClockBundle(ClockBundleParameters())))
val router_ctrl = if (nocParams.hasCtrl) Vec(nNodes, new RouterCtrlBundle) else Nil
})
(io.ingress zip ingressNodes.map(_.out(0)._1)).foreach { case (l,r) => r <> l }
(io.egress zip egressNodes .map(_.in (0)._1)).foreach { case (l,r) => l <> r }
(io.router_clocks zip clockSourceNodes.map(_.out(0)._1)).foreach { case (l,r) => l <> r }
if (nocParams.hasCtrl) {
ctrlNodes.zipWithIndex.map { case (c,i) =>
if (c.isDefined) {
io.router_ctrl(i) <> c.get.in(0)._1
} else {
io.router_ctrl(i) <> DontCare
}
}
}
// TODO: These assume a single clock-domain across the entire noc
val debug_va_stall_ctr = RegInit(0.U(64.W))
val debug_sa_stall_ctr = RegInit(0.U(64.W))
val debug_any_stall_ctr = debug_va_stall_ctr + debug_sa_stall_ctr
debug_va_stall_ctr := debug_va_stall_ctr + debugNodes.map(_.in(0)._1.va_stall.reduce(_+_)).reduce(_+_)
debug_sa_stall_ctr := debug_sa_stall_ctr + debugNodes.map(_.in(0)._1.sa_stall.reduce(_+_)).reduce(_+_)
dontTouch(debug_va_stall_ctr)
dontTouch(debug_sa_stall_ctr)
dontTouch(debug_any_stall_ctr)
def prepend(s: String) = Seq(nocName, s).mkString(".")
ElaborationArtefacts.add(prepend("noc.graphml"), graphML)
val adjList = routers.map { r =>
val outs = r.outParams.map(o => s"${o.destId}").mkString(" ")
val egresses = r.egressParams.map(e => s"e${e.egressId}").mkString(" ")
val ingresses = r.ingressParams.map(i => s"i${i.ingressId} ${r.nodeId}")
(Seq(s"${r.nodeId} $outs $egresses") ++ ingresses).mkString("\n")
}.mkString("\n")
ElaborationArtefacts.add(prepend("noc.adjlist"), adjList)
val xys = routers.map(r => {
val n = r.nodeId
val ids = (Seq(r.nodeId.toString)
++ r.egressParams.map(e => s"e${e.egressId}")
++ r.ingressParams.map(i => s"i${i.ingressId}")
)
val plotter = nocParams.topology.plotter
val coords = (Seq(plotter.node(r.nodeId))
++ Seq.tabulate(r.egressParams.size ) { i => plotter. egress(i, r. egressParams.size, r.nodeId) }
++ Seq.tabulate(r.ingressParams.size) { i => plotter.ingress(i, r.ingressParams.size, r.nodeId) }
)
(ids zip coords).map { case (i, (x, y)) => s"$i $x $y" }.mkString("\n")
}).mkString("\n")
ElaborationArtefacts.add(prepend("noc.xy"), xys)
val edgeProps = routers.map { r =>
val outs = r.outParams.map { o =>
(Seq(s"${r.nodeId} ${o.destId}") ++ (if (o.possibleFlows.size == 0) Some("unused") else None))
.mkString(" ")
}
val egresses = r.egressParams.map { e =>
(Seq(s"${r.nodeId} e${e.egressId}") ++ (if (e.possibleFlows.size == 0) Some("unused") else None))
.mkString(" ")
}
val ingresses = r.ingressParams.map { i =>
(Seq(s"i${i.ingressId} ${r.nodeId}") ++ (if (i.possibleFlows.size == 0) Some("unused") else None))
.mkString(" ")
}
(outs ++ egresses ++ ingresses).mkString("\n")
}.mkString("\n")
ElaborationArtefacts.add(prepend("noc.edgeprops"), edgeProps)
println(s"Constellation: $nocName Finished NoC RTL generation")
}
}
| module TLSplitACDxBENoC_acd_router_3ClockSinkDomain( // @[ClockDomain.scala:14:9]
output [1:0] auto_routers_debug_out_va_stall_0, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_routers_debug_out_va_stall_1, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_routers_debug_out_va_stall_2, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_routers_debug_out_va_stall_3, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_routers_debug_out_sa_stall_0, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_routers_debug_out_sa_stall_1, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_routers_debug_out_sa_stall_2, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_routers_debug_out_sa_stall_3, // @[LazyModuleImp.scala:107:25]
input auto_routers_egress_nodes_out_flit_ready, // @[LazyModuleImp.scala:107:25]
output auto_routers_egress_nodes_out_flit_valid, // @[LazyModuleImp.scala:107:25]
output auto_routers_egress_nodes_out_flit_bits_head, // @[LazyModuleImp.scala:107:25]
output auto_routers_egress_nodes_out_flit_bits_tail, // @[LazyModuleImp.scala:107:25]
output [144:0] auto_routers_egress_nodes_out_flit_bits_payload, // @[LazyModuleImp.scala:107:25]
output auto_routers_ingress_nodes_in_1_flit_ready, // @[LazyModuleImp.scala:107:25]
input auto_routers_ingress_nodes_in_1_flit_valid, // @[LazyModuleImp.scala:107:25]
input auto_routers_ingress_nodes_in_1_flit_bits_head, // @[LazyModuleImp.scala:107:25]
input auto_routers_ingress_nodes_in_1_flit_bits_tail, // @[LazyModuleImp.scala:107:25]
input [144:0] auto_routers_ingress_nodes_in_1_flit_bits_payload, // @[LazyModuleImp.scala:107:25]
input [4:0] auto_routers_ingress_nodes_in_1_flit_bits_egress_id, // @[LazyModuleImp.scala:107:25]
output auto_routers_ingress_nodes_in_0_flit_ready, // @[LazyModuleImp.scala:107:25]
input auto_routers_ingress_nodes_in_0_flit_valid, // @[LazyModuleImp.scala:107:25]
input auto_routers_ingress_nodes_in_0_flit_bits_head, // @[LazyModuleImp.scala:107:25]
input auto_routers_ingress_nodes_in_0_flit_bits_tail, // @[LazyModuleImp.scala:107:25]
input [144:0] auto_routers_ingress_nodes_in_0_flit_bits_payload, // @[LazyModuleImp.scala:107:25]
input [4:0] auto_routers_ingress_nodes_in_0_flit_bits_egress_id, // @[LazyModuleImp.scala:107:25]
output auto_routers_source_nodes_out_1_flit_0_valid, // @[LazyModuleImp.scala:107:25]
output auto_routers_source_nodes_out_1_flit_0_bits_head, // @[LazyModuleImp.scala:107:25]
output auto_routers_source_nodes_out_1_flit_0_bits_tail, // @[LazyModuleImp.scala:107:25]
output [144:0] auto_routers_source_nodes_out_1_flit_0_bits_payload, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_routers_source_nodes_out_1_flit_0_bits_flow_vnet_id, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_routers_source_nodes_out_1_flit_0_bits_flow_ingress_node, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_routers_source_nodes_out_1_flit_0_bits_flow_ingress_node_id, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_routers_source_nodes_out_1_flit_0_bits_flow_egress_node, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_routers_source_nodes_out_1_flit_0_bits_flow_egress_node_id, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_routers_source_nodes_out_1_flit_0_bits_virt_channel_id, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_routers_source_nodes_out_1_credit_return, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_routers_source_nodes_out_1_vc_free, // @[LazyModuleImp.scala:107:25]
output auto_routers_source_nodes_out_0_flit_0_valid, // @[LazyModuleImp.scala:107:25]
output auto_routers_source_nodes_out_0_flit_0_bits_head, // @[LazyModuleImp.scala:107:25]
output auto_routers_source_nodes_out_0_flit_0_bits_tail, // @[LazyModuleImp.scala:107:25]
output [144:0] auto_routers_source_nodes_out_0_flit_0_bits_payload, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_routers_source_nodes_out_0_flit_0_bits_flow_vnet_id, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_routers_source_nodes_out_0_flit_0_bits_flow_ingress_node, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_routers_source_nodes_out_0_flit_0_bits_flow_ingress_node_id, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_routers_source_nodes_out_0_flit_0_bits_flow_egress_node, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_routers_source_nodes_out_0_flit_0_bits_flow_egress_node_id, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_routers_source_nodes_out_0_flit_0_bits_virt_channel_id, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_routers_source_nodes_out_0_credit_return, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_routers_source_nodes_out_0_vc_free, // @[LazyModuleImp.scala:107:25]
input auto_routers_dest_nodes_in_1_flit_0_valid, // @[LazyModuleImp.scala:107:25]
input auto_routers_dest_nodes_in_1_flit_0_bits_head, // @[LazyModuleImp.scala:107:25]
input auto_routers_dest_nodes_in_1_flit_0_bits_tail, // @[LazyModuleImp.scala:107:25]
input [144:0] auto_routers_dest_nodes_in_1_flit_0_bits_payload, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_routers_dest_nodes_in_1_flit_0_bits_flow_vnet_id, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_routers_dest_nodes_in_1_flit_0_bits_flow_ingress_node, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_routers_dest_nodes_in_1_flit_0_bits_flow_ingress_node_id, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_routers_dest_nodes_in_1_flit_0_bits_flow_egress_node, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_routers_dest_nodes_in_1_flit_0_bits_flow_egress_node_id, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_routers_dest_nodes_in_1_flit_0_bits_virt_channel_id, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_routers_dest_nodes_in_1_credit_return, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_routers_dest_nodes_in_1_vc_free, // @[LazyModuleImp.scala:107:25]
input auto_routers_dest_nodes_in_0_flit_0_valid, // @[LazyModuleImp.scala:107:25]
input auto_routers_dest_nodes_in_0_flit_0_bits_head, // @[LazyModuleImp.scala:107:25]
input auto_routers_dest_nodes_in_0_flit_0_bits_tail, // @[LazyModuleImp.scala:107:25]
input [144:0] auto_routers_dest_nodes_in_0_flit_0_bits_payload, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_routers_dest_nodes_in_0_flit_0_bits_flow_vnet_id, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_routers_dest_nodes_in_0_flit_0_bits_flow_ingress_node, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_routers_dest_nodes_in_0_flit_0_bits_flow_ingress_node_id, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_routers_dest_nodes_in_0_flit_0_bits_flow_egress_node, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_routers_dest_nodes_in_0_flit_0_bits_flow_egress_node_id, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_routers_dest_nodes_in_0_flit_0_bits_virt_channel_id, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_routers_dest_nodes_in_0_credit_return, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_routers_dest_nodes_in_0_vc_free, // @[LazyModuleImp.scala:107:25]
input auto_clock_in_clock, // @[LazyModuleImp.scala:107:25]
input auto_clock_in_reset // @[LazyModuleImp.scala:107:25]
);
Router_3 routers ( // @[NoC.scala:67:22]
.clock (auto_clock_in_clock),
.reset (auto_clock_in_reset),
.auto_debug_out_va_stall_0 (auto_routers_debug_out_va_stall_0),
.auto_debug_out_va_stall_1 (auto_routers_debug_out_va_stall_1),
.auto_debug_out_va_stall_2 (auto_routers_debug_out_va_stall_2),
.auto_debug_out_va_stall_3 (auto_routers_debug_out_va_stall_3),
.auto_debug_out_sa_stall_0 (auto_routers_debug_out_sa_stall_0),
.auto_debug_out_sa_stall_1 (auto_routers_debug_out_sa_stall_1),
.auto_debug_out_sa_stall_2 (auto_routers_debug_out_sa_stall_2),
.auto_debug_out_sa_stall_3 (auto_routers_debug_out_sa_stall_3),
.auto_egress_nodes_out_flit_ready (auto_routers_egress_nodes_out_flit_ready),
.auto_egress_nodes_out_flit_valid (auto_routers_egress_nodes_out_flit_valid),
.auto_egress_nodes_out_flit_bits_head (auto_routers_egress_nodes_out_flit_bits_head),
.auto_egress_nodes_out_flit_bits_tail (auto_routers_egress_nodes_out_flit_bits_tail),
.auto_egress_nodes_out_flit_bits_payload (auto_routers_egress_nodes_out_flit_bits_payload),
.auto_ingress_nodes_in_1_flit_ready (auto_routers_ingress_nodes_in_1_flit_ready),
.auto_ingress_nodes_in_1_flit_valid (auto_routers_ingress_nodes_in_1_flit_valid),
.auto_ingress_nodes_in_1_flit_bits_head (auto_routers_ingress_nodes_in_1_flit_bits_head),
.auto_ingress_nodes_in_1_flit_bits_tail (auto_routers_ingress_nodes_in_1_flit_bits_tail),
.auto_ingress_nodes_in_1_flit_bits_payload (auto_routers_ingress_nodes_in_1_flit_bits_payload),
.auto_ingress_nodes_in_1_flit_bits_egress_id (auto_routers_ingress_nodes_in_1_flit_bits_egress_id),
.auto_ingress_nodes_in_0_flit_ready (auto_routers_ingress_nodes_in_0_flit_ready),
.auto_ingress_nodes_in_0_flit_valid (auto_routers_ingress_nodes_in_0_flit_valid),
.auto_ingress_nodes_in_0_flit_bits_head (auto_routers_ingress_nodes_in_0_flit_bits_head),
.auto_ingress_nodes_in_0_flit_bits_tail (auto_routers_ingress_nodes_in_0_flit_bits_tail),
.auto_ingress_nodes_in_0_flit_bits_payload (auto_routers_ingress_nodes_in_0_flit_bits_payload),
.auto_ingress_nodes_in_0_flit_bits_egress_id (auto_routers_ingress_nodes_in_0_flit_bits_egress_id),
.auto_source_nodes_out_1_flit_0_valid (auto_routers_source_nodes_out_1_flit_0_valid),
.auto_source_nodes_out_1_flit_0_bits_head (auto_routers_source_nodes_out_1_flit_0_bits_head),
.auto_source_nodes_out_1_flit_0_bits_tail (auto_routers_source_nodes_out_1_flit_0_bits_tail),
.auto_source_nodes_out_1_flit_0_bits_payload (auto_routers_source_nodes_out_1_flit_0_bits_payload),
.auto_source_nodes_out_1_flit_0_bits_flow_vnet_id (auto_routers_source_nodes_out_1_flit_0_bits_flow_vnet_id),
.auto_source_nodes_out_1_flit_0_bits_flow_ingress_node (auto_routers_source_nodes_out_1_flit_0_bits_flow_ingress_node),
.auto_source_nodes_out_1_flit_0_bits_flow_ingress_node_id (auto_routers_source_nodes_out_1_flit_0_bits_flow_ingress_node_id),
.auto_source_nodes_out_1_flit_0_bits_flow_egress_node (auto_routers_source_nodes_out_1_flit_0_bits_flow_egress_node),
.auto_source_nodes_out_1_flit_0_bits_flow_egress_node_id (auto_routers_source_nodes_out_1_flit_0_bits_flow_egress_node_id),
.auto_source_nodes_out_1_flit_0_bits_virt_channel_id (auto_routers_source_nodes_out_1_flit_0_bits_virt_channel_id),
.auto_source_nodes_out_1_credit_return (auto_routers_source_nodes_out_1_credit_return),
.auto_source_nodes_out_1_vc_free (auto_routers_source_nodes_out_1_vc_free),
.auto_source_nodes_out_0_flit_0_valid (auto_routers_source_nodes_out_0_flit_0_valid),
.auto_source_nodes_out_0_flit_0_bits_head (auto_routers_source_nodes_out_0_flit_0_bits_head),
.auto_source_nodes_out_0_flit_0_bits_tail (auto_routers_source_nodes_out_0_flit_0_bits_tail),
.auto_source_nodes_out_0_flit_0_bits_payload (auto_routers_source_nodes_out_0_flit_0_bits_payload),
.auto_source_nodes_out_0_flit_0_bits_flow_vnet_id (auto_routers_source_nodes_out_0_flit_0_bits_flow_vnet_id),
.auto_source_nodes_out_0_flit_0_bits_flow_ingress_node (auto_routers_source_nodes_out_0_flit_0_bits_flow_ingress_node),
.auto_source_nodes_out_0_flit_0_bits_flow_ingress_node_id (auto_routers_source_nodes_out_0_flit_0_bits_flow_ingress_node_id),
.auto_source_nodes_out_0_flit_0_bits_flow_egress_node (auto_routers_source_nodes_out_0_flit_0_bits_flow_egress_node),
.auto_source_nodes_out_0_flit_0_bits_flow_egress_node_id (auto_routers_source_nodes_out_0_flit_0_bits_flow_egress_node_id),
.auto_source_nodes_out_0_flit_0_bits_virt_channel_id (auto_routers_source_nodes_out_0_flit_0_bits_virt_channel_id),
.auto_source_nodes_out_0_credit_return (auto_routers_source_nodes_out_0_credit_return),
.auto_source_nodes_out_0_vc_free (auto_routers_source_nodes_out_0_vc_free),
.auto_dest_nodes_in_1_flit_0_valid (auto_routers_dest_nodes_in_1_flit_0_valid),
.auto_dest_nodes_in_1_flit_0_bits_head (auto_routers_dest_nodes_in_1_flit_0_bits_head),
.auto_dest_nodes_in_1_flit_0_bits_tail (auto_routers_dest_nodes_in_1_flit_0_bits_tail),
.auto_dest_nodes_in_1_flit_0_bits_payload (auto_routers_dest_nodes_in_1_flit_0_bits_payload),
.auto_dest_nodes_in_1_flit_0_bits_flow_vnet_id (auto_routers_dest_nodes_in_1_flit_0_bits_flow_vnet_id),
.auto_dest_nodes_in_1_flit_0_bits_flow_ingress_node (auto_routers_dest_nodes_in_1_flit_0_bits_flow_ingress_node),
.auto_dest_nodes_in_1_flit_0_bits_flow_ingress_node_id (auto_routers_dest_nodes_in_1_flit_0_bits_flow_ingress_node_id),
.auto_dest_nodes_in_1_flit_0_bits_flow_egress_node (auto_routers_dest_nodes_in_1_flit_0_bits_flow_egress_node),
.auto_dest_nodes_in_1_flit_0_bits_flow_egress_node_id (auto_routers_dest_nodes_in_1_flit_0_bits_flow_egress_node_id),
.auto_dest_nodes_in_1_flit_0_bits_virt_channel_id (auto_routers_dest_nodes_in_1_flit_0_bits_virt_channel_id),
.auto_dest_nodes_in_1_credit_return (auto_routers_dest_nodes_in_1_credit_return),
.auto_dest_nodes_in_1_vc_free (auto_routers_dest_nodes_in_1_vc_free),
.auto_dest_nodes_in_0_flit_0_valid (auto_routers_dest_nodes_in_0_flit_0_valid),
.auto_dest_nodes_in_0_flit_0_bits_head (auto_routers_dest_nodes_in_0_flit_0_bits_head),
.auto_dest_nodes_in_0_flit_0_bits_tail (auto_routers_dest_nodes_in_0_flit_0_bits_tail),
.auto_dest_nodes_in_0_flit_0_bits_payload (auto_routers_dest_nodes_in_0_flit_0_bits_payload),
.auto_dest_nodes_in_0_flit_0_bits_flow_vnet_id (auto_routers_dest_nodes_in_0_flit_0_bits_flow_vnet_id),
.auto_dest_nodes_in_0_flit_0_bits_flow_ingress_node (auto_routers_dest_nodes_in_0_flit_0_bits_flow_ingress_node),
.auto_dest_nodes_in_0_flit_0_bits_flow_ingress_node_id (auto_routers_dest_nodes_in_0_flit_0_bits_flow_ingress_node_id),
.auto_dest_nodes_in_0_flit_0_bits_flow_egress_node (auto_routers_dest_nodes_in_0_flit_0_bits_flow_egress_node),
.auto_dest_nodes_in_0_flit_0_bits_flow_egress_node_id (auto_routers_dest_nodes_in_0_flit_0_bits_flow_egress_node_id),
.auto_dest_nodes_in_0_flit_0_bits_virt_channel_id (auto_routers_dest_nodes_in_0_flit_0_bits_virt_channel_id),
.auto_dest_nodes_in_0_credit_return (auto_routers_dest_nodes_in_0_credit_return),
.auto_dest_nodes_in_0_vc_free (auto_routers_dest_nodes_in_0_vc_free)
); // @[NoC.scala:67:22]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File Transposer.scala:
package gemmini
import chisel3._
import chisel3.util._
import Util._
trait Transposer[T <: Data] extends Module {
def dim: Int
def dataType: T
val io = IO(new Bundle {
val inRow = Flipped(Decoupled(Vec(dim, dataType)))
val outCol = Decoupled(Vec(dim, dataType))
})
}
class PipelinedTransposer[T <: Data](val dim: Int, val dataType: T) extends Transposer[T] {
require(isPow2(dim))
val regArray = Seq.fill(dim, dim)(Reg(dataType))
val regArrayT = regArray.transpose
val sMoveUp :: sMoveLeft :: Nil = Enum(2)
val state = RegInit(sMoveUp)
val leftCounter = RegInit(0.U(log2Ceil(dim+1).W)) //(io.inRow.fire && state === sMoveLeft, dim+1)
val upCounter = RegInit(0.U(log2Ceil(dim+1).W)) //Counter(io.inRow.fire && state === sMoveUp, dim+1)
io.outCol.valid := 0.U
io.inRow.ready := 0.U
switch(state) {
is(sMoveUp) {
io.inRow.ready := upCounter <= dim.U
io.outCol.valid := leftCounter > 0.U
when(io.inRow.fire) {
upCounter := upCounter + 1.U
}
when(upCounter === (dim-1).U) {
state := sMoveLeft
leftCounter := 0.U
}
when(io.outCol.fire) {
leftCounter := leftCounter - 1.U
}
}
is(sMoveLeft) {
io.inRow.ready := leftCounter <= dim.U // TODO: this is naive
io.outCol.valid := upCounter > 0.U
when(leftCounter === (dim-1).U) {
state := sMoveUp
}
when(io.inRow.fire) {
leftCounter := leftCounter + 1.U
upCounter := 0.U
}
when(io.outCol.fire) {
upCounter := upCounter - 1.U
}
}
}
// Propagate input from bottom row to top row systolically in the move up phase
// TODO: need to iterate over columns to connect Chisel values of type T
// Should be able to operate directly on the Vec, but Seq and Vec don't mix (try Array?)
for (colIdx <- 0 until dim) {
regArray.foldRight(io.inRow.bits(colIdx)) {
case (regRow, prevReg) =>
when (state === sMoveUp) {
regRow(colIdx) := prevReg
}
regRow(colIdx)
}
}
// Propagate input from right side to left side systolically in the move left phase
for (rowIdx <- 0 until dim) {
regArrayT.foldRight(io.inRow.bits(rowIdx)) {
case (regCol, prevReg) =>
when (state === sMoveLeft) {
regCol(rowIdx) := prevReg
}
regCol(rowIdx)
}
}
// Pull from the left side or the top side based on the state
for (idx <- 0 until dim) {
when (state === sMoveUp) {
io.outCol.bits(idx) := regArray(0)(idx)
}.elsewhen(state === sMoveLeft) {
io.outCol.bits(idx) := regArrayT(0)(idx)
}.otherwise {
io.outCol.bits(idx) := DontCare
}
}
}
class AlwaysOutTransposer[T <: Data](val dim: Int, val dataType: T) extends Transposer[T] {
require(isPow2(dim))
val LEFT_DIR = 0.U(1.W)
val UP_DIR = 1.U(1.W)
class PE extends Module {
val io = IO(new Bundle {
val inR = Input(dataType)
val inD = Input(dataType)
val outL = Output(dataType)
val outU = Output(dataType)
val dir = Input(UInt(1.W))
val en = Input(Bool())
})
val reg = RegEnable(Mux(io.dir === LEFT_DIR, io.inR, io.inD), io.en)
io.outU := reg
io.outL := reg
}
val pes = Seq.fill(dim,dim)(Module(new PE))
val counter = RegInit(0.U((log2Ceil(dim) max 1).W)) // TODO replace this with a standard Chisel counter
val dir = RegInit(LEFT_DIR)
// Wire up horizontal signals
for (row <- 0 until dim; col <- 0 until dim) {
val right_in = if (col == dim-1) io.inRow.bits(row) else pes(row)(col+1).io.outL
pes(row)(col).io.inR := right_in
}
// Wire up vertical signals
for (row <- 0 until dim; col <- 0 until dim) {
val down_in = if (row == dim-1) io.inRow.bits(col) else pes(row+1)(col).io.outU
pes(row)(col).io.inD := down_in
}
// Wire up global signals
pes.flatten.foreach(_.io.dir := dir)
pes.flatten.foreach(_.io.en := io.inRow.fire)
io.outCol.valid := true.B
io.inRow.ready := true.B
val left_out = VecInit(pes.transpose.head.map(_.io.outL))
val up_out = VecInit(pes.head.map(_.io.outU))
io.outCol.bits := Mux(dir === LEFT_DIR, left_out, up_out)
when (io.inRow.fire) {
counter := wrappingAdd(counter, 1.U, dim)
}
when (counter === (dim-1).U && io.inRow.fire) {
dir := ~dir
}
}
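// Editor's sketch (not part of the original gemmini source): a hypothetical wrapper showing how
// a Transposer is typically instantiated. Rows are pushed in through the Decoupled inRow port and
// transposed columns are drained from outCol; the wrapper name and the 8-bit element type are
// assumptions made only for illustration.
class TransposerWrapper(dim: Int = 4) extends Module {
  val io = IO(new Bundle {
    val in  = Flipped(Decoupled(Vec(dim, UInt(8.W))))
    val out = Decoupled(Vec(dim, UInt(8.W)))
  })
  val t = Module(new AlwaysOutTransposer(dim, UInt(8.W)))
  t.io.inRow <> io.in   // producer pushes one row of dim elements per handshake
  io.out <> t.io.outCol // consumer pops one transposed column per handshake
}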
class NaiveTransposer[T <: Data](val dim: Int, val dataType: T) extends Transposer[T] {
val regArray = Seq.fill(dim, dim)(Reg(dataType))
val regArrayT = regArray.transpose
// state = 0 => filling regArray row-wise, state = 1 => draining regArray column-wise
val state = RegInit(0.U(1.W))
val countInc = io.inRow.fire || io.outCol.fire
val (countValue, countWrap) = Counter(countInc, dim)
io.inRow.ready := state === 0.U
io.outCol.valid := state === 1.U
for (i <- 0 until dim) {
for (j <- 0 until dim) {
when(countValue === i.U && io.inRow.fire) {
regArray(i)(j) := io.inRow.bits(j)
}
}
}
for (i <- 0 until dim) {
io.outCol.bits(i) := 0.U
for (j <- 0 until dim) {
when(countValue === j.U) {
io.outCol.bits(i) := regArrayT(j)(i)
}
}
}
when (io.inRow.fire && countWrap) {
state := 1.U
}
when (io.outCol.fire && countWrap) {
state := 0.U
}
assert(!(state === 0.U) || !io.outCol.fire)
assert(!(state === 1.U) || !io.inRow.fire)
}
| module PE_231( // @[Transposer.scala:100:9]
input clock, // @[Transposer.scala:100:9]
input reset, // @[Transposer.scala:100:9]
input [7:0] io_inR, // @[Transposer.scala:101:16]
input [7:0] io_inD, // @[Transposer.scala:101:16]
output [7:0] io_outL, // @[Transposer.scala:101:16]
output [7:0] io_outU, // @[Transposer.scala:101:16]
input io_dir, // @[Transposer.scala:101:16]
input io_en // @[Transposer.scala:101:16]
);
wire [7:0] io_inR_0 = io_inR; // @[Transposer.scala:100:9]
wire [7:0] io_inD_0 = io_inD; // @[Transposer.scala:100:9]
wire io_dir_0 = io_dir; // @[Transposer.scala:100:9]
wire io_en_0 = io_en; // @[Transposer.scala:100:9]
wire [7:0] io_outL_0; // @[Transposer.scala:100:9]
wire [7:0] io_outU_0; // @[Transposer.scala:100:9]
wire _reg_T = ~io_dir_0; // @[Transposer.scala:100:9, :110:36]
wire [7:0] _reg_T_1 = _reg_T ? io_inR_0 : io_inD_0; // @[Transposer.scala:100:9, :110:{28,36}]
reg [7:0] reg_0; // @[Transposer.scala:110:24]
assign io_outL_0 = reg_0; // @[Transposer.scala:100:9, :110:24]
assign io_outU_0 = reg_0; // @[Transposer.scala:100:9, :110:24]
always @(posedge clock) begin // @[Transposer.scala:100:9]
if (io_en_0) // @[Transposer.scala:100:9]
reg_0 <= _reg_T_1; // @[Transposer.scala:110:{24,28}]
end // always @(posedge)
assign io_outL = io_outL_0; // @[Transposer.scala:100:9]
assign io_outU = io_outU_0; // @[Transposer.scala:100:9]
endmodule |
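A note on the mapping above: PE_231 is the flattened form of the PE class declared inside AlwaysOutTransposer. The single RegEnable(Mux(io.dir === LEFT_DIR, io.inR, io.inD), io.en) lowers to a clocked register that only updates when io_en is high, and that register fans out to both io_outL and io_outU. As a hedged, stand-alone sketch (the module name EnabledMuxReg is invented for illustration), essentially the same Verilog structure is produced by:
import chisel3._
import chisel3.util._
class EnabledMuxReg extends Module {
  val io = IO(new Bundle {
    val inR  = Input(UInt(8.W))
    val inD  = Input(UInt(8.W))
    val dir  = Input(Bool())
    val en   = Input(Bool())
    val outL = Output(UInt(8.W))
    val outU = Output(UInt(8.W))
  })
  // dir low selects the right-hand input (move left), dir high selects the bottom input (move up)
  val r = RegEnable(Mux(!io.dir, io.inR, io.inD), io.en)
  io.outL := r
  io.outU := r
}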
Generate the Verilog code corresponding to the following Chisel files.
File Monitor.scala:
package constellation.channel
import chisel3._
import chisel3.util._
import freechips.rocketchip.diplomacy._
import org.chipsalliance.cde.config.{Parameters}
import freechips.rocketchip.util._
import constellation.noc.{HasNoCParams}
class NoCMonitor(val cParam: ChannelParams)(implicit val p: Parameters) extends Module with HasNoCParams {
val io = IO(new Bundle {
val in = Input(new Channel(cParam))
})
val in_flight = RegInit(VecInit(Seq.fill(cParam.nVirtualChannels) { false.B }))
for (i <- 0 until cParam.srcSpeedup) {
val flit = io.in.flit(i)
when (flit.valid) {
when (flit.bits.head) {
in_flight(flit.bits.virt_channel_id) := true.B
assert (!in_flight(flit.bits.virt_channel_id), "Flit head/tail sequencing is broken")
}
when (flit.bits.tail) {
in_flight(flit.bits.virt_channel_id) := false.B
}
}
val possibleFlows = cParam.possibleFlows
when (flit.valid && flit.bits.head) {
cParam match {
case n: ChannelParams => n.virtualChannelParams.zipWithIndex.foreach { case (v,i) =>
assert(flit.bits.virt_channel_id =/= i.U || v.possibleFlows.toSeq.map(_.isFlow(flit.bits.flow)).orR)
}
case _ => assert(cParam.possibleFlows.toSeq.map(_.isFlow(flit.bits.flow)).orR)
}
}
}
}
File Types.scala:
package constellation.routing
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.{Parameters}
import constellation.noc.{HasNoCParams}
import constellation.channel.{Flit}
/** A representation for 1 specific virtual channel in wormhole routing
*
* @param src the source node
* @param vc ID for the virtual channel
* @param dst the destination node
* @param n_vc the number of virtual channels
*/
// BEGIN: ChannelRoutingInfo
case class ChannelRoutingInfo(
src: Int,
dst: Int,
vc: Int,
n_vc: Int
) {
// END: ChannelRoutingInfo
require (src >= -1 && dst >= -1 && vc >= 0, s"Illegal $this")
require (!(src == -1 && dst == -1), s"Illegal $this")
require (vc < n_vc, s"Illegal $this")
val isIngress = src == -1
val isEgress = dst == -1
}
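// Editor's sketch (not part of the original file): how the constraints above play out for the three
// kinds of channels. The node and VC numbers are made up for illustration.
object ChannelRoutingInfoExamples {
  val physical = ChannelRoutingInfo(src = 0,  dst = 1,  vc = 2, n_vc = 4) // ordinary router-to-router channel
  val ingress  = ChannelRoutingInfo(src = -1, dst = 3,  vc = 0, n_vc = 1) // isIngress == true
  val egress   = ChannelRoutingInfo(src = 3,  dst = -1, vc = 0, n_vc = 1) // isEgress == true
  // ChannelRoutingInfo(src = -1, dst = -1, vc = 0, n_vc = 1) would fail the require above
}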
/** Represents the properties of a packet that are relevant for routing
* ingressId and egressId uniquely identify a flow, but vnet and dst are used here
* to simplify the implementation of routing relations
*
* @param ingressId packet's source ingress point
* @param egressId packet's destination egress point
* @param vNet virtual subnetwork identifier
* @param dst packet's destination node ID
*/
// BEGIN: FlowRoutingInfo
case class FlowRoutingInfo(
ingressId: Int,
egressId: Int,
vNetId: Int,
ingressNode: Int,
ingressNodeId: Int,
egressNode: Int,
egressNodeId: Int,
fifo: Boolean
) {
// END: FlowRoutingInfo
def isFlow(f: FlowRoutingBundle): Bool = {
(f.ingress_node === ingressNode.U &&
f.egress_node === egressNode.U &&
f.ingress_node_id === ingressNodeId.U &&
f.egress_node_id === egressNodeId.U)
}
def asLiteral(b: FlowRoutingBundle): BigInt = {
Seq(
(vNetId , b.vnet_id),
(ingressNode , b.ingress_node),
(ingressNodeId , b.ingress_node_id),
(egressNode , b.egress_node),
(egressNodeId , b.egress_node_id)
).foldLeft(0)((l, t) => {
(l << t._2.getWidth) | t._1
})
}
}
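// Editor's sketch (not part of the original file): asLiteral concatenates the five fields MSB-first,
// shifting the running value left by each hardware field's width before OR-ing in the next value.
// The widths below (2, 4, 3, 4, 3 bits) are hypothetical stand-ins for the getWidth calls above.
object FlowPackingExample {
  def pack(vNetId: Int, ingressNode: Int, ingressNodeId: Int, egressNode: Int, egressNodeId: Int): BigInt =
    Seq((vNetId, 2), (ingressNode, 4), (ingressNodeId, 3), (egressNode, 4), (egressNodeId, 3))
      .foldLeft(BigInt(0)) { case (acc, (value, width)) => (acc << width) | BigInt(value) }
}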
class FlowRoutingBundle(implicit val p: Parameters) extends Bundle with HasNoCParams {
// Instead of tracking ingress/egress ID, track the physical destination id and the offset at the destination
// This simplifies the routing tables
val vnet_id = UInt(log2Ceil(nVirtualNetworks).W)
val ingress_node = UInt(log2Ceil(nNodes).W)
val ingress_node_id = UInt(log2Ceil(maxIngressesAtNode).W)
val egress_node = UInt(log2Ceil(nNodes).W)
val egress_node_id = UInt(log2Ceil(maxEgressesAtNode).W)
}
| module NoCMonitor_7( // @[Monitor.scala:11:7]
input clock, // @[Monitor.scala:11:7]
input reset, // @[Monitor.scala:11:7]
input io_in_flit_0_valid, // @[Monitor.scala:12:14]
input io_in_flit_0_bits_head, // @[Monitor.scala:12:14]
input io_in_flit_0_bits_tail, // @[Monitor.scala:12:14]
input [3:0] io_in_flit_0_bits_flow_ingress_node, // @[Monitor.scala:12:14]
input [2:0] io_in_flit_0_bits_flow_ingress_node_id, // @[Monitor.scala:12:14]
input [3:0] io_in_flit_0_bits_flow_egress_node, // @[Monitor.scala:12:14]
input [2:0] io_in_flit_0_bits_flow_egress_node_id, // @[Monitor.scala:12:14]
input [3:0] io_in_flit_0_bits_virt_channel_id // @[Monitor.scala:12:14]
);
reg in_flight_0; // @[Monitor.scala:16:26]
reg in_flight_1; // @[Monitor.scala:16:26]
reg in_flight_2; // @[Monitor.scala:16:26]
reg in_flight_3; // @[Monitor.scala:16:26]
reg in_flight_4; // @[Monitor.scala:16:26]
reg in_flight_5; // @[Monitor.scala:16:26]
reg in_flight_6; // @[Monitor.scala:16:26]
reg in_flight_7; // @[Monitor.scala:16:26]
reg in_flight_8; // @[Monitor.scala:16:26]
reg in_flight_9; // @[Monitor.scala:16:26]
wire _GEN = io_in_flit_0_bits_virt_channel_id == 4'h2; // @[Monitor.scala:21:46]
wire _GEN_0 = io_in_flit_0_bits_virt_channel_id == 4'h6; // @[Monitor.scala:21:46] |
Generate the Verilog code corresponding to the following Chisel files.
File Monitor.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceLine
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import freechips.rocketchip.diplomacy.EnableMonitors
import freechips.rocketchip.formal.{MonitorDirection, IfThen, Property, PropertyClass, TestplanTestType, TLMonitorStrictMode}
import freechips.rocketchip.util.PlusArg
case class TLMonitorArgs(edge: TLEdge)
abstract class TLMonitorBase(args: TLMonitorArgs) extends Module
{
val io = IO(new Bundle {
val in = Input(new TLBundle(args.edge.bundle))
})
def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit
legalize(io.in, args.edge, reset)
}
object TLMonitor {
def apply(enable: Boolean, node: TLNode)(implicit p: Parameters): TLNode = {
if (enable) {
EnableMonitors { implicit p => node := TLEphemeralNode()(ValName("monitor")) }
} else { node }
}
}
class TLMonitor(args: TLMonitorArgs, monitorDir: MonitorDirection = MonitorDirection.Monitor) extends TLMonitorBase(args)
{
require (args.edge.params(TLMonitorStrictMode) || (! args.edge.params(TestplanTestType).formal))
val cover_prop_class = PropertyClass.Default
//Like assert but can flip to being an assumption for formal verification
def monAssert(cond: Bool, message: String): Unit =
if (monitorDir == MonitorDirection.Monitor) {
assert(cond, message)
} else {
Property(monitorDir, cond, message, PropertyClass.Default)
}
def assume(cond: Bool, message: String): Unit =
if (monitorDir == MonitorDirection.Monitor) {
assert(cond, message)
} else {
Property(monitorDir.flip, cond, message, PropertyClass.Default)
}
def extra = {
args.edge.sourceInfo match {
case SourceLine(filename, line, col) => s" (connected at $filename:$line:$col)"
case _ => ""
}
}
def visible(address: UInt, source: UInt, edge: TLEdge) =
edge.client.clients.map { c =>
!c.sourceId.contains(source) ||
c.visibility.map(_.contains(address)).reduce(_ || _)
}.reduce(_ && _)
def legalizeFormatA(bundle: TLBundleA, edge: TLEdge): Unit = {
//switch this flag to turn on diplomacy in error messages
def diplomacyInfo = if (true) "" else "\nThe diplomacy information for the edge is as follows:\n" + edge.formatEdge + "\n"
monAssert (TLMessages.isA(bundle.opcode), "'A' channel has invalid opcode" + extra)
// Reuse these subexpressions to save some firrtl lines
val source_ok = edge.client.contains(bundle.source)
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val mask = edge.full_mask(bundle)
monAssert (visible(edge.address(bundle), bundle.source, edge), "'A' channel carries an address illegal for the specified bank visibility")
//The monitor doesn’t check for acquire T vs acquire B, it assumes that acquire B implies acquire T and only checks for acquire B
//TODO: check for acquireT?
when (bundle.opcode === TLMessages.AcquireBlock) {
monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock from a client which does not support Probe" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel AcquireBlock carries invalid source ID" + diplomacyInfo + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquireBlock smaller than a beat" + extra)
monAssert (is_aligned, "'A' channel AcquireBlock address not aligned to size" + extra)
monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquireBlock carries invalid grow param" + extra)
monAssert (~bundle.mask === 0.U, "'A' channel AcquireBlock contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel AcquireBlock is corrupt" + extra)
}
when (bundle.opcode === TLMessages.AcquirePerm) {
monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm from a client which does not support Probe" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel AcquirePerm carries invalid source ID" + diplomacyInfo + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquirePerm smaller than a beat" + extra)
monAssert (is_aligned, "'A' channel AcquirePerm address not aligned to size" + extra)
monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquirePerm carries invalid grow param" + extra)
monAssert (bundle.param =/= TLPermissions.NtoB, "'A' channel AcquirePerm requests NtoB" + extra)
monAssert (~bundle.mask === 0.U, "'A' channel AcquirePerm contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel AcquirePerm is corrupt" + extra)
}
when (bundle.opcode === TLMessages.Get) {
monAssert (edge.master.emitsGet(bundle.source, bundle.size), "'A' channel carries Get type which master claims it can't emit" + diplomacyInfo + extra)
monAssert (edge.slave.supportsGetSafe(edge.address(bundle), bundle.size, None), "'A' channel carries Get type which slave claims it can't support" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel Get carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Get address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel Get carries invalid param" + extra)
monAssert (bundle.mask === mask, "'A' channel Get contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel Get is corrupt" + extra)
}
when (bundle.opcode === TLMessages.PutFullData) {
monAssert (edge.master.emitsPutFull(bundle.source, bundle.size) && edge.slave.supportsPutFullSafe(edge.address(bundle), bundle.size), "'A' channel carries PutFull type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel PutFull carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel PutFull address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel PutFull carries invalid param" + extra)
monAssert (bundle.mask === mask, "'A' channel PutFull contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.PutPartialData) {
monAssert (edge.master.emitsPutPartial(bundle.source, bundle.size) && edge.slave.supportsPutPartialSafe(edge.address(bundle), bundle.size), "'A' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel PutPartial carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel PutPartial address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel PutPartial carries invalid param" + extra)
monAssert ((bundle.mask & ~mask) === 0.U, "'A' channel PutPartial contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.ArithmeticData) {
monAssert (edge.master.emitsArithmetic(bundle.source, bundle.size) && edge.slave.supportsArithmeticSafe(edge.address(bundle), bundle.size), "'A' channel carries Arithmetic type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Arithmetic carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Arithmetic address not aligned to size" + extra)
monAssert (TLAtomics.isArithmetic(bundle.param), "'A' channel Arithmetic carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Arithmetic contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.LogicalData) {
monAssert (edge.master.emitsLogical(bundle.source, bundle.size) && edge.slave.supportsLogicalSafe(edge.address(bundle), bundle.size), "'A' channel carries Logical type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Logical carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Logical address not aligned to size" + extra)
monAssert (TLAtomics.isLogical(bundle.param), "'A' channel Logical carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Logical contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.Hint) {
monAssert (edge.master.emitsHint(bundle.source, bundle.size) && edge.slave.supportsHintSafe(edge.address(bundle), bundle.size), "'A' channel carries Hint type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Hint carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Hint address not aligned to size" + extra)
monAssert (TLHints.isHints(bundle.param), "'A' channel Hint carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Hint contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel Hint is corrupt" + extra)
}
}
def legalizeFormatB(bundle: TLBundleB, edge: TLEdge): Unit = {
monAssert (TLMessages.isB(bundle.opcode), "'B' channel has invalid opcode" + extra)
monAssert (visible(edge.address(bundle), bundle.source, edge), "'B' channel carries an address illegal for the specified bank visibility")
// Reuse these subexpressions to save some firrtl lines
val address_ok = edge.manager.containsSafe(edge.address(bundle))
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val mask = edge.full_mask(bundle)
val legal_source = Mux1H(edge.client.find(bundle.source), edge.client.clients.map(c => c.sourceId.start.U)) === bundle.source
when (bundle.opcode === TLMessages.Probe) {
assume (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'B' channel carries Probe type which is unexpected using diplomatic parameters" + extra)
assume (address_ok, "'B' channel Probe carries unmanaged address" + extra)
assume (legal_source, "'B' channel Probe carries source that is not first source" + extra)
assume (is_aligned, "'B' channel Probe address not aligned to size" + extra)
assume (TLPermissions.isCap(bundle.param), "'B' channel Probe carries invalid cap param" + extra)
assume (bundle.mask === mask, "'B' channel Probe contains invalid mask" + extra)
assume (!bundle.corrupt, "'B' channel Probe is corrupt" + extra)
}
when (bundle.opcode === TLMessages.Get) {
monAssert (edge.master.supportsGet(edge.source(bundle), bundle.size) && edge.slave.emitsGetSafe(edge.address(bundle), bundle.size), "'B' channel carries Get type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel Get carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Get carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Get address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel Get carries invalid param" + extra)
monAssert (bundle.mask === mask, "'B' channel Get contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'B' channel Get is corrupt" + extra)
}
when (bundle.opcode === TLMessages.PutFullData) {
monAssert (edge.master.supportsPutFull(edge.source(bundle), bundle.size) && edge.slave.emitsPutFullSafe(edge.address(bundle), bundle.size), "'B' channel carries PutFull type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel PutFull carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel PutFull carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel PutFull address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel PutFull carries invalid param" + extra)
monAssert (bundle.mask === mask, "'B' channel PutFull contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.PutPartialData) {
monAssert (edge.master.supportsPutPartial(edge.source(bundle), bundle.size) && edge.slave.emitsPutPartialSafe(edge.address(bundle), bundle.size), "'B' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel PutPartial carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel PutPartial carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel PutPartial address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel PutPartial carries invalid param" + extra)
monAssert ((bundle.mask & ~mask) === 0.U, "'B' channel PutPartial contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.ArithmeticData) {
monAssert (edge.master.supportsArithmetic(edge.source(bundle), bundle.size) && edge.slave.emitsArithmeticSafe(edge.address(bundle), bundle.size), "'B' channel carries Arithmetic type unsupported by master" + extra)
monAssert (address_ok, "'B' channel Arithmetic carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Arithmetic carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Arithmetic address not aligned to size" + extra)
monAssert (TLAtomics.isArithmetic(bundle.param), "'B' channel Arithmetic carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'B' channel Arithmetic contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.LogicalData) {
monAssert (edge.master.supportsLogical(edge.source(bundle), bundle.size) && edge.slave.emitsLogicalSafe(edge.address(bundle), bundle.size), "'B' channel carries Logical type unsupported by client" + extra)
monAssert (address_ok, "'B' channel Logical carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Logical carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Logical address not aligned to size" + extra)
monAssert (TLAtomics.isLogical(bundle.param), "'B' channel Logical carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'B' channel Logical contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.Hint) {
monAssert (edge.master.supportsHint(edge.source(bundle), bundle.size) && edge.slave.emitsHintSafe(edge.address(bundle), bundle.size), "'B' channel carries Hint type unsupported by client" + extra)
monAssert (address_ok, "'B' channel Hint carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Hint carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Hint address not aligned to size" + extra)
monAssert (bundle.mask === mask, "'B' channel Hint contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'B' channel Hint is corrupt" + extra)
}
}
def legalizeFormatC(bundle: TLBundleC, edge: TLEdge): Unit = {
monAssert (TLMessages.isC(bundle.opcode), "'C' channel has invalid opcode" + extra)
val source_ok = edge.client.contains(bundle.source)
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val address_ok = edge.manager.containsSafe(edge.address(bundle))
monAssert (visible(edge.address(bundle), bundle.source, edge), "'C' channel carries an address illegal for the specified bank visibility")
when (bundle.opcode === TLMessages.ProbeAck) {
monAssert (address_ok, "'C' channel ProbeAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel ProbeAck carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAck smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ProbeAck address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAck carries invalid report param" + extra)
monAssert (!bundle.corrupt, "'C' channel ProbeAck is corrupt" + extra)
}
when (bundle.opcode === TLMessages.ProbeAckData) {
monAssert (address_ok, "'C' channel ProbeAckData carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel ProbeAckData carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAckData smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ProbeAckData address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAckData carries invalid report param" + extra)
}
when (bundle.opcode === TLMessages.Release) {
monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries Release type unsupported by manager" + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra)
monAssert (source_ok, "'C' channel Release carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel Release smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel Release address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel Release carries invalid report param" + extra)
monAssert (!bundle.corrupt, "'C' channel Release is corrupt" + extra)
}
when (bundle.opcode === TLMessages.ReleaseData) {
monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData type unsupported by manager" + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra)
monAssert (source_ok, "'C' channel ReleaseData carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ReleaseData smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ReleaseData address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ReleaseData carries invalid report param" + extra)
}
when (bundle.opcode === TLMessages.AccessAck) {
monAssert (address_ok, "'C' channel AccessAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel AccessAck carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel AccessAck address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel AccessAck carries invalid param" + extra)
monAssert (!bundle.corrupt, "'C' channel AccessAck is corrupt" + extra)
}
when (bundle.opcode === TLMessages.AccessAckData) {
monAssert (address_ok, "'C' channel AccessAckData carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel AccessAckData carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel AccessAckData address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel AccessAckData carries invalid param" + extra)
}
when (bundle.opcode === TLMessages.HintAck) {
monAssert (address_ok, "'C' channel HintAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel HintAck carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel HintAck address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel HintAck carries invalid param" + extra)
monAssert (!bundle.corrupt, "'C' channel HintAck is corrupt" + extra)
}
}
def legalizeFormatD(bundle: TLBundleD, edge: TLEdge): Unit = {
assume (TLMessages.isD(bundle.opcode), "'D' channel has invalid opcode" + extra)
val source_ok = edge.client.contains(bundle.source)
val sink_ok = bundle.sink < edge.manager.endSinkId.U
val deny_put_ok = edge.manager.mayDenyPut.B
val deny_get_ok = edge.manager.mayDenyGet.B
when (bundle.opcode === TLMessages.ReleaseAck) {
assume (source_ok, "'D' channel ReleaseAck carries invalid source ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel ReleaseAck smaller than a beat" + extra)
assume (bundle.param === 0.U, "'D' channel ReleaseAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel ReleaseAck is corrupt" + extra)
assume (!bundle.denied, "'D' channel ReleaseAck is denied" + extra)
}
when (bundle.opcode === TLMessages.Grant) {
assume (source_ok, "'D' channel Grant carries invalid source ID" + extra)
assume (sink_ok, "'D' channel Grant carries invalid sink ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel Grant smaller than a beat" + extra)
assume (TLPermissions.isCap(bundle.param), "'D' channel Grant carries invalid cap param" + extra)
assume (bundle.param =/= TLPermissions.toN, "'D' channel Grant carries toN param" + extra)
assume (!bundle.corrupt, "'D' channel Grant is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel Grant is denied" + extra)
}
when (bundle.opcode === TLMessages.GrantData) {
assume (source_ok, "'D' channel GrantData carries invalid source ID" + extra)
assume (sink_ok, "'D' channel GrantData carries invalid sink ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel GrantData smaller than a beat" + extra)
assume (TLPermissions.isCap(bundle.param), "'D' channel GrantData carries invalid cap param" + extra)
assume (bundle.param =/= TLPermissions.toN, "'D' channel GrantData carries toN param" + extra)
assume (!bundle.denied || bundle.corrupt, "'D' channel GrantData is denied but not corrupt" + extra)
assume (deny_get_ok || !bundle.denied, "'D' channel GrantData is denied" + extra)
}
when (bundle.opcode === TLMessages.AccessAck) {
assume (source_ok, "'D' channel AccessAck carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel AccessAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel AccessAck is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel AccessAck is denied" + extra)
}
when (bundle.opcode === TLMessages.AccessAckData) {
assume (source_ok, "'D' channel AccessAckData carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel AccessAckData carries invalid param" + extra)
assume (!bundle.denied || bundle.corrupt, "'D' channel AccessAckData is denied but not corrupt" + extra)
assume (deny_get_ok || !bundle.denied, "'D' channel AccessAckData is denied" + extra)
}
when (bundle.opcode === TLMessages.HintAck) {
assume (source_ok, "'D' channel HintAck carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel HintAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel HintAck is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel HintAck is denied" + extra)
}
}
def legalizeFormatE(bundle: TLBundleE, edge: TLEdge): Unit = {
val sink_ok = bundle.sink < edge.manager.endSinkId.U
monAssert (sink_ok, "'E' channel carries invalid sink ID" + extra)
}
def legalizeFormat(bundle: TLBundle, edge: TLEdge) = {
when (bundle.a.valid) { legalizeFormatA(bundle.a.bits, edge) }
when (bundle.d.valid) { legalizeFormatD(bundle.d.bits, edge) }
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
when (bundle.b.valid) { legalizeFormatB(bundle.b.bits, edge) }
when (bundle.c.valid) { legalizeFormatC(bundle.c.bits, edge) }
when (bundle.e.valid) { legalizeFormatE(bundle.e.bits, edge) }
} else {
monAssert (!bundle.b.valid, "'B' channel valid and not TL-C" + extra)
monAssert (!bundle.c.valid, "'C' channel valid and not TL-C" + extra)
monAssert (!bundle.e.valid, "'E' channel valid and not TL-C" + extra)
}
}
def legalizeMultibeatA(a: DecoupledIO[TLBundleA], edge: TLEdge): Unit = {
val a_first = edge.first(a.bits, a.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (a.valid && !a_first) {
monAssert (a.bits.opcode === opcode, "'A' channel opcode changed within multibeat operation" + extra)
monAssert (a.bits.param === param, "'A' channel param changed within multibeat operation" + extra)
monAssert (a.bits.size === size, "'A' channel size changed within multibeat operation" + extra)
monAssert (a.bits.source === source, "'A' channel source changed within multibeat operation" + extra)
monAssert (a.bits.address=== address,"'A' channel address changed with multibeat operation" + extra)
}
when (a.fire && a_first) {
opcode := a.bits.opcode
param := a.bits.param
size := a.bits.size
source := a.bits.source
address := a.bits.address
}
}
def legalizeMultibeatB(b: DecoupledIO[TLBundleB], edge: TLEdge): Unit = {
val b_first = edge.first(b.bits, b.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (b.valid && !b_first) {
monAssert (b.bits.opcode === opcode, "'B' channel opcode changed within multibeat operation" + extra)
monAssert (b.bits.param === param, "'B' channel param changed within multibeat operation" + extra)
monAssert (b.bits.size === size, "'B' channel size changed within multibeat operation" + extra)
monAssert (b.bits.source === source, "'B' channel source changed within multibeat operation" + extra)
monAssert (b.bits.address=== address,"'B' channel address changed with multibeat operation" + extra)
}
when (b.fire && b_first) {
opcode := b.bits.opcode
param := b.bits.param
size := b.bits.size
source := b.bits.source
address := b.bits.address
}
}
def legalizeADSourceFormal(bundle: TLBundle, edge: TLEdge): Unit = {
// Symbolic variable
val sym_source = Wire(UInt(edge.client.endSourceId.W))
// TODO: Connect sym_source to a fixed value for simulation and to a
// free wire in formal
sym_source := 0.U
// Type casting Int to UInt
val maxSourceId = Wire(UInt(edge.client.endSourceId.W))
maxSourceId := edge.client.endSourceId.U
// Delayed version of sym_source
val sym_source_d = Reg(UInt(edge.client.endSourceId.W))
sym_source_d := sym_source
// These will be constraints for FV setup
Property(
MonitorDirection.Monitor,
(sym_source === sym_source_d),
"sym_source should remain stable",
PropertyClass.Default)
Property(
MonitorDirection.Monitor,
(sym_source <= maxSourceId),
"sym_source should take legal value",
PropertyClass.Default)
val my_resp_pend = RegInit(false.B)
val my_opcode = Reg(UInt())
val my_size = Reg(UInt())
val a_first = bundle.a.valid && edge.first(bundle.a.bits, bundle.a.fire)
val d_first = bundle.d.valid && edge.first(bundle.d.bits, bundle.d.fire)
val my_a_first_beat = a_first && (bundle.a.bits.source === sym_source)
val my_d_first_beat = d_first && (bundle.d.bits.source === sym_source)
val my_clr_resp_pend = (bundle.d.fire && my_d_first_beat)
val my_set_resp_pend = (bundle.a.fire && my_a_first_beat && !my_clr_resp_pend)
when (my_set_resp_pend) {
my_resp_pend := true.B
} .elsewhen (my_clr_resp_pend) {
my_resp_pend := false.B
}
when (my_a_first_beat) {
my_opcode := bundle.a.bits.opcode
my_size := bundle.a.bits.size
}
val my_resp_size = Mux(my_a_first_beat, bundle.a.bits.size, my_size)
val my_resp_opcode = Mux(my_a_first_beat, bundle.a.bits.opcode, my_opcode)
val my_resp_opcode_legal = Wire(Bool())
when ((my_resp_opcode === TLMessages.Get) || (my_resp_opcode === TLMessages.ArithmeticData) ||
(my_resp_opcode === TLMessages.LogicalData)) {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAckData)
} .elsewhen ((my_resp_opcode === TLMessages.PutFullData) || (my_resp_opcode === TLMessages.PutPartialData)) {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAck)
} .otherwise {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.HintAck)
}
monAssert (IfThen(my_resp_pend, !my_a_first_beat),
"Request message should not be sent with a source ID, for which a response message" +
"is already pending (not received until current cycle) for a prior request message" +
"with the same source ID" + extra)
assume (IfThen(my_clr_resp_pend, (my_set_resp_pend || my_resp_pend)),
"Response message should be accepted with a source ID only if a request message with the" +
"same source ID has been accepted or is being accepted in the current cycle" + extra)
assume (IfThen(my_d_first_beat, (my_a_first_beat || my_resp_pend)),
"Response message should be sent with a source ID only if a request message with the" +
"same source ID has been accepted or is being sent in the current cycle" + extra)
assume (IfThen(my_d_first_beat, (bundle.d.bits.size === my_resp_size)),
"If d_valid is 1, then d_size should be same as a_size of the corresponding request" +
"message" + extra)
assume (IfThen(my_d_first_beat, my_resp_opcode_legal),
"If d_valid is 1, then d_opcode should correspond with a_opcode of the corresponding" +
"request message" + extra)
}
def legalizeMultibeatC(c: DecoupledIO[TLBundleC], edge: TLEdge): Unit = {
val c_first = edge.first(c.bits, c.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (c.valid && !c_first) {
monAssert (c.bits.opcode === opcode, "'C' channel opcode changed within multibeat operation" + extra)
monAssert (c.bits.param === param, "'C' channel param changed within multibeat operation" + extra)
monAssert (c.bits.size === size, "'C' channel size changed within multibeat operation" + extra)
monAssert (c.bits.source === source, "'C' channel source changed within multibeat operation" + extra)
monAssert (c.bits.address=== address,"'C' channel address changed with multibeat operation" + extra)
}
when (c.fire && c_first) {
opcode := c.bits.opcode
param := c.bits.param
size := c.bits.size
source := c.bits.source
address := c.bits.address
}
}
def legalizeMultibeatD(d: DecoupledIO[TLBundleD], edge: TLEdge): Unit = {
val d_first = edge.first(d.bits, d.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val sink = Reg(UInt())
val denied = Reg(Bool())
when (d.valid && !d_first) {
assume (d.bits.opcode === opcode, "'D' channel opcode changed within multibeat operation" + extra)
assume (d.bits.param === param, "'D' channel param changed within multibeat operation" + extra)
assume (d.bits.size === size, "'D' channel size changed within multibeat operation" + extra)
assume (d.bits.source === source, "'D' channel source changed within multibeat operation" + extra)
assume (d.bits.sink === sink, "'D' channel sink changed with multibeat operation" + extra)
assume (d.bits.denied === denied, "'D' channel denied changed with multibeat operation" + extra)
}
when (d.fire && d_first) {
opcode := d.bits.opcode
param := d.bits.param
size := d.bits.size
source := d.bits.source
sink := d.bits.sink
denied := d.bits.denied
}
}
def legalizeMultibeat(bundle: TLBundle, edge: TLEdge): Unit = {
legalizeMultibeatA(bundle.a, edge)
legalizeMultibeatD(bundle.d, edge)
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
legalizeMultibeatB(bundle.b, edge)
legalizeMultibeatC(bundle.c, edge)
}
}
//This is left in for almond which doesn't adhere to the tilelink protocol
@deprecated("Use legalizeADSource instead if possible","")
def legalizeADSourceOld(bundle: TLBundle, edge: TLEdge): Unit = {
val inflight = RegInit(0.U(edge.client.endSourceId.W))
val a_first = edge.first(bundle.a.bits, bundle.a.fire)
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
val a_set = WireInit(0.U(edge.client.endSourceId.W))
when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) {
a_set := UIntToOH(bundle.a.bits.source)
assert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra)
}
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
assume((a_set | inflight)(bundle.d.bits.source), "'D' channel acknowledged for nothing inflight" + extra)
}
if (edge.manager.minLatency > 0) {
assume(a_set =/= d_clr || !a_set.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra)
}
inflight := (inflight | a_set) & ~d_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
assert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeADSource(bundle: TLBundle, edge: TLEdge): Unit = {
val a_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset)
val a_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything
val log_a_opcode_bus_size = log2Ceil(a_opcode_bus_size)
val log_a_size_bus_size = log2Ceil(a_size_bus_size)
def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits
val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) // size up to avoid width error
inflight.suggestName("inflight")
val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
inflight_opcodes.suggestName("inflight_opcodes")
val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
inflight_sizes.suggestName("inflight_sizes")
val a_first = edge.first(bundle.a.bits, bundle.a.fire)
a_first.suggestName("a_first")
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
d_first.suggestName("d_first")
val a_set = WireInit(0.U(edge.client.endSourceId.W))
val a_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
a_set.suggestName("a_set")
a_set_wo_ready.suggestName("a_set_wo_ready")
val a_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
a_opcodes_set.suggestName("a_opcodes_set")
val a_sizes_set = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
a_sizes_set.suggestName("a_sizes_set")
val a_opcode_lookup = WireInit(0.U((a_opcode_bus_size - 1).W))
a_opcode_lookup.suggestName("a_opcode_lookup")
a_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_a_opcode_bus_size.U) & size_to_numfullbits(1.U << log_a_opcode_bus_size.U)) >> 1.U
val a_size_lookup = WireInit(0.U((1 << log_a_size_bus_size).W))
a_size_lookup.suggestName("a_size_lookup")
a_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_a_size_bus_size.U) & size_to_numfullbits(1.U << log_a_size_bus_size.U)) >> 1.U
val responseMap = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.Grant, TLMessages.Grant))
val responseMapSecondOption = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.GrantData, TLMessages.Grant))
val a_opcodes_set_interm = WireInit(0.U(a_opcode_bus_size.W))
a_opcodes_set_interm.suggestName("a_opcodes_set_interm")
val a_sizes_set_interm = WireInit(0.U(a_size_bus_size.W))
a_sizes_set_interm.suggestName("a_sizes_set_interm")
when (bundle.a.valid && a_first && edge.isRequest(bundle.a.bits)) {
a_set_wo_ready := UIntToOH(bundle.a.bits.source)
}
when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) {
a_set := UIntToOH(bundle.a.bits.source)
a_opcodes_set_interm := (bundle.a.bits.opcode << 1.U) | 1.U
a_sizes_set_interm := (bundle.a.bits.size << 1.U) | 1.U
a_opcodes_set := (a_opcodes_set_interm) << (bundle.a.bits.source << log_a_opcode_bus_size.U)
a_sizes_set := (a_sizes_set_interm) << (bundle.a.bits.source << log_a_size_bus_size.U)
monAssert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra)
}
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
d_clr.suggestName("d_clr")
d_clr_wo_ready.suggestName("d_clr_wo_ready")
val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
d_opcodes_clr.suggestName("d_opcodes_clr")
val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
d_sizes_clr.suggestName("d_sizes_clr")
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr_wo_ready := UIntToOH(bundle.d.bits.source)
}
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
d_opcodes_clr := size_to_numfullbits(1.U << log_a_opcode_bus_size.U) << (bundle.d.bits.source << log_a_opcode_bus_size.U)
d_sizes_clr := size_to_numfullbits(1.U << log_a_size_bus_size.U) << (bundle.d.bits.source << log_a_size_bus_size.U)
}
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
val same_cycle_resp = bundle.a.valid && a_first && edge.isRequest(bundle.a.bits) && (bundle.a.bits.source === bundle.d.bits.source)
assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra)
when (same_cycle_resp) {
assume((bundle.d.bits.opcode === responseMap(bundle.a.bits.opcode)) ||
(bundle.d.bits.opcode === responseMapSecondOption(bundle.a.bits.opcode)), "'D' channel contains improper opcode response" + extra)
assume((bundle.a.bits.size === bundle.d.bits.size), "'D' channel contains improper response size" + extra)
} .otherwise {
assume((bundle.d.bits.opcode === responseMap(a_opcode_lookup)) ||
(bundle.d.bits.opcode === responseMapSecondOption(a_opcode_lookup)), "'D' channel contains improper opcode response" + extra)
assume((bundle.d.bits.size === a_size_lookup), "'D' channel contains improper response size" + extra)
}
}
when(bundle.d.valid && d_first && a_first && bundle.a.valid && (bundle.a.bits.source === bundle.d.bits.source) && !d_release_ack) {
assume((!bundle.d.ready) || bundle.a.ready, "ready check")
}
if (edge.manager.minLatency > 0) {
assume(a_set_wo_ready =/= d_clr_wo_ready || !a_set_wo_ready.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra)
}
inflight := (inflight | a_set) & ~d_clr
inflight_opcodes := (inflight_opcodes | a_opcodes_set) & ~d_opcodes_clr
inflight_sizes := (inflight_sizes | a_sizes_set) & ~d_sizes_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U }
}
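  // Editor's sketch (software model, not part of the original monitor): each source ID owns a small
  // slot in inflight_opcodes/inflight_sizes holding (value << 1) | 1, so an all-zero slot means
  // "no request outstanding" and the lookup shifts the slot back down and drops the valid bit.
  // The default slotBits of 4 corresponds to a_opcode_bus_size above; the helper name is invented.
  private def slotModel(source: Int, opcode: Int, slotBits: Int = 4): (BigInt, Int) = {
    val packed = BigInt((opcode << 1) | 1) << (source * slotBits)                       // what a_opcodes_set contributes
    val lookup = (((packed >> (source * slotBits)) & ((1 << slotBits) - 1)) >> 1).toInt // what a_opcode_lookup recovers
    (packed, lookup)                                                                    // lookup == opcode
  }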
def legalizeCDSource(bundle: TLBundle, edge: TLEdge): Unit = {
val c_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset)
val c_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything
val log_c_opcode_bus_size = log2Ceil(c_opcode_bus_size)
val log_c_size_bus_size = log2Ceil(c_size_bus_size)
def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits
val inflight = RegInit(0.U((2 max edge.client.endSourceId).W))
val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
inflight.suggestName("inflight")
inflight_opcodes.suggestName("inflight_opcodes")
inflight_sizes.suggestName("inflight_sizes")
val c_first = edge.first(bundle.c.bits, bundle.c.fire)
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
c_first.suggestName("c_first")
d_first.suggestName("d_first")
val c_set = WireInit(0.U(edge.client.endSourceId.W))
val c_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
val c_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val c_sizes_set = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
c_set.suggestName("c_set")
c_set_wo_ready.suggestName("c_set_wo_ready")
c_opcodes_set.suggestName("c_opcodes_set")
c_sizes_set.suggestName("c_sizes_set")
val c_opcode_lookup = WireInit(0.U((1 << log_c_opcode_bus_size).W))
val c_size_lookup = WireInit(0.U((1 << log_c_size_bus_size).W))
c_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_c_opcode_bus_size.U) & size_to_numfullbits(1.U << log_c_opcode_bus_size.U)) >> 1.U
c_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_c_size_bus_size.U) & size_to_numfullbits(1.U << log_c_size_bus_size.U)) >> 1.U
c_opcode_lookup.suggestName("c_opcode_lookup")
c_size_lookup.suggestName("c_size_lookup")
val c_opcodes_set_interm = WireInit(0.U(c_opcode_bus_size.W))
val c_sizes_set_interm = WireInit(0.U(c_size_bus_size.W))
c_opcodes_set_interm.suggestName("c_opcodes_set_interm")
c_sizes_set_interm.suggestName("c_sizes_set_interm")
when (bundle.c.valid && c_first && edge.isRequest(bundle.c.bits)) {
c_set_wo_ready := UIntToOH(bundle.c.bits.source)
}
when (bundle.c.fire && c_first && edge.isRequest(bundle.c.bits)) {
c_set := UIntToOH(bundle.c.bits.source)
c_opcodes_set_interm := (bundle.c.bits.opcode << 1.U) | 1.U
c_sizes_set_interm := (bundle.c.bits.size << 1.U) | 1.U
c_opcodes_set := (c_opcodes_set_interm) << (bundle.c.bits.source << log_c_opcode_bus_size.U)
c_sizes_set := (c_sizes_set_interm) << (bundle.c.bits.source << log_c_size_bus_size.U)
monAssert(!inflight(bundle.c.bits.source), "'C' channel re-used a source ID" + extra)
}
val c_probe_ack = bundle.c.bits.opcode === TLMessages.ProbeAck || bundle.c.bits.opcode === TLMessages.ProbeAckData
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
d_clr.suggestName("d_clr")
d_clr_wo_ready.suggestName("d_clr_wo_ready")
d_opcodes_clr.suggestName("d_opcodes_clr")
d_sizes_clr.suggestName("d_sizes_clr")
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
d_clr_wo_ready := UIntToOH(bundle.d.bits.source)
}
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
d_opcodes_clr := size_to_numfullbits(1.U << log_c_opcode_bus_size.U) << (bundle.d.bits.source << log_c_opcode_bus_size.U)
d_sizes_clr := size_to_numfullbits(1.U << log_c_size_bus_size.U) << (bundle.d.bits.source << log_c_size_bus_size.U)
}
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
val same_cycle_resp = bundle.c.valid && c_first && edge.isRequest(bundle.c.bits) && (bundle.c.bits.source === bundle.d.bits.source)
assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra)
when (same_cycle_resp) {
assume((bundle.d.bits.size === bundle.c.bits.size), "'D' channel contains improper response size" + extra)
} .otherwise {
assume((bundle.d.bits.size === c_size_lookup), "'D' channel contains improper response size" + extra)
}
}
when(bundle.d.valid && d_first && c_first && bundle.c.valid && (bundle.c.bits.source === bundle.d.bits.source) && d_release_ack && !c_probe_ack) {
assume((!bundle.d.ready) || bundle.c.ready, "ready check")
}
if (edge.manager.minLatency > 0) {
when (c_set_wo_ready.orR) {
assume(c_set_wo_ready =/= d_clr_wo_ready, s"'C' and 'D' concurrent, despite minlatency > 0" + extra)
}
}
inflight := (inflight | c_set) & ~d_clr
inflight_opcodes := (inflight_opcodes | c_opcodes_set) & ~d_opcodes_clr
inflight_sizes := (inflight_sizes | c_sizes_set) & ~d_sizes_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.c.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeDESink(bundle: TLBundle, edge: TLEdge): Unit = {
val inflight = RegInit(0.U(edge.manager.endSinkId.W))
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
val e_first = true.B
val d_set = WireInit(0.U(edge.manager.endSinkId.W))
when (bundle.d.fire && d_first && edge.isRequest(bundle.d.bits)) {
d_set := UIntToOH(bundle.d.bits.sink)
assume(!inflight(bundle.d.bits.sink), "'D' channel re-used a sink ID" + extra)
}
val e_clr = WireInit(0.U(edge.manager.endSinkId.W))
when (bundle.e.fire && e_first && edge.isResponse(bundle.e.bits)) {
e_clr := UIntToOH(bundle.e.bits.sink)
monAssert((d_set | inflight)(bundle.e.bits.sink), "'E' channel acknowledged for nothing inflight" + extra)
}
// edge.client.minLatency applies to BC, not DE
inflight := (inflight | d_set) & ~e_clr
}
def legalizeUnique(bundle: TLBundle, edge: TLEdge): Unit = {
val sourceBits = log2Ceil(edge.client.endSourceId)
val tooBig = 14 // >16kB worth of flight information gets to be too much
if (sourceBits > tooBig) {
println(s"WARNING: TLMonitor instantiated on a bus with source bits (${sourceBits}) > ${tooBig}; A=>D transaction flight will not be checked")
} else {
if (args.edge.params(TestplanTestType).simulation) {
if (args.edge.params(TLMonitorStrictMode)) {
legalizeADSource(bundle, edge)
legalizeCDSource(bundle, edge)
} else {
legalizeADSourceOld(bundle, edge)
}
}
if (args.edge.params(TestplanTestType).formal) {
legalizeADSourceFormal(bundle, edge)
}
}
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
// legalizeBCSourceAddress(bundle, edge) // too much state needed to synthesize...
val sinkBits = log2Ceil(edge.manager.endSinkId)
if (sinkBits > tooBig) {
println(s"WARNING: TLMonitor instantiated on a bus with sink bits (${sinkBits}) > ${tooBig}; D=>E transaction flight will not be checked")
} else {
legalizeDESink(bundle, edge)
}
}
}
def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit = {
legalizeFormat (bundle, edge)
legalizeMultibeat (bundle, edge)
legalizeUnique (bundle, edge)
}
}
File Misc.scala:
// See LICENSE.Berkeley for license details.
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util._
import chisel3.util.random.LFSR
import org.chipsalliance.cde.config.Parameters
import scala.math._
class ParameterizedBundle(implicit p: Parameters) extends Bundle
trait Clocked extends Bundle {
val clock = Clock()
val reset = Bool()
}
object DecoupledHelper {
def apply(rvs: Bool*) = new DecoupledHelper(rvs)
}
class DecoupledHelper(val rvs: Seq[Bool]) {
def fire(exclude: Bool, includes: Bool*) = {
require(rvs.contains(exclude), "Excluded Bool not present in DecoupledHelper! Note that DecoupledHelper uses referential equality for exclusion! If you don't want to exclude anything, use fire()!")
(rvs.filter(_ ne exclude) ++ includes).reduce(_ && _)
}
def fire() = {
rvs.reduce(_ && _)
}
}
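// Usage sketch (illustrative, not from the original source; enq/out0/out1 are hypothetical
// Decoupled ports): AND several ready/valid terms together and derive each port's handshake
// by excluding that port's own signal from the reduction.
//
//   val helper = DecoupledHelper(enq.valid, out0.ready, out1.ready)
//   out0.valid := helper.fire(out0.ready)   // all terms except out0.ready
//   out1.valid := helper.fire(out1.ready)
//   enq.ready  := helper.fire(enq.valid)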
object MuxT {
def apply[T <: Data, U <: Data](cond: Bool, con: (T, U), alt: (T, U)): (T, U) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2))
def apply[T <: Data, U <: Data, W <: Data](cond: Bool, con: (T, U, W), alt: (T, U, W)): (T, U, W) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3))
def apply[T <: Data, U <: Data, W <: Data, X <: Data](cond: Bool, con: (T, U, W, X), alt: (T, U, W, X)): (T, U, W, X) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3), Mux(cond, con._4, alt._4))
}
/** Creates a cascade of n MuxTs to search for a key value. */
object MuxTLookup {
def apply[S <: UInt, T <: Data, U <: Data](key: S, default: (T, U), mapping: Seq[(S, (T, U))]): (T, U) = {
var res = default
for ((k, v) <- mapping.reverse)
res = MuxT(k === key, v, res)
res
}
def apply[S <: UInt, T <: Data, U <: Data, W <: Data](key: S, default: (T, U, W), mapping: Seq[(S, (T, U, W))]): (T, U, W) = {
var res = default
for ((k, v) <- mapping.reverse)
res = MuxT(k === key, v, res)
res
}
}
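// Usage sketch (illustrative, not from the original source; cmd, CMD_WRITE, CMD_CLEAR and
// io.wdata are hypothetical): select a tuple of control values by key, with a default fallback.
//
//   val (wen, wdata) = MuxTLookup(cmd, (false.B, 0.U), Seq(
//     CMD_WRITE -> (true.B, io.wdata),
//     CMD_CLEAR -> (true.B, 0.U)))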
object ValidMux {
def apply[T <: Data](v1: ValidIO[T], v2: ValidIO[T]*): ValidIO[T] = {
apply(v1 +: v2.toSeq)
}
def apply[T <: Data](valids: Seq[ValidIO[T]]): ValidIO[T] = {
val out = Wire(Valid(valids.head.bits.cloneType))
out.valid := valids.map(_.valid).reduce(_ || _)
out.bits := MuxCase(valids.head.bits,
valids.map(v => (v.valid -> v.bits)))
out
}
}
object Str
{
def apply(s: String): UInt = {
var i = BigInt(0)
require(s.forall(validChar _))
for (c <- s)
i = (i << 8) | c
i.U((s.length*8).W)
}
def apply(x: Char): UInt = {
require(validChar(x))
x.U(8.W)
}
def apply(x: UInt): UInt = apply(x, 10)
def apply(x: UInt, radix: Int): UInt = {
val rad = radix.U
val w = x.getWidth
require(w > 0)
var q = x
var s = digit(q % rad)
for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
q = q / rad
s = Cat(Mux((radix == 10).B && q === 0.U, Str(' '), digit(q % rad)), s)
}
s
}
def apply(x: SInt): UInt = apply(x, 10)
def apply(x: SInt, radix: Int): UInt = {
val neg = x < 0.S
val abs = x.abs.asUInt
if (radix != 10) {
Cat(Mux(neg, Str('-'), Str(' ')), Str(abs, radix))
} else {
val rad = radix.U
val w = abs.getWidth
require(w > 0)
var q = abs
var s = digit(q % rad)
var needSign = neg
for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
q = q / rad
val placeSpace = q === 0.U
val space = Mux(needSign, Str('-'), Str(' '))
needSign = needSign && !placeSpace
s = Cat(Mux(placeSpace, space, digit(q % rad)), s)
}
Cat(Mux(needSign, Str('-'), Str(' ')), s)
}
}
private def digit(d: UInt): UInt = Mux(d < 10.U, Str('0')+d, Str(('a'-10).toChar)+d)(7,0)
private def validChar(x: Char) = x == (x & 0xFF)
}
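// Usage sketch (illustrative, not from the original source; counter is hypothetical):
// Str packs ASCII into a UInt, e.g. for printf output.
//
//   val tag = Str("AB")        // 16-bit literal 0x4142
//   val dec = Str(counter)     // ASCII decimal rendering of a UInt, space-padded
//   val hex = Str(counter, 16) // ASCII hexadecimal rendering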
object Split
{
def apply(x: UInt, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n0), x.extract(n0-1,0))
}
def apply(x: UInt, n1: Int, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0))
}
def apply(x: UInt, n2: Int, n1: Int, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n2), x.extract(n2-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0))
}
}
object Random
{
def apply(mod: Int, random: UInt): UInt = {
if (isPow2(mod)) random.extract(log2Ceil(mod)-1,0)
else PriorityEncoder(partition(apply(1 << log2Up(mod*8), random), mod))
}
def apply(mod: Int): UInt = apply(mod, randomizer)
def oneHot(mod: Int, random: UInt): UInt = {
if (isPow2(mod)) UIntToOH(random(log2Up(mod)-1,0))
else PriorityEncoderOH(partition(apply(1 << log2Up(mod*8), random), mod)).asUInt
}
def oneHot(mod: Int): UInt = oneHot(mod, randomizer)
private def randomizer = LFSR(16)
private def partition(value: UInt, slices: Int) =
Seq.tabulate(slices)(i => value < (((i + 1) << value.getWidth) / slices).U)
}
object Majority {
def apply(in: Set[Bool]): Bool = {
val n = (in.size >> 1) + 1
val clauses = in.subsets(n).map(_.reduce(_ && _))
clauses.reduce(_ || _)
}
def apply(in: Seq[Bool]): Bool = apply(in.toSet)
def apply(in: UInt): Bool = apply(in.asBools.toSet)
}
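// Usage sketch (illustrative, not from the original source; copy0..copy2 are hypothetical):
// majority vote over redundant signals or over the bits of a UInt.
//
//   val voted = Majority(Seq(copy0, copy1, copy2)) // true when at least 2 of 3 are true
//   val most  = Majority("b1101".U)                // true: three of the four bits are set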
object PopCountAtLeast {
private def two(x: UInt): (Bool, Bool) = x.getWidth match {
case 1 => (x.asBool, false.B)
case n =>
val half = x.getWidth / 2
val (leftOne, leftTwo) = two(x(half - 1, 0))
val (rightOne, rightTwo) = two(x(x.getWidth - 1, half))
(leftOne || rightOne, leftTwo || rightTwo || (leftOne && rightOne))
}
def apply(x: UInt, n: Int): Bool = n match {
case 0 => true.B
case 1 => x.orR
case 2 => two(x)._2
case 3 => PopCount(x) >= n.U
}
}
// This gets used everywhere, so make the smallest circuit possible ...
// Given an address and size, create a mask of beatBytes size
// eg: (0x3, 0, 4) => 0001, (0x3, 1, 4) => 0011, (0x3, 2, 4) => 1111
// groupBy applies an interleaved OR reduction; groupBy=2 takes 0010 => 01
object MaskGen {
def apply(addr_lo: UInt, lgSize: UInt, beatBytes: Int, groupBy: Int = 1): UInt = {
require (groupBy >= 1 && beatBytes >= groupBy)
require (isPow2(beatBytes) && isPow2(groupBy))
val lgBytes = log2Ceil(beatBytes)
val sizeOH = UIntToOH(lgSize | 0.U(log2Up(beatBytes).W), log2Up(beatBytes)) | (groupBy*2 - 1).U
def helper(i: Int): Seq[(Bool, Bool)] = {
if (i == 0) {
Seq((lgSize >= lgBytes.asUInt, true.B))
} else {
val sub = helper(i-1)
val size = sizeOH(lgBytes - i)
val bit = addr_lo(lgBytes - i)
val nbit = !bit
Seq.tabulate (1 << i) { j =>
val (sub_acc, sub_eq) = sub(j/2)
val eq = sub_eq && (if (j % 2 == 1) bit else nbit)
val acc = sub_acc || (size && eq)
(acc, eq)
}
}
}
if (groupBy == beatBytes) 1.U else
Cat(helper(lgBytes-log2Ceil(groupBy)).map(_._1).reverse)
}
}
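// Usage sketch (illustrative, not from the original source; addrLo, lgSize and beatBytes are
// hypothetical): derive the byte-lane strobes for one beat from the in-beat address offset and
// the log2 transfer size, e.g. to drive a TileLink 'A' channel mask.
//
//   val strobes = MaskGen(addrLo, lgSize, beatBytes)
//   a.mask := strobes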
File PlusArg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.experimental._
import chisel3.util.HasBlackBoxResource
@deprecated("This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05")
case class PlusArgInfo(default: BigInt, docstring: String)
/** Case class for PlusArg information
*
* @tparam A scala type of the PlusArg value
* @param default optional default value
* @param docstring text to include in the help
* @param doctype description of the Verilog type of the PlusArg value (e.g. STRING, INT)
*/
private case class PlusArgContainer[A](default: Option[A], docstring: String, doctype: String)
/** Typeclass for converting a type to a doctype string
* @tparam A some type
*/
trait Doctypeable[A] {
/** Return the doctype string for some option */
def toDoctype(a: Option[A]): String
}
/** Object containing implementations of the Doctypeable typeclass */
object Doctypes {
/** Converts an Int => "INT" */
implicit val intToDoctype = new Doctypeable[Int] { def toDoctype(a: Option[Int]) = "INT" }
/** Converts a BigInt => "INT" */
implicit val bigIntToDoctype = new Doctypeable[BigInt] { def toDoctype(a: Option[BigInt]) = "INT" }
/** Converts a String => "STRING" */
implicit val stringToDoctype = new Doctypeable[String] { def toDoctype(a: Option[String]) = "STRING" }
}
class plusarg_reader(val format: String, val default: BigInt, val docstring: String, val width: Int) extends BlackBox(Map(
"FORMAT" -> StringParam(format),
"DEFAULT" -> IntParam(default),
"WIDTH" -> IntParam(width)
)) with HasBlackBoxResource {
val io = IO(new Bundle {
val out = Output(UInt(width.W))
})
addResource("/vsrc/plusarg_reader.v")
}
/* This wrapper class has no outputs, making it clear it is a simulation-only construct */
class PlusArgTimeout(val format: String, val default: BigInt, val docstring: String, val width: Int) extends Module {
val io = IO(new Bundle {
val count = Input(UInt(width.W))
})
val max = Module(new plusarg_reader(format, default, docstring, width)).io.out
when (max > 0.U) {
assert (io.count < max, s"Timeout exceeded: $docstring")
}
}
import Doctypes._
object PlusArg
{
/** PlusArg("foo") will return 42.U if the simulation is run with +foo=42
* Do not use this as an initial register value. The value is set in an
* initial block and thus accessing it from another initial is racey.
* Add a docstring to document the arg, which can be dumped in an elaboration
* pass.
*/
def apply(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32): UInt = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new plusarg_reader(name + "=%d", default, docstring, width)).io.out
}
/** PlusArg.timeout(name, default, docstring)(count) will use chisel.assert
* to kill the simulation when count exceeds the specified integer argument.
* Default 0 will never assert.
*/
def timeout(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32)(count: UInt): Unit = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new PlusArgTimeout(name + "=%d", default, docstring, width)).io.count := count
}
}
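// Usage sketch (illustrative, not from the original source; names are hypothetical): read a
// runtime +arg and wire up a timeout. Running the simulator with +max_cycles=1000 overrides
// the default; both helpers instantiate Modules and so must be used inside a Module.
//
//   val maxCycles = PlusArg("max_cycles", default = 0, docstring = "Stop after N cycles (0 = never)")
//   PlusArg.timeout("deadlock_cycles", default = 0, docstring = "Deadlock watchdog")(idleCounter)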
object PlusArgArtefacts {
private var artefacts: Map[String, PlusArgContainer[_]] = Map.empty
/* Add a new PlusArg */
@deprecated(
"Use `Some(BigInt)` to specify a `default` value. This will be removed in Rocket Chip 2020.08",
"Rocket Chip 2020.05"
)
def append(name: String, default: BigInt, docstring: String): Unit = append(name, Some(default), docstring)
/** Add a new PlusArg
*
* @tparam A scala type of the PlusArg value
* @param name name for the PlusArg
* @param default optional default value
* @param docstring text to include in the help
*/
def append[A : Doctypeable](name: String, default: Option[A], docstring: String): Unit =
artefacts = artefacts ++
Map(name -> PlusArgContainer(default, docstring, implicitly[Doctypeable[A]].toDoctype(default)))
/* From plus args, generate help text */
private def serializeHelp_cHeader(tab: String = ""): String = artefacts
.map{ case(arg, info) =>
s"""|$tab+$arg=${info.doctype}\\n\\
|$tab${" "*20}${info.docstring}\\n\\
|""".stripMargin ++ info.default.map{ case default =>
s"$tab${" "*22}(default=${default})\\n\\\n"}.getOrElse("")
}.toSeq.mkString("\\n\\\n") ++ "\""
/* From plus args, generate a char array of their names */
private def serializeArray_cHeader(tab: String = ""): String = {
val prettyTab = tab + " " * 44 // Length of 'static const ...'
s"${tab}static const char * verilog_plusargs [] = {\\\n" ++
artefacts
.map{ case(arg, _) => s"""$prettyTab"$arg",\\\n""" }
.mkString("")++
s"${prettyTab}0};"
}
/* Generate C code to be included in emulator.cc that helps with
* argument parsing based on available Verilog PlusArgs */
def serialize_cHeader(): String =
s"""|#define PLUSARG_USAGE_OPTIONS \"EMULATOR VERILOG PLUSARGS\\n\\
|${serializeHelp_cHeader(" "*7)}
|${serializeArray_cHeader()}
|""".stripMargin
}
File package.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip
import chisel3._
import chisel3.util._
import scala.math.min
import scala.collection.{immutable, mutable}
package object util {
implicit class UnzippableOption[S, T](val x: Option[(S, T)]) {
def unzip = (x.map(_._1), x.map(_._2))
}
implicit class UIntIsOneOf(private val x: UInt) extends AnyVal {
def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR
def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq)
}
implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal {
/** Like Vec.apply(idx), but tolerates indices of mismatched width */
def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0))
}
implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal {
def apply(idx: UInt): T = {
if (x.size <= 1) {
x.head
} else if (!isPow2(x.size)) {
// For non-power-of-2 seqs, reflect elements to simplify decoder
(x ++ x.takeRight(x.size & -x.size)).toSeq(idx)
} else {
// Ignore MSBs of idx
val truncIdx =
if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx
else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0)
x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) }
}
}
def extract(idx: UInt): T = VecInit(x).extract(idx)
def asUInt: UInt = Cat(x.map(_.asUInt).reverse)
def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n)
def rotate(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n)
def rotateRight(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
}
// allow bitwise ops on Seq[Bool] just like UInt
implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal {
def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b }
def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b }
def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b }
def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x
def >> (n: Int): Seq[Bool] = x drop n
def unary_~ : Seq[Bool] = x.map(!_)
def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_)
def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_)
def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_)
private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B)
}
implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal {
def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable))
def getElements: Seq[Element] = x match {
case e: Element => Seq(e)
case a: Aggregate => a.getElements.flatMap(_.getElements)
}
}
/** Any Data subtype that has a Bool member named valid. */
type DataCanBeValid = Data { val valid: Bool }
implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal {
def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable)
}
implicit class StringToAugmentedString(private val x: String) extends AnyVal {
    /** converts from camel case to underscores, also removing all spaces */
def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") {
case (acc, c) if c.isUpper => acc + "_" + c.toLower
case (acc, c) if c == ' ' => acc
case (acc, c) => acc + c
}
/** converts spaces or underscores to hyphens, also lowering case */
def kebab: String = x.toLowerCase map {
case ' ' => '-'
case '_' => '-'
case c => c
}
def named(name: Option[String]): String = {
x + name.map("_named_" + _ ).getOrElse("_with_no_name")
}
def named(name: String): String = named(Some(name))
}
implicit def uintToBitPat(x: UInt): BitPat = BitPat(x)
implicit def wcToUInt(c: WideCounter): UInt = c.value
implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal {
def sextTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x)
}
def padTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(0.U((n - x.getWidth).W), x)
}
// shifts left by n if n >= 0, or right by -n if n < 0
def << (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << n(w-1, 0)
Mux(n(w), shifted >> (1 << w), shifted)
}
// shifts right by n if n >= 0, or left by -n if n < 0
def >> (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << (1 << w) >> n(w-1, 0)
Mux(n(w), shifted, shifted >> (1 << w))
}
// Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts
def extract(hi: Int, lo: Int): UInt = {
require(hi >= lo-1)
if (hi == lo-1) 0.U
else x(hi, lo)
}
// Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts
def extractOption(hi: Int, lo: Int): Option[UInt] = {
require(hi >= lo-1)
if (hi == lo-1) None
else Some(x(hi, lo))
}
// like x & ~y, but first truncate or zero-extend y to x's width
def andNot(y: UInt): UInt = x & ~(y | (x & 0.U))
def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n)
def rotateRight(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r))
}
}
def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n))
def rotateLeft(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r))
}
}
// compute (this + y) % n, given (this < n) and (y < n)
def addWrap(y: UInt, n: Int): UInt = {
val z = x +& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0)
}
// compute (this - y) % n, given (this < n) and (y < n)
def subWrap(y: UInt, n: Int): UInt = {
val z = x -& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0)
}
def grouped(width: Int): Seq[UInt] =
(0 until x.getWidth by width).map(base => x(base + width - 1, base))
def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds
def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x)
// Like >=, but prevents x-prop for ('x >= 0)
def >== (y: UInt): Bool = x >= y || y === 0.U
}
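  // Usage sketch (illustrative, not from the original source; rdPtr, entries, bits, hi, lo,
  // grantVec and shift are hypothetical): a few of the UInt extensions above in typical use.
  //
  //   val next = rdPtr.addWrap(1.U, entries)  // modular increment; entries need not be a power of 2
  //   val high = bits.extract(hi, lo)         // tolerates hi == lo-1 by returning 0.U
  //   val spun = grantVec.rotateLeft(shift)   // rotate by a run-time UInt amount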
implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal {
def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y)
def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y)
}
implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal {
def toInt: Int = if (x) 1 else 0
// this one's snagged from scalaz
def option[T](z: => T): Option[T] = if (x) Some(z) else None
}
implicit class IntToAugmentedInt(private val x: Int) extends AnyVal {
// exact log2
def log2: Int = {
require(isPow2(x))
log2Ceil(x)
}
}
def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x)
def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x))
def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0)
def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1)
def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None
// Fill 1s from low bits to high bits
def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth)
def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0))
helper(1, x)(width-1, 0)
}
  // Fill 1s from high bits to low bits
def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth)
def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x >> s))
helper(1, x)(width-1, 0)
}
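  // Usage sketch (illustrative, not from the original source): leftOR/rightOR smear set bits
  // across a word, e.g. turning a one-hot grant into a thermometer mask. For a 4-bit input:
  //
  //   leftOR("b0010".U(4.W))  // => b1110, ones propagate toward the MSB
  //   rightOR("b0100".U(4.W)) // => b0111, ones propagate toward the LSB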
def OptimizationBarrier[T <: Data](in: T): T = {
val barrier = Module(new Module {
val io = IO(new Bundle {
val x = Input(chiselTypeOf(in))
val y = Output(chiselTypeOf(in))
})
io.y := io.x
override def desiredName = s"OptimizationBarrier_${in.typeName}"
})
barrier.io.x := in
barrier.io.y
}
/** Similar to Seq.groupBy except this returns a Seq instead of a Map
* Useful for deterministic code generation
*/
def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = {
val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]]
for (x <- xs) {
val key = f(x)
val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A])
l += x
}
map.view.map({ case (k, vs) => k -> vs.toList }).toList
}
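  // Usage sketch (illustrative, not from the original source): unlike groupBy, the result
  // order follows the first appearance of each key, keeping generated hardware deterministic.
  //
  //   groupByIntoSeq(Seq("ab", "cd", "ax"))(_.head)
  //   // => Seq('a' -> Seq("ab", "ax"), 'c' -> Seq("cd"))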
def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match {
case 1 => List.fill(n)(in.head)
case x if x == n => in
case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in")
}
  // HeterogeneousBag moved to standalone diplomacy
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts)
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag
}
File Bundles.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import freechips.rocketchip.util._
import scala.collection.immutable.ListMap
import chisel3.util.Decoupled
import chisel3.util.DecoupledIO
import chisel3.reflect.DataMirror
abstract class TLBundleBase(val params: TLBundleParameters) extends Bundle
// common combos in lazy policy:
// Put + Acquire
// Release + AccessAck
object TLMessages
{
// A B C D E
def PutFullData = 0.U // . . => AccessAck
def PutPartialData = 1.U // . . => AccessAck
def ArithmeticData = 2.U // . . => AccessAckData
def LogicalData = 3.U // . . => AccessAckData
def Get = 4.U // . . => AccessAckData
def Hint = 5.U // . . => HintAck
def AcquireBlock = 6.U // . => Grant[Data]
def AcquirePerm = 7.U // . => Grant[Data]
def Probe = 6.U // . => ProbeAck[Data]
def AccessAck = 0.U // . .
def AccessAckData = 1.U // . .
def HintAck = 2.U // . .
def ProbeAck = 4.U // .
def ProbeAckData = 5.U // .
def Release = 6.U // . => ReleaseAck
def ReleaseData = 7.U // . => ReleaseAck
def Grant = 4.U // . => GrantAck
def GrantData = 5.U // . => GrantAck
def ReleaseAck = 6.U // .
def GrantAck = 0.U // .
def isA(x: UInt) = x <= AcquirePerm
def isB(x: UInt) = x <= Probe
def isC(x: UInt) = x <= ReleaseData
def isD(x: UInt) = x <= ReleaseAck
def adResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, Grant, Grant)
def bcResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, ProbeAck, ProbeAck)
def a = Seq( ("PutFullData",TLPermissions.PermMsgReserved),
("PutPartialData",TLPermissions.PermMsgReserved),
("ArithmeticData",TLAtomics.ArithMsg),
("LogicalData",TLAtomics.LogicMsg),
("Get",TLPermissions.PermMsgReserved),
("Hint",TLHints.HintsMsg),
("AcquireBlock",TLPermissions.PermMsgGrow),
("AcquirePerm",TLPermissions.PermMsgGrow))
def b = Seq( ("PutFullData",TLPermissions.PermMsgReserved),
("PutPartialData",TLPermissions.PermMsgReserved),
("ArithmeticData",TLAtomics.ArithMsg),
("LogicalData",TLAtomics.LogicMsg),
("Get",TLPermissions.PermMsgReserved),
("Hint",TLHints.HintsMsg),
("Probe",TLPermissions.PermMsgCap))
def c = Seq( ("AccessAck",TLPermissions.PermMsgReserved),
("AccessAckData",TLPermissions.PermMsgReserved),
("HintAck",TLPermissions.PermMsgReserved),
("Invalid Opcode",TLPermissions.PermMsgReserved),
("ProbeAck",TLPermissions.PermMsgReport),
("ProbeAckData",TLPermissions.PermMsgReport),
("Release",TLPermissions.PermMsgReport),
("ReleaseData",TLPermissions.PermMsgReport))
def d = Seq( ("AccessAck",TLPermissions.PermMsgReserved),
("AccessAckData",TLPermissions.PermMsgReserved),
("HintAck",TLPermissions.PermMsgReserved),
("Invalid Opcode",TLPermissions.PermMsgReserved),
("Grant",TLPermissions.PermMsgCap),
("GrantData",TLPermissions.PermMsgCap),
("ReleaseAck",TLPermissions.PermMsgReserved))
}
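// Usage sketch (illustrative, not from the original source; a/d are hypothetical channel
// bundles): opcodes are plain UInt constants, so decode is a comparison, and adResponse maps
// an A-channel opcode to the D-channel opcode expected in reply.
//
//   val isGrant   = d.opcode === TLMessages.Grant || d.opcode === TLMessages.GrantData
//   val expectedD = TLMessages.adResponse(a.opcode)  // e.g. Get -> AccessAckData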
/**
* The three primary TileLink permissions are:
* (T)runk: the agent is (or is on inwards path to) the global point of serialization.
 * (B)ranch: the agent is on an outwards path from the Trunk and holds read-only permissions.
 * (N)one: the agent holds no permissions on (and no copy of) the block.
* These permissions are permuted by transfer operations in various ways.
* Operations can cap permissions, request for them to be grown or shrunk,
* or for a report on their current status.
*/
object TLPermissions
{
val aWidth = 2
val bdWidth = 2
val cWidth = 3
  // Cap types (Grant = new permissions, Probe = permissions <= target)
def toT = 0.U(bdWidth.W)
def toB = 1.U(bdWidth.W)
def toN = 2.U(bdWidth.W)
def isCap(x: UInt) = x <= toN
// Grow types (Acquire = permissions >= target)
def NtoB = 0.U(aWidth.W)
def NtoT = 1.U(aWidth.W)
def BtoT = 2.U(aWidth.W)
def isGrow(x: UInt) = x <= BtoT
// Shrink types (ProbeAck, Release)
def TtoB = 0.U(cWidth.W)
def TtoN = 1.U(cWidth.W)
def BtoN = 2.U(cWidth.W)
def isShrink(x: UInt) = x <= BtoN
// Report types (ProbeAck, Release)
def TtoT = 3.U(cWidth.W)
def BtoB = 4.U(cWidth.W)
def NtoN = 5.U(cWidth.W)
def isReport(x: UInt) = x <= NtoN
def PermMsgGrow:Seq[String] = Seq("Grow NtoB", "Grow NtoT", "Grow BtoT")
def PermMsgCap:Seq[String] = Seq("Cap toT", "Cap toB", "Cap toN")
  def PermMsgReport:Seq[String] = Seq("Shrink TtoB", "Shrink TtoN", "Shrink BtoN", "Report TtoT", "Report BtoB", "Report NtoN")
def PermMsgReserved:Seq[String] = Seq("Reserved")
}
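// Usage sketch (illustrative, not from the original source; a/c/d are hypothetical channel
// bundles): the param field carries one of these encodings, e.g. an Acquire growing N->T
// answered by a Grant capping at toT, and a C-channel message reporting a shrink.
//
//   a.param := TLPermissions.NtoT                  // grow request on channel A
//   d.param := TLPermissions.toT                   // cap response on channel D
//   val dropped = TLPermissions.isShrink(c.param)  // true when C reports a permission downgrade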
object TLAtomics
{
val width = 3
// Arithmetic types
def MIN = 0.U(width.W)
def MAX = 1.U(width.W)
def MINU = 2.U(width.W)
def MAXU = 3.U(width.W)
def ADD = 4.U(width.W)
def isArithmetic(x: UInt) = x <= ADD
// Logical types
def XOR = 0.U(width.W)
def OR = 1.U(width.W)
def AND = 2.U(width.W)
def SWAP = 3.U(width.W)
def isLogical(x: UInt) = x <= SWAP
def ArithMsg:Seq[String] = Seq("MIN", "MAX", "MINU", "MAXU", "ADD")
def LogicMsg:Seq[String] = Seq("XOR", "OR", "AND", "SWAP")
}
object TLHints
{
val width = 1
def PREFETCH_READ = 0.U(width.W)
def PREFETCH_WRITE = 1.U(width.W)
def isHints(x: UInt) = x <= PREFETCH_WRITE
def HintsMsg:Seq[String] = Seq("PrefetchRead", "PrefetchWrite")
}
sealed trait TLChannel extends TLBundleBase {
val channelName: String
}
sealed trait TLDataChannel extends TLChannel
sealed trait TLAddrChannel extends TLDataChannel
final class TLBundleA(params: TLBundleParameters)
extends TLBundleBase(params) with TLAddrChannel
{
override def typeName = s"TLBundleA_${params.shortName}"
val channelName = "'A' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(List(TLAtomics.width, TLPermissions.aWidth, TLHints.width).max.W) // amo_opcode || grow perms || hint
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // from
val address = UInt(params.addressBits.W) // to
val user = BundleMap(params.requestFields)
val echo = BundleMap(params.echoFields)
// variable fields during multibeat:
val mask = UInt((params.dataBits/8).W)
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleB(params: TLBundleParameters)
extends TLBundleBase(params) with TLAddrChannel
{
override def typeName = s"TLBundleB_${params.shortName}"
val channelName = "'B' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(TLPermissions.bdWidth.W) // cap perms
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // to
val address = UInt(params.addressBits.W) // from
// variable fields during multibeat:
val mask = UInt((params.dataBits/8).W)
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleC(params: TLBundleParameters)
extends TLBundleBase(params) with TLAddrChannel
{
override def typeName = s"TLBundleC_${params.shortName}"
val channelName = "'C' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(TLPermissions.cWidth.W) // shrink or report perms
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // from
val address = UInt(params.addressBits.W) // to
val user = BundleMap(params.requestFields)
val echo = BundleMap(params.echoFields)
// variable fields during multibeat:
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleD(params: TLBundleParameters)
extends TLBundleBase(params) with TLDataChannel
{
override def typeName = s"TLBundleD_${params.shortName}"
val channelName = "'D' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(TLPermissions.bdWidth.W) // cap perms
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // to
val sink = UInt(params.sinkBits.W) // from
val denied = Bool() // implies corrupt iff *Data
val user = BundleMap(params.responseFields)
val echo = BundleMap(params.echoFields)
// variable fields during multibeat:
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleE(params: TLBundleParameters)
extends TLBundleBase(params) with TLChannel
{
override def typeName = s"TLBundleE_${params.shortName}"
val channelName = "'E' channel"
val sink = UInt(params.sinkBits.W) // to
}
class TLBundle(val params: TLBundleParameters) extends Record
{
// Emulate a Bundle with elements abcde or ad depending on params.hasBCE
private val optA = Some (Decoupled(new TLBundleA(params)))
private val optB = params.hasBCE.option(Flipped(Decoupled(new TLBundleB(params))))
private val optC = params.hasBCE.option(Decoupled(new TLBundleC(params)))
private val optD = Some (Flipped(Decoupled(new TLBundleD(params))))
private val optE = params.hasBCE.option(Decoupled(new TLBundleE(params)))
def a: DecoupledIO[TLBundleA] = optA.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleA(params)))))
def b: DecoupledIO[TLBundleB] = optB.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleB(params)))))
def c: DecoupledIO[TLBundleC] = optC.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleC(params)))))
def d: DecoupledIO[TLBundleD] = optD.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleD(params)))))
def e: DecoupledIO[TLBundleE] = optE.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleE(params)))))
val elements =
if (params.hasBCE) ListMap("e" -> e, "d" -> d, "c" -> c, "b" -> b, "a" -> a)
else ListMap("d" -> d, "a" -> a)
def tieoff(): Unit = {
DataMirror.specifiedDirectionOf(a.ready) match {
case SpecifiedDirection.Input =>
a.ready := false.B
c.ready := false.B
e.ready := false.B
b.valid := false.B
d.valid := false.B
case SpecifiedDirection.Output =>
a.valid := false.B
c.valid := false.B
e.valid := false.B
b.ready := false.B
d.ready := false.B
case _ =>
}
}
}
object TLBundle
{
def apply(params: TLBundleParameters) = new TLBundle(params)
}
class TLAsyncBundleBase(val params: TLAsyncBundleParameters) extends Bundle
class TLAsyncBundle(params: TLAsyncBundleParameters) extends TLAsyncBundleBase(params)
{
val a = new AsyncBundle(new TLBundleA(params.base), params.async)
val b = Flipped(new AsyncBundle(new TLBundleB(params.base), params.async))
val c = new AsyncBundle(new TLBundleC(params.base), params.async)
val d = Flipped(new AsyncBundle(new TLBundleD(params.base), params.async))
val e = new AsyncBundle(new TLBundleE(params.base), params.async)
}
class TLRationalBundle(params: TLBundleParameters) extends TLBundleBase(params)
{
val a = RationalIO(new TLBundleA(params))
val b = Flipped(RationalIO(new TLBundleB(params)))
val c = RationalIO(new TLBundleC(params))
val d = Flipped(RationalIO(new TLBundleD(params)))
val e = RationalIO(new TLBundleE(params))
}
class TLCreditedBundle(params: TLBundleParameters) extends TLBundleBase(params)
{
val a = CreditedIO(new TLBundleA(params))
val b = Flipped(CreditedIO(new TLBundleB(params)))
val c = CreditedIO(new TLBundleC(params))
val d = Flipped(CreditedIO(new TLBundleD(params)))
val e = CreditedIO(new TLBundleE(params))
}
File Parameters.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.diplomacy
import chisel3._
import chisel3.util.{DecoupledIO, Queue, ReadyValidIO, isPow2, log2Ceil, log2Floor}
import freechips.rocketchip.util.ShiftQueue
/** Options for describing the attributes of memory regions */
object RegionType {
// Define the 'more relaxed than' ordering
val cases = Seq(CACHED, TRACKED, UNCACHED, IDEMPOTENT, VOLATILE, PUT_EFFECTS, GET_EFFECTS)
sealed trait T extends Ordered[T] {
def compare(that: T): Int = cases.indexOf(that) compare cases.indexOf(this)
}
case object CACHED extends T // an intermediate agent may have cached a copy of the region for you
case object TRACKED extends T // the region may have been cached by another master, but coherence is being provided
case object UNCACHED extends T // the region has not been cached yet, but should be cached when possible
case object IDEMPOTENT extends T // gets return most recently put content, but content should not be cached
case object VOLATILE extends T // content may change without a put, but puts and gets have no side effects
case object PUT_EFFECTS extends T // puts produce side effects and so must not be combined/delayed
case object GET_EFFECTS extends T // gets produce side effects and so must not be issued speculatively
}
// A non-empty half-open range; [start, end)
case class IdRange(start: Int, end: Int) extends Ordered[IdRange]
{
require (start >= 0, s"Ids cannot be negative, but got: $start.")
require (start <= end, "Id ranges cannot be negative.")
def compare(x: IdRange) = {
val primary = (this.start - x.start).signum
val secondary = (x.end - this.end).signum
if (primary != 0) primary else secondary
}
def overlaps(x: IdRange) = start < x.end && x.start < end
def contains(x: IdRange) = start <= x.start && x.end <= end
def contains(x: Int) = start <= x && x < end
def contains(x: UInt) =
if (size == 0) {
false.B
} else if (size == 1) { // simple comparison
x === start.U
} else {
// find index of largest different bit
val largestDeltaBit = log2Floor(start ^ (end-1))
val smallestCommonBit = largestDeltaBit + 1 // may not exist in x
val uncommonMask = (1 << smallestCommonBit) - 1
val uncommonBits = (x | 0.U(smallestCommonBit.W))(largestDeltaBit, 0)
// the prefix must match exactly (note: may shift ALL bits away)
(x >> smallestCommonBit) === (start >> smallestCommonBit).U &&
// firrtl constant prop range analysis can eliminate these two:
(start & uncommonMask).U <= uncommonBits &&
uncommonBits <= ((end-1) & uncommonMask).U
}
def shift(x: Int) = IdRange(start+x, end+x)
def size = end - start
def isEmpty = end == start
def range = start until end
}
object IdRange
{
def overlaps(s: Seq[IdRange]) = if (s.isEmpty) None else {
val ranges = s.sorted
(ranges.tail zip ranges.init) find { case (a, b) => a overlaps b }
}
}
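// Usage sketch (illustrative, not from the original source; d is a hypothetical D-channel
// bundle): source-ID ranges are half-open, and contains(UInt) elaborates to a small
// prefix/range comparison.
//
//   val r = IdRange(4, 8)          // ids 4, 5, 6, 7
//   r.contains(5)                  // true (Scala-level check)
//   val hit = r.contains(d.source) // hardware Bool
//   r.shift(8)                     // IdRange(12, 16)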
// A potentially empty inclusive range of 2-powers [min, max] (in bytes)
case class TransferSizes(min: Int, max: Int)
{
def this(x: Int) = this(x, x)
require (min <= max, s"Min transfer $min > max transfer $max")
require (min >= 0 && max >= 0, s"TransferSizes must be positive, got: ($min, $max)")
require (max == 0 || isPow2(max), s"TransferSizes must be a power of 2, got: $max")
require (min == 0 || isPow2(min), s"TransferSizes must be a power of 2, got: $min")
require (max == 0 || min != 0, s"TransferSize 0 is forbidden unless (0,0), got: ($min, $max)")
def none = min == 0
def contains(x: Int) = isPow2(x) && min <= x && x <= max
def containsLg(x: Int) = contains(1 << x)
def containsLg(x: UInt) =
if (none) false.B
else if (min == max) { log2Ceil(min).U === x }
else { log2Ceil(min).U <= x && x <= log2Ceil(max).U }
def contains(x: TransferSizes) = x.none || (min <= x.min && x.max <= max)
def intersect(x: TransferSizes) =
if (x.max < min || max < x.min) TransferSizes.none
else TransferSizes(scala.math.max(min, x.min), scala.math.min(max, x.max))
// Not a union, because the result may contain sizes contained by neither term
// NOT TO BE CONFUSED WITH COVERPOINTS
def mincover(x: TransferSizes) = {
if (none) {
x
} else if (x.none) {
this
} else {
TransferSizes(scala.math.min(min, x.min), scala.math.max(max, x.max))
}
}
override def toString() = "TransferSizes[%d, %d]".format(min, max)
}
object TransferSizes {
def apply(x: Int) = new TransferSizes(x)
val none = new TransferSizes(0)
def mincover(seq: Seq[TransferSizes]) = seq.foldLeft(none)(_ mincover _)
def intersect(seq: Seq[TransferSizes]) = seq.reduce(_ intersect _)
implicit def asBool(x: TransferSizes) = !x.none
}
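// Usage sketch (illustrative, not from the original source; a is a hypothetical A-channel
// bundle): an inclusive range of power-of-two sizes in bytes.
//
//   val sizes = TransferSizes(4, 64)
//   sizes.contains(16)                   // true
//   sizes.contains(TransferSizes(8, 32)) // true: fully covered
//   sizes.containsLg(a.size)             // hardware Bool from a log2-encoded size field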
// AddressSets specify the address space managed by the manager
// Base is the base address, and mask are the bits consumed by the manager
// e.g: base=0x200, mask=0xff describes a device managing 0x200-0x2ff
// e.g: base=0x1000, mask=0xf0f describes a device managing 0x1000-0x100f, 0x1100-0x110f, ...
case class AddressSet(base: BigInt, mask: BigInt) extends Ordered[AddressSet]
{
// Forbid misaligned base address (and empty sets)
require ((base & mask) == 0, s"Mis-aligned AddressSets are forbidden, got: ${this.toString}")
require (base >= 0, s"AddressSet negative base is ambiguous: $base") // TL2 address widths are not fixed => negative is ambiguous
// We do allow negative mask (=> ignore all high bits)
def contains(x: BigInt) = ((x ^ base) & ~mask) == 0
def contains(x: UInt) = ((x ^ base.U).zext & (~mask).S) === 0.S
// turn x into an address contained in this set
def legalize(x: UInt): UInt = base.U | (mask.U & x)
// overlap iff bitwise: both care (~mask0 & ~mask1) => both equal (base0=base1)
def overlaps(x: AddressSet) = (~(mask | x.mask) & (base ^ x.base)) == 0
// contains iff bitwise: x.mask => mask && contains(x.base)
def contains(x: AddressSet) = ((x.mask | (base ^ x.base)) & ~mask) == 0
// The number of bytes to which the manager must be aligned
def alignment = ((mask + 1) & ~mask)
// Is this a contiguous memory range
def contiguous = alignment == mask+1
def finite = mask >= 0
def max = { require (finite, "Max cannot be calculated on infinite mask"); base | mask }
// Widen the match function to ignore all bits in imask
def widen(imask: BigInt) = AddressSet(base & ~imask, mask | imask)
// Return an AddressSet that only contains the addresses both sets contain
def intersect(x: AddressSet): Option[AddressSet] = {
if (!overlaps(x)) {
None
} else {
val r_mask = mask & x.mask
val r_base = base | x.base
Some(AddressSet(r_base, r_mask))
}
}
def subtract(x: AddressSet): Seq[AddressSet] = {
intersect(x) match {
case None => Seq(this)
case Some(remove) => AddressSet.enumerateBits(mask & ~remove.mask).map { bit =>
val nmask = (mask & (bit-1)) | remove.mask
val nbase = (remove.base ^ bit) & ~nmask
AddressSet(nbase, nmask)
}
}
}
// AddressSets have one natural Ordering (the containment order, if contiguous)
def compare(x: AddressSet) = {
val primary = (this.base - x.base).signum // smallest address first
val secondary = (x.mask - this.mask).signum // largest mask first
if (primary != 0) primary else secondary
}
// We always want to see things in hex
override def toString() = {
if (mask >= 0) {
"AddressSet(0x%x, 0x%x)".format(base, mask)
} else {
"AddressSet(0x%x, ~0x%x)".format(base, ~mask)
}
}
def toRanges = {
require (finite, "Ranges cannot be calculated on infinite mask")
val size = alignment
val fragments = mask & ~(size-1)
val bits = bitIndexes(fragments)
(BigInt(0) until (BigInt(1) << bits.size)).map { i =>
val off = bitIndexes(i).foldLeft(base) { case (a, b) => a.setBit(bits(b)) }
AddressRange(off, size)
}
}
}
object AddressSet
{
val everything = AddressSet(0, -1)
def misaligned(base: BigInt, size: BigInt, tail: Seq[AddressSet] = Seq()): Seq[AddressSet] = {
if (size == 0) tail.reverse else {
val maxBaseAlignment = base & (-base) // 0 for infinite (LSB)
val maxSizeAlignment = BigInt(1) << log2Floor(size) // MSB of size
val step =
if (maxBaseAlignment == 0 || maxBaseAlignment > maxSizeAlignment)
maxSizeAlignment else maxBaseAlignment
misaligned(base+step, size-step, AddressSet(base, step-1) +: tail)
}
}
def unify(seq: Seq[AddressSet], bit: BigInt): Seq[AddressSet] = {
// Pair terms up by ignoring 'bit'
seq.distinct.groupBy(x => x.copy(base = x.base & ~bit)).map { case (key, seq) =>
if (seq.size == 1) {
seq.head // singleton -> unaffected
} else {
key.copy(mask = key.mask | bit) // pair - widen mask by bit
}
}.toList
}
def unify(seq: Seq[AddressSet]): Seq[AddressSet] = {
val bits = seq.map(_.base).foldLeft(BigInt(0))(_ | _)
AddressSet.enumerateBits(bits).foldLeft(seq) { case (acc, bit) => unify(acc, bit) }.sorted
}
def enumerateMask(mask: BigInt): Seq[BigInt] = {
def helper(id: BigInt, tail: Seq[BigInt]): Seq[BigInt] =
if (id == mask) (id +: tail).reverse else helper(((~mask | id) + 1) & mask, id +: tail)
helper(0, Nil)
}
def enumerateBits(mask: BigInt): Seq[BigInt] = {
def helper(x: BigInt): Seq[BigInt] = {
if (x == 0) {
Nil
} else {
val bit = x & (-x)
bit +: helper(x & ~bit)
}
}
helper(mask)
}
}
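// Usage sketch (illustrative, not from the original source; a is a hypothetical A-channel
// bundle): base/mask matching as described in the comment above AddressSet.
//
//   val rom = AddressSet(0x10000, 0xfff)  // covers 0x10000 - 0x10fff
//   rom.contains(0x10a00)                 // true (BigInt overload)
//   val sel = rom.contains(a.address)     // hardware Bool
//   AddressSet.misaligned(0x3000, 0x5000) // [0x3000, 0x8000) as naturally aligned sets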
case class BufferParams(depth: Int, flow: Boolean, pipe: Boolean)
{
require (depth >= 0, "Buffer depth must be >= 0")
def isDefined = depth > 0
def latency = if (isDefined && !flow) 1 else 0
def apply[T <: Data](x: DecoupledIO[T]) =
if (isDefined) Queue(x, depth, flow=flow, pipe=pipe)
else x
def irrevocable[T <: Data](x: ReadyValidIO[T]) =
if (isDefined) Queue.irrevocable(x, depth, flow=flow, pipe=pipe)
else x
def sq[T <: Data](x: DecoupledIO[T]) =
if (!isDefined) x else {
val sq = Module(new ShiftQueue(x.bits, depth, flow=flow, pipe=pipe))
sq.io.enq <> x
sq.io.deq
}
override def toString() = "BufferParams:%d%s%s".format(depth, if (flow) "F" else "", if (pipe) "P" else "")
}
object BufferParams
{
implicit def apply(depth: Int): BufferParams = BufferParams(depth, false, false)
val default = BufferParams(2)
val none = BufferParams(0)
val flow = BufferParams(1, true, false)
val pipe = BufferParams(1, false, true)
}
case class TriStateValue(value: Boolean, set: Boolean)
{
def update(orig: Boolean) = if (set) value else orig
}
object TriStateValue
{
implicit def apply(value: Boolean): TriStateValue = TriStateValue(value, true)
def unset = TriStateValue(false, false)
}
trait DirectedBuffers[T] {
def copyIn(x: BufferParams): T
def copyOut(x: BufferParams): T
def copyInOut(x: BufferParams): T
}
trait IdMapEntry {
def name: String
def from: IdRange
def to: IdRange
def isCache: Boolean
def requestFifo: Boolean
def maxTransactionsInFlight: Option[Int]
def pretty(fmt: String) =
if (from ne to) { // if the subclass uses the same reference for both from and to, assume its format string has an arity of 5
fmt.format(to.start, to.end, from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "")
} else {
fmt.format(from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "")
}
}
abstract class IdMap[T <: IdMapEntry] {
protected val fmt: String
val mapping: Seq[T]
def pretty: String = mapping.map(_.pretty(fmt)).mkString(",\n")
}
File Edges.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.util._
class TLEdge(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdgeParameters(client, manager, params, sourceInfo)
{
def isAligned(address: UInt, lgSize: UInt): Bool = {
if (maxLgSize == 0) true.B else {
val mask = UIntToOH1(lgSize, maxLgSize)
(address & mask) === 0.U
}
}
def mask(address: UInt, lgSize: UInt): UInt =
MaskGen(address, lgSize, manager.beatBytes)
def staticHasData(bundle: TLChannel): Option[Boolean] = {
bundle match {
case _:TLBundleA => {
// Do there exist A messages with Data?
val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial
// Do there exist A messages without Data?
val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint
// Statically optimize the case where hasData is a constant
if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None
}
case _:TLBundleB => {
// Do there exist B messages with Data?
val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial
// Do there exist B messages without Data?
val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint
// Statically optimize the case where hasData is a constant
if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None
}
case _:TLBundleC => {
      // Do there exist C messages with Data?
val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe
// Do there exist C messages without Data?
val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe
if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None
}
case _:TLBundleD => {
      // Do there exist D messages with Data?
val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB
// Do there exist D messages without Data?
val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT
if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None
}
case _:TLBundleE => Some(false)
}
}
def isRequest(x: TLChannel): Bool = {
x match {
case a: TLBundleA => true.B
case b: TLBundleB => true.B
case c: TLBundleC => c.opcode(2) && c.opcode(1)
// opcode === TLMessages.Release ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(2) && !d.opcode(1)
// opcode === TLMessages.Grant ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
}
def isResponse(x: TLChannel): Bool = {
x match {
case a: TLBundleA => false.B
case b: TLBundleB => false.B
case c: TLBundleC => !c.opcode(2) || !c.opcode(1)
// opcode =/= TLMessages.Release &&
// opcode =/= TLMessages.ReleaseData
case d: TLBundleD => true.B // Grant isResponse + isRequest
case e: TLBundleE => true.B
}
}
def hasData(x: TLChannel): Bool = {
val opdata = x match {
case a: TLBundleA => !a.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case b: TLBundleB => !b.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case c: TLBundleC => c.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.ProbeAckData ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
staticHasData(x).map(_.B).getOrElse(opdata)
}
def opcode(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.opcode
case b: TLBundleB => b.opcode
case c: TLBundleC => c.opcode
case d: TLBundleD => d.opcode
}
}
def param(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.param
case b: TLBundleB => b.param
case c: TLBundleC => c.param
case d: TLBundleD => d.param
}
}
def size(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.size
case b: TLBundleB => b.size
case c: TLBundleC => c.size
case d: TLBundleD => d.size
}
}
def data(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.data
case b: TLBundleB => b.data
case c: TLBundleC => c.data
case d: TLBundleD => d.data
}
}
def corrupt(x: TLDataChannel): Bool = {
x match {
case a: TLBundleA => a.corrupt
case b: TLBundleB => b.corrupt
case c: TLBundleC => c.corrupt
case d: TLBundleD => d.corrupt
}
}
def mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.mask
case b: TLBundleB => b.mask
case c: TLBundleC => mask(c.address, c.size)
}
}
def full_mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => mask(a.address, a.size)
case b: TLBundleB => mask(b.address, b.size)
case c: TLBundleC => mask(c.address, c.size)
}
}
def address(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.address
case b: TLBundleB => b.address
case c: TLBundleC => c.address
}
}
def source(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.source
case b: TLBundleB => b.source
case c: TLBundleC => c.source
case d: TLBundleD => d.source
}
}
def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes)
def addr_lo(x: UInt): UInt =
if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0)
def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x))
def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x))
def numBeats(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 1.U
case bundle: TLDataChannel => {
val hasData = this.hasData(bundle)
val size = this.size(bundle)
val cutoff = log2Ceil(manager.beatBytes)
val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U
val decode = UIntToOH(size, maxLgSize+1) >> cutoff
Mux(hasData, decode | small.asUInt, 1.U)
}
}
}
def numBeats1(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 0.U
case bundle: TLDataChannel => {
if (maxLgSize == 0) {
0.U
} else {
val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes)
Mux(hasData(bundle), decode, 0.U)
}
}
}
}
def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val beats1 = numBeats1(bits)
val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W))
val counter1 = counter - 1.U
val first = counter === 0.U
val last = counter === 1.U || beats1 === 0.U
val done = last && fire
val count = (beats1 & ~counter1)
when (fire) {
counter := Mux(first, beats1, counter1)
}
(first, last, done, count)
}
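  // Usage sketch (illustrative, not from the original source; edge and out are hypothetical,
  // held by a client of this TLEdge): beat bookkeeping for a multibeat message; first is true
  // on the initial beat, done pulses on the final accepted beat, count gives the beat index.
  //
  //   val (first, last, done, count) = edge.count(out.a)
  //   when (done) { /* the whole A message has been sent */ }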
def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1
def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire)
def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid)
def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2
def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire)
def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid)
def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3
def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire)
def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid)
def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3)
}
def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire)
def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid)
def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4)
}
def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire)
def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid)
def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes))
}
def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire)
def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid)
// Does the request need T permissions to be executed?
def needT(a: TLBundleA): Bool = {
val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLPermissions.NtoB -> false.B,
TLPermissions.NtoT -> true.B,
TLPermissions.BtoT -> true.B))
MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array(
TLMessages.PutFullData -> true.B,
TLMessages.PutPartialData -> true.B,
TLMessages.ArithmeticData -> true.B,
TLMessages.LogicalData -> true.B,
TLMessages.Get -> false.B,
TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLHints.PREFETCH_READ -> false.B,
TLHints.PREFETCH_WRITE -> true.B)),
TLMessages.AcquireBlock -> acq_needT,
TLMessages.AcquirePerm -> acq_needT))
}
// This is a very expensive circuit; use only if you really mean it!
def inFlight(x: TLBundle): (UInt, UInt) = {
val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W))
val bce = manager.anySupportAcquireB && client.anySupportProbe
val (a_first, a_last, _) = firstlast(x.a)
val (b_first, b_last, _) = firstlast(x.b)
val (c_first, c_last, _) = firstlast(x.c)
val (d_first, d_last, _) = firstlast(x.d)
val (e_first, e_last, _) = firstlast(x.e)
val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits))
val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits))
val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits))
val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits))
val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits))
val a_inc = x.a.fire && a_first && a_request
val b_inc = x.b.fire && b_first && b_request
val c_inc = x.c.fire && c_first && c_request
val d_inc = x.d.fire && d_first && d_request
val e_inc = x.e.fire && e_first && e_request
val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil))
val a_dec = x.a.fire && a_last && a_response
val b_dec = x.b.fire && b_last && b_response
val c_dec = x.c.fire && c_last && c_response
val d_dec = x.d.fire && d_last && d_response
val e_dec = x.e.fire && e_last && e_response
val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil))
val next_flight = flight + PopCount(inc) - PopCount(dec)
flight := next_flight
(flight, next_flight)
}
def prettySourceMapping(context: String): String = {
s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n"
}
}
class TLEdgeOut(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
// Transfers
def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquireBlock
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquirePerm
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.Release
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ReleaseData
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) =
Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B)
def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAck
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions, data)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAckData
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B)
def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink)
def GrantAck(toSink: UInt): TLBundleE = {
val e = Wire(new TLBundleE(bundle))
e.sink := toSink
e
}
// Accesses
def Get(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsGetFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Get
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutFullFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutFullData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, mask, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutPartialFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutPartialData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = {
require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsArithmeticFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.ArithmeticData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsLogicalFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.LogicalData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = {
require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsHintFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Hint
a.param := param
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data)
def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAckData
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size)
def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.HintAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
}
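// A minimal usage sketch (assumed names: `edge: TLEdgeOut`, `out: TLBundle`, and
// request signals `doRead`/`readAddr` from the enclosing module implementation).
// Each constructor above returns (legal, bits); the legal Bool is the fast
// address-decode check and is typically folded into valid or asserted:
//   val (legal, getBits) = edge.Get(fromSource = 0.U, toAddress = readAddr, lgSize = 3.U)
//   out.a.valid := doRead
//   out.a.bits  := getBits
//   assert(!doRead || legal, "Get target does not support this access")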
class TLEdgeIn(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = {
val todo = x.filter(!_.isEmpty)
val heads = todo.map(_.head)
val tails = todo.map(_.tail)
if (todo.isEmpty) Nil else { heads +: myTranspose(tails) }
}
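  // Unlike Seq.transpose, this tolerates ragged input by dropping exhausted rows,
  // e.g. myTranspose(Seq(Seq(1, 2, 3), Seq(4, 5))) == Seq(Seq(1, 4), Seq(2, 5), Seq(3)).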
// Transfers
def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = {
require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}")
val legal = client.supportsProbe(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Probe
b.param := capPermissions
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.Grant
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.GrantData
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B)
def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.ReleaseAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
// Accesses
def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = {
require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsGet(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Get
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsPutFull(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutFullData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, mask, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsPutPartial(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutPartialData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsArithmetic(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.ArithmeticData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsLogical(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.LogicalData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = {
require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsHint(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Hint
b.param := param
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size)
def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied)
def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data)
def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAckData
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B)
def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied)
def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B)
def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.HintAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
}
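// A minimal usage sketch of the inward-edge responders (assumed names: `edge: TLEdgeIn`,
// `in: TLBundle`, and `readData` inside a simple single-beat manager). AccessAck echoes
// the source and size of the request it answers:
//   in.d.valid := in.a.valid && in.a.bits.opcode === TLMessages.Get
//   in.d.bits  := edge.AccessAck(in.a.bits, readData)
//   in.a.ready := in.d.ready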
| module TLMonitor_88( // @[Monitor.scala:36:7]
input clock, // @[Monitor.scala:36:7]
input reset, // @[Monitor.scala:36:7]
input io_in_a_ready, // @[Monitor.scala:20:14]
input io_in_a_valid, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_opcode, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_param, // @[Monitor.scala:20:14]
input [1:0] io_in_a_bits_size, // @[Monitor.scala:20:14]
input [10:0] io_in_a_bits_source, // @[Monitor.scala:20:14]
input [20:0] io_in_a_bits_address, // @[Monitor.scala:20:14]
input [7:0] io_in_a_bits_mask, // @[Monitor.scala:20:14]
input [63:0] io_in_a_bits_data, // @[Monitor.scala:20:14]
input io_in_a_bits_corrupt, // @[Monitor.scala:20:14]
input io_in_d_ready, // @[Monitor.scala:20:14]
input io_in_d_valid, // @[Monitor.scala:20:14]
input [2:0] io_in_d_bits_opcode, // @[Monitor.scala:20:14]
input [1:0] io_in_d_bits_size, // @[Monitor.scala:20:14]
input [10:0] io_in_d_bits_source, // @[Monitor.scala:20:14]
input [63:0] io_in_d_bits_data // @[Monitor.scala:20:14]
);
wire [31:0] _plusarg_reader_1_out; // @[PlusArg.scala:80:11]
wire [31:0] _plusarg_reader_out; // @[PlusArg.scala:80:11]
wire io_in_a_ready_0 = io_in_a_ready; // @[Monitor.scala:36:7]
wire io_in_a_valid_0 = io_in_a_valid; // @[Monitor.scala:36:7]
wire [2:0] io_in_a_bits_opcode_0 = io_in_a_bits_opcode; // @[Monitor.scala:36:7]
wire [2:0] io_in_a_bits_param_0 = io_in_a_bits_param; // @[Monitor.scala:36:7]
wire [1:0] io_in_a_bits_size_0 = io_in_a_bits_size; // @[Monitor.scala:36:7]
wire [10:0] io_in_a_bits_source_0 = io_in_a_bits_source; // @[Monitor.scala:36:7]
wire [20:0] io_in_a_bits_address_0 = io_in_a_bits_address; // @[Monitor.scala:36:7]
wire [7:0] io_in_a_bits_mask_0 = io_in_a_bits_mask; // @[Monitor.scala:36:7]
wire [63:0] io_in_a_bits_data_0 = io_in_a_bits_data; // @[Monitor.scala:36:7]
wire io_in_a_bits_corrupt_0 = io_in_a_bits_corrupt; // @[Monitor.scala:36:7]
wire io_in_d_ready_0 = io_in_d_ready; // @[Monitor.scala:36:7]
wire io_in_d_valid_0 = io_in_d_valid; // @[Monitor.scala:36:7]
wire [2:0] io_in_d_bits_opcode_0 = io_in_d_bits_opcode; // @[Monitor.scala:36:7]
wire [1:0] io_in_d_bits_size_0 = io_in_d_bits_size; // @[Monitor.scala:36:7]
wire [10:0] io_in_d_bits_source_0 = io_in_d_bits_source; // @[Monitor.scala:36:7]
wire [63:0] io_in_d_bits_data_0 = io_in_d_bits_data; // @[Monitor.scala:36:7]
wire io_in_d_bits_sink = 1'h0; // @[Monitor.scala:36:7]
wire io_in_d_bits_denied = 1'h0; // @[Monitor.scala:36:7]
wire io_in_d_bits_corrupt = 1'h0; // @[Monitor.scala:36:7]
wire _source_ok_T = 1'h0; // @[Parameters.scala:54:10]
wire _source_ok_T_6 = 1'h0; // @[Parameters.scala:54:10]
wire sink_ok = 1'h0; // @[Monitor.scala:309:31]
wire a_first_beats1_decode = 1'h0; // @[Edges.scala:220:59]
wire a_first_beats1 = 1'h0; // @[Edges.scala:221:14]
wire a_first_count = 1'h0; // @[Edges.scala:234:25]
wire d_first_beats1_decode = 1'h0; // @[Edges.scala:220:59]
wire d_first_beats1 = 1'h0; // @[Edges.scala:221:14]
wire d_first_count = 1'h0; // @[Edges.scala:234:25]
wire a_first_beats1_decode_1 = 1'h0; // @[Edges.scala:220:59]
wire a_first_beats1_1 = 1'h0; // @[Edges.scala:221:14]
wire a_first_count_1 = 1'h0; // @[Edges.scala:234:25]
wire d_first_beats1_decode_1 = 1'h0; // @[Edges.scala:220:59]
wire d_first_beats1_1 = 1'h0; // @[Edges.scala:221:14]
wire d_first_count_1 = 1'h0; // @[Edges.scala:234:25]
wire _c_first_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_T = 1'h0; // @[Decoupled.scala:51:35]
wire c_first_beats1_decode = 1'h0; // @[Edges.scala:220:59]
wire c_first_beats1_opdata = 1'h0; // @[Edges.scala:102:36]
wire c_first_beats1 = 1'h0; // @[Edges.scala:221:14]
wire _c_first_last_T = 1'h0; // @[Edges.scala:232:25]
wire c_first_done = 1'h0; // @[Edges.scala:233:22]
wire _c_first_count_T = 1'h0; // @[Edges.scala:234:27]
wire c_first_count = 1'h0; // @[Edges.scala:234:25]
wire _c_first_counter_T = 1'h0; // @[Edges.scala:236:21]
wire d_first_beats1_decode_2 = 1'h0; // @[Edges.scala:220:59]
wire d_first_beats1_2 = 1'h0; // @[Edges.scala:221:14]
wire d_first_count_2 = 1'h0; // @[Edges.scala:234:25]
wire _c_set_wo_ready_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_wo_ready_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_wo_ready_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_wo_ready_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_wo_ready_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_wo_ready_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_interm_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_interm_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_interm_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_interm_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_interm_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_interm_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_interm_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_interm_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_interm_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_interm_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_interm_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_interm_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_T = 1'h0; // @[Monitor.scala:772:47]
wire _c_probe_ack_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_T_1 = 1'h0; // @[Monitor.scala:772:95]
wire c_probe_ack = 1'h0; // @[Monitor.scala:772:71]
wire _same_cycle_resp_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_T_3 = 1'h0; // @[Monitor.scala:795:44]
wire _same_cycle_resp_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_T_4 = 1'h0; // @[Edges.scala:68:36]
wire _same_cycle_resp_T_5 = 1'h0; // @[Edges.scala:68:51]
wire _same_cycle_resp_T_6 = 1'h0; // @[Edges.scala:68:40]
wire _same_cycle_resp_T_7 = 1'h0; // @[Monitor.scala:795:55]
wire _same_cycle_resp_WIRE_4_ready = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_4_valid = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_4_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_5_ready = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_5_valid = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_5_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire same_cycle_resp_1 = 1'h0; // @[Monitor.scala:795:88]
wire _source_ok_T_1 = 1'h1; // @[Parameters.scala:54:32]
wire _source_ok_T_2 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_3 = 1'h1; // @[Parameters.scala:54:67]
wire _source_ok_T_7 = 1'h1; // @[Parameters.scala:54:32]
wire _source_ok_T_8 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_9 = 1'h1; // @[Parameters.scala:54:67]
wire _a_first_last_T_1 = 1'h1; // @[Edges.scala:232:43]
wire a_first_last = 1'h1; // @[Edges.scala:232:33]
wire _d_first_last_T_1 = 1'h1; // @[Edges.scala:232:43]
wire d_first_last = 1'h1; // @[Edges.scala:232:33]
wire _a_first_last_T_3 = 1'h1; // @[Edges.scala:232:43]
wire a_first_last_1 = 1'h1; // @[Edges.scala:232:33]
wire _d_first_last_T_3 = 1'h1; // @[Edges.scala:232:43]
wire d_first_last_1 = 1'h1; // @[Edges.scala:232:33]
wire c_first_counter1 = 1'h1; // @[Edges.scala:230:28]
wire c_first = 1'h1; // @[Edges.scala:231:25]
wire _c_first_last_T_1 = 1'h1; // @[Edges.scala:232:43]
wire c_first_last = 1'h1; // @[Edges.scala:232:33]
wire _d_first_last_T_5 = 1'h1; // @[Edges.scala:232:43]
wire d_first_last_2 = 1'h1; // @[Edges.scala:232:33]
wire [1:0] _c_first_counter1_T = 2'h3; // @[Edges.scala:230:28]
wire [1:0] io_in_d_bits_param = 2'h0; // @[Monitor.scala:36:7]
wire [1:0] _c_first_WIRE_bits_size = 2'h0; // @[Bundles.scala:265:74]
wire [1:0] _c_first_WIRE_1_bits_size = 2'h0; // @[Bundles.scala:265:61]
wire [1:0] _c_first_WIRE_2_bits_size = 2'h0; // @[Bundles.scala:265:74]
wire [1:0] _c_first_WIRE_3_bits_size = 2'h0; // @[Bundles.scala:265:61]
wire [1:0] _c_set_wo_ready_WIRE_bits_size = 2'h0; // @[Bundles.scala:265:74]
wire [1:0] _c_set_wo_ready_WIRE_1_bits_size = 2'h0; // @[Bundles.scala:265:61]
wire [1:0] _c_set_WIRE_bits_size = 2'h0; // @[Bundles.scala:265:74]
wire [1:0] _c_set_WIRE_1_bits_size = 2'h0; // @[Bundles.scala:265:61]
wire [1:0] _c_opcodes_set_interm_WIRE_bits_size = 2'h0; // @[Bundles.scala:265:74]
wire [1:0] _c_opcodes_set_interm_WIRE_1_bits_size = 2'h0; // @[Bundles.scala:265:61]
wire [1:0] _c_sizes_set_interm_WIRE_bits_size = 2'h0; // @[Bundles.scala:265:74]
wire [1:0] _c_sizes_set_interm_WIRE_1_bits_size = 2'h0; // @[Bundles.scala:265:61]
wire [1:0] _c_opcodes_set_WIRE_bits_size = 2'h0; // @[Bundles.scala:265:74]
wire [1:0] _c_opcodes_set_WIRE_1_bits_size = 2'h0; // @[Bundles.scala:265:61]
wire [1:0] _c_sizes_set_WIRE_bits_size = 2'h0; // @[Bundles.scala:265:74]
wire [1:0] _c_sizes_set_WIRE_1_bits_size = 2'h0; // @[Bundles.scala:265:61]
wire [1:0] _c_probe_ack_WIRE_bits_size = 2'h0; // @[Bundles.scala:265:74]
wire [1:0] _c_probe_ack_WIRE_1_bits_size = 2'h0; // @[Bundles.scala:265:61]
wire [1:0] _c_probe_ack_WIRE_2_bits_size = 2'h0; // @[Bundles.scala:265:74]
wire [1:0] _c_probe_ack_WIRE_3_bits_size = 2'h0; // @[Bundles.scala:265:61]
wire [1:0] _same_cycle_resp_WIRE_bits_size = 2'h0; // @[Bundles.scala:265:74]
wire [1:0] _same_cycle_resp_WIRE_1_bits_size = 2'h0; // @[Bundles.scala:265:61]
wire [1:0] _same_cycle_resp_WIRE_2_bits_size = 2'h0; // @[Bundles.scala:265:74]
wire [1:0] _same_cycle_resp_WIRE_3_bits_size = 2'h0; // @[Bundles.scala:265:61]
wire [1:0] _same_cycle_resp_WIRE_4_bits_size = 2'h0; // @[Bundles.scala:265:74]
wire [1:0] _same_cycle_resp_WIRE_5_bits_size = 2'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_first_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_first_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_first_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_first_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_set_wo_ready_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_set_wo_ready_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_opcodes_set_interm_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_opcodes_set_interm_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_sizes_set_interm_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_sizes_set_interm_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_opcodes_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_opcodes_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_sizes_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_sizes_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_probe_ack_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_probe_ack_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_probe_ack_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_probe_ack_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _same_cycle_resp_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _same_cycle_resp_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _same_cycle_resp_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _same_cycle_resp_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _same_cycle_resp_WIRE_4_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _same_cycle_resp_WIRE_5_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [20:0] _c_first_WIRE_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _c_first_WIRE_1_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _c_first_WIRE_2_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _c_first_WIRE_3_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _c_set_wo_ready_WIRE_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _c_set_wo_ready_WIRE_1_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _c_set_WIRE_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _c_set_WIRE_1_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _c_opcodes_set_interm_WIRE_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _c_opcodes_set_interm_WIRE_1_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _c_sizes_set_interm_WIRE_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _c_sizes_set_interm_WIRE_1_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _c_opcodes_set_WIRE_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _c_opcodes_set_WIRE_1_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _c_sizes_set_WIRE_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _c_sizes_set_WIRE_1_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _c_probe_ack_WIRE_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _c_probe_ack_WIRE_1_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _c_probe_ack_WIRE_2_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _c_probe_ack_WIRE_3_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _same_cycle_resp_WIRE_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _same_cycle_resp_WIRE_1_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _same_cycle_resp_WIRE_2_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _same_cycle_resp_WIRE_3_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _same_cycle_resp_WIRE_4_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _same_cycle_resp_WIRE_5_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [10:0] _c_first_WIRE_bits_source = 11'h0; // @[Bundles.scala:265:74]
wire [10:0] _c_first_WIRE_1_bits_source = 11'h0; // @[Bundles.scala:265:61]
wire [10:0] _c_first_WIRE_2_bits_source = 11'h0; // @[Bundles.scala:265:74]
wire [10:0] _c_first_WIRE_3_bits_source = 11'h0; // @[Bundles.scala:265:61]
wire [10:0] _c_set_wo_ready_WIRE_bits_source = 11'h0; // @[Bundles.scala:265:74]
wire [10:0] _c_set_wo_ready_WIRE_1_bits_source = 11'h0; // @[Bundles.scala:265:61]
wire [10:0] _c_set_WIRE_bits_source = 11'h0; // @[Bundles.scala:265:74]
wire [10:0] _c_set_WIRE_1_bits_source = 11'h0; // @[Bundles.scala:265:61]
wire [10:0] _c_opcodes_set_interm_WIRE_bits_source = 11'h0; // @[Bundles.scala:265:74]
wire [10:0] _c_opcodes_set_interm_WIRE_1_bits_source = 11'h0; // @[Bundles.scala:265:61]
wire [10:0] _c_sizes_set_interm_WIRE_bits_source = 11'h0; // @[Bundles.scala:265:74]
wire [10:0] _c_sizes_set_interm_WIRE_1_bits_source = 11'h0; // @[Bundles.scala:265:61]
wire [10:0] _c_opcodes_set_WIRE_bits_source = 11'h0; // @[Bundles.scala:265:74]
wire [10:0] _c_opcodes_set_WIRE_1_bits_source = 11'h0; // @[Bundles.scala:265:61]
wire [10:0] _c_sizes_set_WIRE_bits_source = 11'h0; // @[Bundles.scala:265:74]
wire [10:0] _c_sizes_set_WIRE_1_bits_source = 11'h0; // @[Bundles.scala:265:61]
wire [10:0] _c_probe_ack_WIRE_bits_source = 11'h0; // @[Bundles.scala:265:74]
wire [10:0] _c_probe_ack_WIRE_1_bits_source = 11'h0; // @[Bundles.scala:265:61]
wire [10:0] _c_probe_ack_WIRE_2_bits_source = 11'h0; // @[Bundles.scala:265:74]
wire [10:0] _c_probe_ack_WIRE_3_bits_source = 11'h0; // @[Bundles.scala:265:61]
wire [10:0] _same_cycle_resp_WIRE_bits_source = 11'h0; // @[Bundles.scala:265:74]
wire [10:0] _same_cycle_resp_WIRE_1_bits_source = 11'h0; // @[Bundles.scala:265:61]
wire [10:0] _same_cycle_resp_WIRE_2_bits_source = 11'h0; // @[Bundles.scala:265:74]
wire [10:0] _same_cycle_resp_WIRE_3_bits_source = 11'h0; // @[Bundles.scala:265:61]
wire [10:0] _same_cycle_resp_WIRE_4_bits_source = 11'h0; // @[Bundles.scala:265:74]
wire [10:0] _same_cycle_resp_WIRE_5_bits_source = 11'h0; // @[Bundles.scala:265:61]
wire [2:0] responseMap_0 = 3'h0; // @[Monitor.scala:643:42]
wire [2:0] responseMap_1 = 3'h0; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_0 = 3'h0; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_1 = 3'h0; // @[Monitor.scala:644:42]
wire [2:0] _c_first_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_beats1_decode_T_2 = 3'h0; // @[package.scala:243:46]
wire [2:0] c_sizes_set_interm = 3'h0; // @[Monitor.scala:755:40]
wire [2:0] _c_set_wo_ready_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_wo_ready_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_wo_ready_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_wo_ready_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_interm_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_interm_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_interm_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_interm_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_interm_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_interm_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_interm_T = 3'h0; // @[Monitor.scala:766:51]
wire [2:0] _c_opcodes_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_4_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_4_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_5_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_5_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [15:0] _a_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _a_size_lookup_T_5 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _d_opcodes_clr_T_3 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _d_sizes_clr_T_3 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _c_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:724:57]
wire [15:0] _c_size_lookup_T_5 = 16'hF; // @[Monitor.scala:724:57]
wire [15:0] _d_opcodes_clr_T_9 = 16'hF; // @[Monitor.scala:724:57]
wire [15:0] _d_sizes_clr_T_9 = 16'hF; // @[Monitor.scala:724:57]
wire [16:0] _a_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _a_size_lookup_T_4 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _d_opcodes_clr_T_2 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _d_sizes_clr_T_2 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _c_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:724:57]
wire [16:0] _c_size_lookup_T_4 = 17'hF; // @[Monitor.scala:724:57]
wire [16:0] _d_opcodes_clr_T_8 = 17'hF; // @[Monitor.scala:724:57]
wire [16:0] _d_sizes_clr_T_8 = 17'hF; // @[Monitor.scala:724:57]
wire [15:0] _a_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _a_size_lookup_T_3 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _d_opcodes_clr_T_1 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _d_sizes_clr_T_1 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _c_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:724:51]
wire [15:0] _c_size_lookup_T_3 = 16'h10; // @[Monitor.scala:724:51]
wire [15:0] _d_opcodes_clr_T_7 = 16'h10; // @[Monitor.scala:724:51]
wire [15:0] _d_sizes_clr_T_7 = 16'h10; // @[Monitor.scala:724:51]
wire [16385:0] _c_sizes_set_T_1 = 16386'h0; // @[Monitor.scala:768:52]
wire [13:0] _c_opcodes_set_T = 14'h0; // @[Monitor.scala:767:79]
wire [13:0] _c_sizes_set_T = 14'h0; // @[Monitor.scala:768:77]
wire [16386:0] _c_opcodes_set_T_1 = 16387'h0; // @[Monitor.scala:767:54]
wire [2:0] responseMap_2 = 3'h1; // @[Monitor.scala:643:42]
wire [2:0] responseMap_3 = 3'h1; // @[Monitor.scala:643:42]
wire [2:0] responseMap_4 = 3'h1; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_2 = 3'h1; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_3 = 3'h1; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_4 = 3'h1; // @[Monitor.scala:644:42]
wire [2:0] _c_sizes_set_interm_T_1 = 3'h1; // @[Monitor.scala:766:59]
wire [3:0] _c_opcodes_set_interm_T_1 = 4'h1; // @[Monitor.scala:765:61]
wire [3:0] c_opcodes_set_interm = 4'h0; // @[Monitor.scala:754:40]
wire [3:0] _c_opcodes_set_interm_T = 4'h0; // @[Monitor.scala:765:53]
wire [2047:0] _c_set_wo_ready_T = 2048'h1; // @[OneHot.scala:58:35]
wire [2047:0] _c_set_T = 2048'h1; // @[OneHot.scala:58:35]
wire [4159:0] c_opcodes_set = 4160'h0; // @[Monitor.scala:740:34]
wire [4159:0] c_sizes_set = 4160'h0; // @[Monitor.scala:741:34]
wire [1039:0] c_set = 1040'h0; // @[Monitor.scala:738:34]
wire [1039:0] c_set_wo_ready = 1040'h0; // @[Monitor.scala:739:34]
wire [2:0] _c_first_beats1_decode_T_1 = 3'h7; // @[package.scala:243:76]
wire [5:0] _c_first_beats1_decode_T = 6'h7; // @[package.scala:243:71]
wire [2:0] responseMap_6 = 3'h4; // @[Monitor.scala:643:42]
wire [2:0] responseMap_7 = 3'h4; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_7 = 3'h4; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_6 = 3'h5; // @[Monitor.scala:644:42]
wire [2:0] responseMap_5 = 3'h2; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_5 = 3'h2; // @[Monitor.scala:644:42]
wire [3:0] _a_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:637:123]
wire [3:0] _a_size_lookup_T_2 = 4'h4; // @[Monitor.scala:641:117]
wire [3:0] _d_opcodes_clr_T = 4'h4; // @[Monitor.scala:680:48]
wire [3:0] _d_sizes_clr_T = 4'h4; // @[Monitor.scala:681:48]
wire [3:0] _c_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:749:123]
wire [3:0] _c_size_lookup_T_2 = 4'h4; // @[Monitor.scala:750:119]
wire [3:0] _d_opcodes_clr_T_6 = 4'h4; // @[Monitor.scala:790:48]
wire [3:0] _d_sizes_clr_T_6 = 4'h4; // @[Monitor.scala:791:48]
wire [10:0] _source_ok_uncommonBits_T = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [10:0] _uncommonBits_T = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [10:0] _uncommonBits_T_1 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [10:0] _uncommonBits_T_2 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [10:0] _uncommonBits_T_3 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [10:0] _uncommonBits_T_4 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [10:0] _uncommonBits_T_5 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [10:0] _uncommonBits_T_6 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [10:0] _uncommonBits_T_7 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [10:0] _uncommonBits_T_8 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [10:0] _source_ok_uncommonBits_T_1 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire [10:0] source_ok_uncommonBits = _source_ok_uncommonBits_T; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_4 = source_ok_uncommonBits < 11'h410; // @[Parameters.scala:52:56, :57:20]
wire _source_ok_T_5 = _source_ok_T_4; // @[Parameters.scala:56:48, :57:20]
wire _source_ok_WIRE_0 = _source_ok_T_5; // @[Parameters.scala:1138:31]
wire [5:0] _GEN = 6'h7 << io_in_a_bits_size_0; // @[package.scala:243:71]
wire [5:0] _is_aligned_mask_T; // @[package.scala:243:71]
assign _is_aligned_mask_T = _GEN; // @[package.scala:243:71]
wire [5:0] _a_first_beats1_decode_T; // @[package.scala:243:71]
assign _a_first_beats1_decode_T = _GEN; // @[package.scala:243:71]
wire [5:0] _a_first_beats1_decode_T_3; // @[package.scala:243:71]
assign _a_first_beats1_decode_T_3 = _GEN; // @[package.scala:243:71]
wire [2:0] _is_aligned_mask_T_1 = _is_aligned_mask_T[2:0]; // @[package.scala:243:{71,76}]
wire [2:0] is_aligned_mask = ~_is_aligned_mask_T_1; // @[package.scala:243:{46,76}]
wire [20:0] _is_aligned_T = {18'h0, io_in_a_bits_address_0[2:0] & is_aligned_mask}; // @[package.scala:243:46]
wire is_aligned = _is_aligned_T == 21'h0; // @[Edges.scala:21:{16,24}]
wire [2:0] _mask_sizeOH_T = {1'h0, io_in_a_bits_size_0}; // @[Misc.scala:202:34]
wire [1:0] mask_sizeOH_shiftAmount = _mask_sizeOH_T[1:0]; // @[OneHot.scala:64:49]
wire [3:0] _mask_sizeOH_T_1 = 4'h1 << mask_sizeOH_shiftAmount; // @[OneHot.scala:64:49, :65:12]
wire [2:0] _mask_sizeOH_T_2 = _mask_sizeOH_T_1[2:0]; // @[OneHot.scala:65:{12,27}]
wire [2:0] mask_sizeOH = {_mask_sizeOH_T_2[2:1], 1'h1}; // @[OneHot.scala:65:27]
wire mask_sub_sub_sub_0_1 = &io_in_a_bits_size_0; // @[Misc.scala:206:21]
wire mask_sub_sub_size = mask_sizeOH[2]; // @[Misc.scala:202:81, :209:26]
wire mask_sub_sub_bit = io_in_a_bits_address_0[2]; // @[Misc.scala:210:26]
wire mask_sub_sub_1_2 = mask_sub_sub_bit; // @[Misc.scala:210:26, :214:27]
wire mask_sub_sub_nbit = ~mask_sub_sub_bit; // @[Misc.scala:210:26, :211:20]
wire mask_sub_sub_0_2 = mask_sub_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_sub_sub_acc_T = mask_sub_sub_size & mask_sub_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_sub_0_1 = mask_sub_sub_sub_0_1 | _mask_sub_sub_acc_T; // @[Misc.scala:206:21, :215:{29,38}]
wire _mask_sub_sub_acc_T_1 = mask_sub_sub_size & mask_sub_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_sub_1_1 = mask_sub_sub_sub_0_1 | _mask_sub_sub_acc_T_1; // @[Misc.scala:206:21, :215:{29,38}]
wire mask_sub_size = mask_sizeOH[1]; // @[Misc.scala:202:81, :209:26]
wire mask_sub_bit = io_in_a_bits_address_0[1]; // @[Misc.scala:210:26]
wire mask_sub_nbit = ~mask_sub_bit; // @[Misc.scala:210:26, :211:20]
wire mask_sub_0_2 = mask_sub_sub_0_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_sub_acc_T = mask_sub_size & mask_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_0_1 = mask_sub_sub_0_1 | _mask_sub_acc_T; // @[Misc.scala:215:{29,38}]
wire mask_sub_1_2 = mask_sub_sub_0_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_sub_acc_T_1 = mask_sub_size & mask_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_1_1 = mask_sub_sub_0_1 | _mask_sub_acc_T_1; // @[Misc.scala:215:{29,38}]
wire mask_sub_2_2 = mask_sub_sub_1_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_sub_acc_T_2 = mask_sub_size & mask_sub_2_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_2_1 = mask_sub_sub_1_1 | _mask_sub_acc_T_2; // @[Misc.scala:215:{29,38}]
wire mask_sub_3_2 = mask_sub_sub_1_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_sub_acc_T_3 = mask_sub_size & mask_sub_3_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_3_1 = mask_sub_sub_1_1 | _mask_sub_acc_T_3; // @[Misc.scala:215:{29,38}]
wire mask_size = mask_sizeOH[0]; // @[Misc.scala:202:81, :209:26]
wire mask_bit = io_in_a_bits_address_0[0]; // @[Misc.scala:210:26]
wire mask_nbit = ~mask_bit; // @[Misc.scala:210:26, :211:20]
wire mask_eq = mask_sub_0_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T = mask_size & mask_eq; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc = mask_sub_0_1 | _mask_acc_T; // @[Misc.scala:215:{29,38}]
wire mask_eq_1 = mask_sub_0_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_1 = mask_size & mask_eq_1; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_1 = mask_sub_0_1 | _mask_acc_T_1; // @[Misc.scala:215:{29,38}]
wire mask_eq_2 = mask_sub_1_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T_2 = mask_size & mask_eq_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_2 = mask_sub_1_1 | _mask_acc_T_2; // @[Misc.scala:215:{29,38}]
wire mask_eq_3 = mask_sub_1_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_3 = mask_size & mask_eq_3; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_3 = mask_sub_1_1 | _mask_acc_T_3; // @[Misc.scala:215:{29,38}]
wire mask_eq_4 = mask_sub_2_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T_4 = mask_size & mask_eq_4; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_4 = mask_sub_2_1 | _mask_acc_T_4; // @[Misc.scala:215:{29,38}]
wire mask_eq_5 = mask_sub_2_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_5 = mask_size & mask_eq_5; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_5 = mask_sub_2_1 | _mask_acc_T_5; // @[Misc.scala:215:{29,38}]
wire mask_eq_6 = mask_sub_3_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T_6 = mask_size & mask_eq_6; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_6 = mask_sub_3_1 | _mask_acc_T_6; // @[Misc.scala:215:{29,38}]
wire mask_eq_7 = mask_sub_3_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_7 = mask_size & mask_eq_7; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_7 = mask_sub_3_1 | _mask_acc_T_7; // @[Misc.scala:215:{29,38}]
wire [1:0] mask_lo_lo = {mask_acc_1, mask_acc}; // @[Misc.scala:215:29, :222:10]
wire [1:0] mask_lo_hi = {mask_acc_3, mask_acc_2}; // @[Misc.scala:215:29, :222:10]
wire [3:0] mask_lo = {mask_lo_hi, mask_lo_lo}; // @[Misc.scala:222:10]
wire [1:0] mask_hi_lo = {mask_acc_5, mask_acc_4}; // @[Misc.scala:215:29, :222:10]
wire [1:0] mask_hi_hi = {mask_acc_7, mask_acc_6}; // @[Misc.scala:215:29, :222:10]
wire [3:0] mask_hi = {mask_hi_hi, mask_hi_lo}; // @[Misc.scala:222:10]
wire [7:0] mask = {mask_hi, mask_lo}; // @[Misc.scala:222:10]
wire [10:0] uncommonBits = _uncommonBits_T; // @[Parameters.scala:52:{29,56}]
wire [10:0] uncommonBits_1 = _uncommonBits_T_1; // @[Parameters.scala:52:{29,56}]
wire [10:0] uncommonBits_2 = _uncommonBits_T_2; // @[Parameters.scala:52:{29,56}]
wire [10:0] uncommonBits_3 = _uncommonBits_T_3; // @[Parameters.scala:52:{29,56}]
wire [10:0] uncommonBits_4 = _uncommonBits_T_4; // @[Parameters.scala:52:{29,56}]
wire [10:0] uncommonBits_5 = _uncommonBits_T_5; // @[Parameters.scala:52:{29,56}]
wire [10:0] uncommonBits_6 = _uncommonBits_T_6; // @[Parameters.scala:52:{29,56}]
wire [10:0] uncommonBits_7 = _uncommonBits_T_7; // @[Parameters.scala:52:{29,56}]
wire [10:0] uncommonBits_8 = _uncommonBits_T_8; // @[Parameters.scala:52:{29,56}]
wire [10:0] source_ok_uncommonBits_1 = _source_ok_uncommonBits_T_1; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_10 = source_ok_uncommonBits_1 < 11'h410; // @[Parameters.scala:52:56, :57:20]
wire _source_ok_T_11 = _source_ok_T_10; // @[Parameters.scala:56:48, :57:20]
wire _source_ok_WIRE_1_0 = _source_ok_T_11; // @[Parameters.scala:1138:31]
wire _T_665 = io_in_a_ready_0 & io_in_a_valid_0; // @[Decoupled.scala:51:35]
wire _a_first_T; // @[Decoupled.scala:51:35]
assign _a_first_T = _T_665; // @[Decoupled.scala:51:35]
wire _a_first_T_1; // @[Decoupled.scala:51:35]
assign _a_first_T_1 = _T_665; // @[Decoupled.scala:51:35]
wire a_first_done = _a_first_T; // @[Decoupled.scala:51:35]
wire [2:0] _a_first_beats1_decode_T_1 = _a_first_beats1_decode_T[2:0]; // @[package.scala:243:{71,76}]
wire [2:0] _a_first_beats1_decode_T_2 = ~_a_first_beats1_decode_T_1; // @[package.scala:243:{46,76}]
wire _a_first_beats1_opdata_T = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7]
wire _a_first_beats1_opdata_T_1 = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7]
wire a_first_beats1_opdata = ~_a_first_beats1_opdata_T; // @[Edges.scala:92:{28,37}]
reg a_first_counter; // @[Edges.scala:229:27]
wire _a_first_last_T = a_first_counter; // @[Edges.scala:229:27, :232:25]
wire [1:0] _a_first_counter1_T = {1'h0, a_first_counter} - 2'h1; // @[Edges.scala:229:27, :230:28]
wire a_first_counter1 = _a_first_counter1_T[0]; // @[Edges.scala:230:28]
wire a_first = ~a_first_counter; // @[Edges.scala:229:27, :231:25]
wire _a_first_count_T = ~a_first_counter1; // @[Edges.scala:230:28, :234:27]
wire _a_first_counter_T = ~a_first & a_first_counter1; // @[Edges.scala:230:28, :231:25, :236:21]
reg [2:0] opcode; // @[Monitor.scala:387:22]
reg [2:0] param; // @[Monitor.scala:388:22]
reg [1:0] size; // @[Monitor.scala:389:22]
reg [10:0] source; // @[Monitor.scala:390:22]
reg [20:0] address; // @[Monitor.scala:391:22]
wire _T_733 = io_in_d_ready_0 & io_in_d_valid_0; // @[Decoupled.scala:51:35]
wire _d_first_T; // @[Decoupled.scala:51:35]
assign _d_first_T = _T_733; // @[Decoupled.scala:51:35]
wire _d_first_T_1; // @[Decoupled.scala:51:35]
assign _d_first_T_1 = _T_733; // @[Decoupled.scala:51:35]
wire _d_first_T_2; // @[Decoupled.scala:51:35]
assign _d_first_T_2 = _T_733; // @[Decoupled.scala:51:35]
wire d_first_done = _d_first_T; // @[Decoupled.scala:51:35]
wire [5:0] _GEN_0 = 6'h7 << io_in_d_bits_size_0; // @[package.scala:243:71]
wire [5:0] _d_first_beats1_decode_T; // @[package.scala:243:71]
assign _d_first_beats1_decode_T = _GEN_0; // @[package.scala:243:71]
wire [5:0] _d_first_beats1_decode_T_3; // @[package.scala:243:71]
assign _d_first_beats1_decode_T_3 = _GEN_0; // @[package.scala:243:71]
wire [5:0] _d_first_beats1_decode_T_6; // @[package.scala:243:71]
assign _d_first_beats1_decode_T_6 = _GEN_0; // @[package.scala:243:71]
wire [2:0] _d_first_beats1_decode_T_1 = _d_first_beats1_decode_T[2:0]; // @[package.scala:243:{71,76}]
wire [2:0] _d_first_beats1_decode_T_2 = ~_d_first_beats1_decode_T_1; // @[package.scala:243:{46,76}]
wire d_first_beats1_opdata = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7]
wire d_first_beats1_opdata_1 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7]
wire d_first_beats1_opdata_2 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7]
reg d_first_counter; // @[Edges.scala:229:27]
wire _d_first_last_T = d_first_counter; // @[Edges.scala:229:27, :232:25]
wire [1:0] _d_first_counter1_T = {1'h0, d_first_counter} - 2'h1; // @[Edges.scala:229:27, :230:28]
wire d_first_counter1 = _d_first_counter1_T[0]; // @[Edges.scala:230:28]
wire d_first = ~d_first_counter; // @[Edges.scala:229:27, :231:25]
wire _d_first_count_T = ~d_first_counter1; // @[Edges.scala:230:28, :234:27]
wire _d_first_counter_T = ~d_first & d_first_counter1; // @[Edges.scala:230:28, :231:25, :236:21]
reg [2:0] opcode_1; // @[Monitor.scala:538:22]
reg [1:0] size_1; // @[Monitor.scala:540:22]
reg [10:0] source_1; // @[Monitor.scala:541:22]
reg [1039:0] inflight; // @[Monitor.scala:614:27]
reg [4159:0] inflight_opcodes; // @[Monitor.scala:616:35]
reg [4159:0] inflight_sizes; // @[Monitor.scala:618:33]
wire a_first_done_1 = _a_first_T_1; // @[Decoupled.scala:51:35]
wire [2:0] _a_first_beats1_decode_T_4 = _a_first_beats1_decode_T_3[2:0]; // @[package.scala:243:{71,76}]
wire [2:0] _a_first_beats1_decode_T_5 = ~_a_first_beats1_decode_T_4; // @[package.scala:243:{46,76}]
wire a_first_beats1_opdata_1 = ~_a_first_beats1_opdata_T_1; // @[Edges.scala:92:{28,37}]
reg a_first_counter_1; // @[Edges.scala:229:27]
wire _a_first_last_T_2 = a_first_counter_1; // @[Edges.scala:229:27, :232:25]
wire [1:0] _a_first_counter1_T_1 = {1'h0, a_first_counter_1} - 2'h1; // @[Edges.scala:229:27, :230:28]
wire a_first_counter1_1 = _a_first_counter1_T_1[0]; // @[Edges.scala:230:28]
wire a_first_1 = ~a_first_counter_1; // @[Edges.scala:229:27, :231:25]
wire _a_first_count_T_1 = ~a_first_counter1_1; // @[Edges.scala:230:28, :234:27]
wire _a_first_counter_T_1 = ~a_first_1 & a_first_counter1_1; // @[Edges.scala:230:28, :231:25, :236:21]
wire d_first_done_1 = _d_first_T_1; // @[Decoupled.scala:51:35]
wire [2:0] _d_first_beats1_decode_T_4 = _d_first_beats1_decode_T_3[2:0]; // @[package.scala:243:{71,76}]
wire [2:0] _d_first_beats1_decode_T_5 = ~_d_first_beats1_decode_T_4; // @[package.scala:243:{46,76}]
reg d_first_counter_1; // @[Edges.scala:229:27]
wire _d_first_last_T_2 = d_first_counter_1; // @[Edges.scala:229:27, :232:25]
wire [1:0] _d_first_counter1_T_1 = {1'h0, d_first_counter_1} - 2'h1; // @[Edges.scala:229:27, :230:28]
wire d_first_counter1_1 = _d_first_counter1_T_1[0]; // @[Edges.scala:230:28]
wire d_first_1 = ~d_first_counter_1; // @[Edges.scala:229:27, :231:25]
wire _d_first_count_T_1 = ~d_first_counter1_1; // @[Edges.scala:230:28, :234:27]
wire _d_first_counter_T_1 = ~d_first_1 & d_first_counter1_1; // @[Edges.scala:230:28, :231:25, :236:21]
wire [1039:0] a_set; // @[Monitor.scala:626:34]
wire [1039:0] a_set_wo_ready; // @[Monitor.scala:627:34]
wire [4159:0] a_opcodes_set; // @[Monitor.scala:630:33]
wire [4159:0] a_sizes_set; // @[Monitor.scala:632:31]
wire [2:0] a_opcode_lookup; // @[Monitor.scala:635:35]
wire [13:0] _GEN_1 = {1'h0, io_in_d_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :637:69]
wire [13:0] _a_opcode_lookup_T; // @[Monitor.scala:637:69]
assign _a_opcode_lookup_T = _GEN_1; // @[Monitor.scala:637:69]
wire [13:0] _a_size_lookup_T; // @[Monitor.scala:641:65]
assign _a_size_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :641:65]
wire [13:0] _d_opcodes_clr_T_4; // @[Monitor.scala:680:101]
assign _d_opcodes_clr_T_4 = _GEN_1; // @[Monitor.scala:637:69, :680:101]
wire [13:0] _d_sizes_clr_T_4; // @[Monitor.scala:681:99]
assign _d_sizes_clr_T_4 = _GEN_1; // @[Monitor.scala:637:69, :681:99]
wire [13:0] _c_opcode_lookup_T; // @[Monitor.scala:749:69]
assign _c_opcode_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :749:69]
wire [13:0] _c_size_lookup_T; // @[Monitor.scala:750:67]
assign _c_size_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :750:67]
wire [13:0] _d_opcodes_clr_T_10; // @[Monitor.scala:790:101]
assign _d_opcodes_clr_T_10 = _GEN_1; // @[Monitor.scala:637:69, :790:101]
wire [13:0] _d_sizes_clr_T_10; // @[Monitor.scala:791:99]
assign _d_sizes_clr_T_10 = _GEN_1; // @[Monitor.scala:637:69, :791:99]
wire [4159:0] _a_opcode_lookup_T_1 = inflight_opcodes >> _a_opcode_lookup_T; // @[Monitor.scala:616:35, :637:{44,69}]
wire [4159:0] _a_opcode_lookup_T_6 = {4156'h0, _a_opcode_lookup_T_1[3:0]}; // @[Monitor.scala:637:{44,97}]
wire [4159:0] _a_opcode_lookup_T_7 = {1'h0, _a_opcode_lookup_T_6[4159:1]}; // @[Monitor.scala:637:{97,152}]
assign a_opcode_lookup = _a_opcode_lookup_T_7[2:0]; // @[Monitor.scala:635:35, :637:{21,152}]
wire [3:0] a_size_lookup; // @[Monitor.scala:639:33]
wire [4159:0] _a_size_lookup_T_1 = inflight_sizes >> _a_size_lookup_T; // @[Monitor.scala:618:33, :641:{40,65}]
wire [4159:0] _a_size_lookup_T_6 = {4156'h0, _a_size_lookup_T_1[3:0]}; // @[Monitor.scala:641:{40,91}]
wire [4159:0] _a_size_lookup_T_7 = {1'h0, _a_size_lookup_T_6[4159:1]}; // @[Monitor.scala:641:{91,144}]
assign a_size_lookup = _a_size_lookup_T_7[3:0]; // @[Monitor.scala:639:33, :641:{19,144}]
wire [3:0] a_opcodes_set_interm; // @[Monitor.scala:646:40]
wire [2:0] a_sizes_set_interm; // @[Monitor.scala:648:38]
wire _same_cycle_resp_T = io_in_a_valid_0 & a_first_1; // @[Monitor.scala:36:7, :651:26, :684:44]
wire [2047:0] _GEN_2 = 2048'h1 << io_in_a_bits_source_0; // @[OneHot.scala:58:35]
wire [2047:0] _a_set_wo_ready_T; // @[OneHot.scala:58:35]
assign _a_set_wo_ready_T = _GEN_2; // @[OneHot.scala:58:35]
wire [2047:0] _a_set_T; // @[OneHot.scala:58:35]
assign _a_set_T = _GEN_2; // @[OneHot.scala:58:35]
assign a_set_wo_ready = _same_cycle_resp_T ? _a_set_wo_ready_T[1039:0] : 1040'h0; // @[OneHot.scala:58:35]
wire _T_598 = _T_665 & a_first_1; // @[Decoupled.scala:51:35]
assign a_set = _T_598 ? _a_set_T[1039:0] : 1040'h0; // @[OneHot.scala:58:35]
wire [3:0] _a_opcodes_set_interm_T = {io_in_a_bits_opcode_0, 1'h0}; // @[Monitor.scala:36:7, :657:53]
wire [3:0] _a_opcodes_set_interm_T_1 = {_a_opcodes_set_interm_T[3:1], 1'h1}; // @[Monitor.scala:657:{53,61}]
assign a_opcodes_set_interm = _T_598 ? _a_opcodes_set_interm_T_1 : 4'h0; // @[Monitor.scala:646:40, :655:{25,70}, :657:{28,61}]
wire [2:0] _a_sizes_set_interm_T = {io_in_a_bits_size_0, 1'h0}; // @[Monitor.scala:36:7, :658:51]
wire [2:0] _a_sizes_set_interm_T_1 = {_a_sizes_set_interm_T[2:1], 1'h1}; // @[Monitor.scala:658:{51,59}]
assign a_sizes_set_interm = _T_598 ? _a_sizes_set_interm_T_1 : 3'h0; // @[Monitor.scala:648:38, :655:{25,70}, :658:{28,59}]
wire [13:0] _GEN_3 = {1'h0, io_in_a_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :659:79]
wire [13:0] _a_opcodes_set_T; // @[Monitor.scala:659:79]
assign _a_opcodes_set_T = _GEN_3; // @[Monitor.scala:659:79]
wire [13:0] _a_sizes_set_T; // @[Monitor.scala:660:77]
assign _a_sizes_set_T = _GEN_3; // @[Monitor.scala:659:79, :660:77]
wire [16386:0] _a_opcodes_set_T_1 = {16383'h0, a_opcodes_set_interm} << _a_opcodes_set_T; // @[Monitor.scala:646:40, :659:{54,79}]
assign a_opcodes_set = _T_598 ? _a_opcodes_set_T_1[4159:0] : 4160'h0; // @[Monitor.scala:630:33, :655:{25,70}, :659:{28,54}]
wire [16385:0] _a_sizes_set_T_1 = {16383'h0, a_sizes_set_interm} << _a_sizes_set_T; // @[Monitor.scala:648:38, :659:54, :660:{52,77}]
assign a_sizes_set = _T_598 ? _a_sizes_set_T_1[4159:0] : 4160'h0; // @[Monitor.scala:632:31, :655:{25,70}, :660:{28,52}]
wire [1039:0] d_clr; // @[Monitor.scala:664:34]
wire [1039:0] d_clr_wo_ready; // @[Monitor.scala:665:34]
wire [4159:0] d_opcodes_clr; // @[Monitor.scala:668:33]
wire [4159:0] d_sizes_clr; // @[Monitor.scala:670:31]
wire _GEN_4 = io_in_d_bits_opcode_0 == 3'h6; // @[Monitor.scala:36:7, :673:46]
wire d_release_ack; // @[Monitor.scala:673:46]
assign d_release_ack = _GEN_4; // @[Monitor.scala:673:46]
wire d_release_ack_1; // @[Monitor.scala:783:46]
assign d_release_ack_1 = _GEN_4; // @[Monitor.scala:673:46, :783:46]
wire _T_644 = io_in_d_valid_0 & d_first_1; // @[Monitor.scala:36:7, :674:26]
wire [2047:0] _GEN_5 = 2048'h1 << io_in_d_bits_source_0; // @[OneHot.scala:58:35]
wire [2047:0] _d_clr_wo_ready_T; // @[OneHot.scala:58:35]
assign _d_clr_wo_ready_T = _GEN_5; // @[OneHot.scala:58:35]
wire [2047:0] _d_clr_T; // @[OneHot.scala:58:35]
assign _d_clr_T = _GEN_5; // @[OneHot.scala:58:35]
wire [2047:0] _d_clr_wo_ready_T_1; // @[OneHot.scala:58:35]
assign _d_clr_wo_ready_T_1 = _GEN_5; // @[OneHot.scala:58:35]
wire [2047:0] _d_clr_T_1; // @[OneHot.scala:58:35]
assign _d_clr_T_1 = _GEN_5; // @[OneHot.scala:58:35]
assign d_clr_wo_ready = _T_644 & ~d_release_ack ? _d_clr_wo_ready_T[1039:0] : 1040'h0; // @[OneHot.scala:58:35]
wire _T_613 = _T_733 & d_first_1 & ~d_release_ack; // @[Decoupled.scala:51:35]
assign d_clr = _T_613 ? _d_clr_T[1039:0] : 1040'h0; // @[OneHot.scala:58:35]
wire [16398:0] _d_opcodes_clr_T_5 = 16399'hF << _d_opcodes_clr_T_4; // @[Monitor.scala:680:{76,101}]
assign d_opcodes_clr = _T_613 ? _d_opcodes_clr_T_5[4159:0] : 4160'h0; // @[Monitor.scala:668:33, :678:{25,70,89}, :680:{21,76}]
wire [16398:0] _d_sizes_clr_T_5 = 16399'hF << _d_sizes_clr_T_4; // @[Monitor.scala:681:{74,99}]
assign d_sizes_clr = _T_613 ? _d_sizes_clr_T_5[4159:0] : 4160'h0; // @[Monitor.scala:670:31, :678:{25,70,89}, :681:{21,74}]
wire _same_cycle_resp_T_1 = _same_cycle_resp_T; // @[Monitor.scala:684:{44,55}]
wire _same_cycle_resp_T_2 = io_in_a_bits_source_0 == io_in_d_bits_source_0; // @[Monitor.scala:36:7, :684:113]
wire same_cycle_resp = _same_cycle_resp_T_1 & _same_cycle_resp_T_2; // @[Monitor.scala:684:{55,88,113}]
wire [1039:0] _inflight_T = inflight | a_set; // @[Monitor.scala:614:27, :626:34, :705:27]
wire [1039:0] _inflight_T_1 = ~d_clr; // @[Monitor.scala:664:34, :705:38]
wire [1039:0] _inflight_T_2 = _inflight_T & _inflight_T_1; // @[Monitor.scala:705:{27,36,38}]
wire [4159:0] _inflight_opcodes_T = inflight_opcodes | a_opcodes_set; // @[Monitor.scala:616:35, :630:33, :706:43]
wire [4159:0] _inflight_opcodes_T_1 = ~d_opcodes_clr; // @[Monitor.scala:668:33, :706:62]
wire [4159:0] _inflight_opcodes_T_2 = _inflight_opcodes_T & _inflight_opcodes_T_1; // @[Monitor.scala:706:{43,60,62}]
wire [4159:0] _inflight_sizes_T = inflight_sizes | a_sizes_set; // @[Monitor.scala:618:33, :632:31, :707:39]
wire [4159:0] _inflight_sizes_T_1 = ~d_sizes_clr; // @[Monitor.scala:670:31, :707:56]
wire [4159:0] _inflight_sizes_T_2 = _inflight_sizes_T & _inflight_sizes_T_1; // @[Monitor.scala:707:{39,54,56}]
reg [31:0] watchdog; // @[Monitor.scala:709:27]
wire [32:0] _watchdog_T = {1'h0, watchdog} + 33'h1; // @[Monitor.scala:709:27, :714:26]
wire [31:0] _watchdog_T_1 = _watchdog_T[31:0]; // @[Monitor.scala:714:26]
reg [1039:0] inflight_1; // @[Monitor.scala:726:35]
wire [1039:0] _inflight_T_3 = inflight_1; // @[Monitor.scala:726:35, :814:35]
reg [4159:0] inflight_opcodes_1; // @[Monitor.scala:727:35]
wire [4159:0] _inflight_opcodes_T_3 = inflight_opcodes_1; // @[Monitor.scala:727:35, :815:43]
reg [4159:0] inflight_sizes_1; // @[Monitor.scala:728:35]
wire [4159:0] _inflight_sizes_T_3 = inflight_sizes_1; // @[Monitor.scala:728:35, :816:41]
wire d_first_done_2 = _d_first_T_2; // @[Decoupled.scala:51:35]
wire [2:0] _d_first_beats1_decode_T_7 = _d_first_beats1_decode_T_6[2:0]; // @[package.scala:243:{71,76}]
wire [2:0] _d_first_beats1_decode_T_8 = ~_d_first_beats1_decode_T_7; // @[package.scala:243:{46,76}]
reg d_first_counter_2; // @[Edges.scala:229:27]
wire _d_first_last_T_4 = d_first_counter_2; // @[Edges.scala:229:27, :232:25]
wire [1:0] _d_first_counter1_T_2 = {1'h0, d_first_counter_2} - 2'h1; // @[Edges.scala:229:27, :230:28]
wire d_first_counter1_2 = _d_first_counter1_T_2[0]; // @[Edges.scala:230:28]
wire d_first_2 = ~d_first_counter_2; // @[Edges.scala:229:27, :231:25]
wire _d_first_count_T_2 = ~d_first_counter1_2; // @[Edges.scala:230:28, :234:27]
wire _d_first_counter_T_2 = ~d_first_2 & d_first_counter1_2; // @[Edges.scala:230:28, :231:25, :236:21]
wire [3:0] c_opcode_lookup; // @[Monitor.scala:747:35]
wire [3:0] c_size_lookup; // @[Monitor.scala:748:35]
wire [4159:0] _c_opcode_lookup_T_1 = inflight_opcodes_1 >> _c_opcode_lookup_T; // @[Monitor.scala:727:35, :749:{44,69}]
wire [4159:0] _c_opcode_lookup_T_6 = {4156'h0, _c_opcode_lookup_T_1[3:0]}; // @[Monitor.scala:749:{44,97}]
wire [4159:0] _c_opcode_lookup_T_7 = {1'h0, _c_opcode_lookup_T_6[4159:1]}; // @[Monitor.scala:749:{97,152}]
assign c_opcode_lookup = _c_opcode_lookup_T_7[3:0]; // @[Monitor.scala:747:35, :749:{21,152}]
wire [4159:0] _c_size_lookup_T_1 = inflight_sizes_1 >> _c_size_lookup_T; // @[Monitor.scala:728:35, :750:{42,67}]
wire [4159:0] _c_size_lookup_T_6 = {4156'h0, _c_size_lookup_T_1[3:0]}; // @[Monitor.scala:750:{42,93}]
wire [4159:0] _c_size_lookup_T_7 = {1'h0, _c_size_lookup_T_6[4159:1]}; // @[Monitor.scala:750:{93,146}]
assign c_size_lookup = _c_size_lookup_T_7[3:0]; // @[Monitor.scala:748:35, :750:{21,146}]
wire [1039:0] d_clr_1; // @[Monitor.scala:774:34]
wire [1039:0] d_clr_wo_ready_1; // @[Monitor.scala:775:34]
wire [4159:0] d_opcodes_clr_1; // @[Monitor.scala:776:34]
wire [4159:0] d_sizes_clr_1; // @[Monitor.scala:777:34]
wire _T_709 = io_in_d_valid_0 & d_first_2; // @[Monitor.scala:36:7, :784:26]
assign d_clr_wo_ready_1 = _T_709 & d_release_ack_1 ? _d_clr_wo_ready_T_1[1039:0] : 1040'h0; // @[OneHot.scala:58:35]
wire _T_691 = _T_733 & d_first_2 & d_release_ack_1; // @[Decoupled.scala:51:35]
assign d_clr_1 = _T_691 ? _d_clr_T_1[1039:0] : 1040'h0; // @[OneHot.scala:58:35]
wire [16398:0] _d_opcodes_clr_T_11 = 16399'hF << _d_opcodes_clr_T_10; // @[Monitor.scala:790:{76,101}]
assign d_opcodes_clr_1 = _T_691 ? _d_opcodes_clr_T_11[4159:0] : 4160'h0; // @[Monitor.scala:776:34, :788:{25,70,88}, :790:{21,76}]
wire [16398:0] _d_sizes_clr_T_11 = 16399'hF << _d_sizes_clr_T_10; // @[Monitor.scala:791:{74,99}]
assign d_sizes_clr_1 = _T_691 ? _d_sizes_clr_T_11[4159:0] : 4160'h0; // @[Monitor.scala:777:34, :788:{25,70,88}, :791:{21,74}]
wire _same_cycle_resp_T_8 = io_in_d_bits_source_0 == 11'h0; // @[Monitor.scala:36:7, :795:113]
wire [1039:0] _inflight_T_4 = ~d_clr_1; // @[Monitor.scala:774:34, :814:46]
wire [1039:0] _inflight_T_5 = _inflight_T_3 & _inflight_T_4; // @[Monitor.scala:814:{35,44,46}]
wire [4159:0] _inflight_opcodes_T_4 = ~d_opcodes_clr_1; // @[Monitor.scala:776:34, :815:62]
wire [4159:0] _inflight_opcodes_T_5 = _inflight_opcodes_T_3 & _inflight_opcodes_T_4; // @[Monitor.scala:815:{43,60,62}]
wire [4159:0] _inflight_sizes_T_4 = ~d_sizes_clr_1; // @[Monitor.scala:777:34, :816:58]
wire [4159:0] _inflight_sizes_T_5 = _inflight_sizes_T_3 & _inflight_sizes_T_4; // @[Monitor.scala:816:{41,56,58}]
reg [31:0] watchdog_1; // @[Monitor.scala:818:27] |
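The bookkeeping above is the standard TileLink monitor pattern: when the first beat of an A-channel request fires, the bit for its source ID is set in `inflight` (and its opcode and size are recorded); when the matching D-channel response arrives, the same bit is cleared, and a watchdog counts the cycles in between. A minimal, hypothetical Chisel sketch of that set/clear update follows (the module and signal names are illustrative, not taken from the generated monitor):

import chisel3._
import chisel3.util._

// Hypothetical, simplified inflight tracker: one bit per source ID.
class InflightTracker(nSources: Int) extends Module {
  val io = IO(new Bundle {
    val aFire    = Input(Bool())                       // first beat of an A-channel request fires
    val aSource  = Input(UInt(log2Ceil(nSources).W))
    val dFire    = Input(Bool())                       // first beat of the matching D-channel response
    val dSource  = Input(UInt(log2Ceil(nSources).W))
    val inflight = Output(UInt(nSources.W))
  })
  val inflight = RegInit(0.U(nSources.W))
  val aSet = Mux(io.aFire, UIntToOH(io.aSource, nSources), 0.U(nSources.W))
  val dClr = Mux(io.dFire, UIntToOH(io.dSource, nSources), 0.U(nSources.W))
  // Same or-then-mask update as _inflight_T_2 = (inflight | a_set) & ~d_clr above.
  inflight := (inflight | aSet) & ~dClr
  io.inflight := inflight
}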
Generate the Verilog code corresponding to the following Chisel files.
File Transposer.scala:
package gemmini
import chisel3._
import chisel3.util._
import Util._
trait Transposer[T <: Data] extends Module {
def dim: Int
def dataType: T
val io = IO(new Bundle {
val inRow = Flipped(Decoupled(Vec(dim, dataType)))
val outCol = Decoupled(Vec(dim, dataType))
})
}
class PipelinedTransposer[T <: Data](val dim: Int, val dataType: T) extends Transposer[T] {
require(isPow2(dim))
val regArray = Seq.fill(dim, dim)(Reg(dataType))
val regArrayT = regArray.transpose
val sMoveUp :: sMoveLeft :: Nil = Enum(2)
val state = RegInit(sMoveUp)
val leftCounter = RegInit(0.U(log2Ceil(dim+1).W)) //(io.inRow.fire && state === sMoveLeft, dim+1)
val upCounter = RegInit(0.U(log2Ceil(dim+1).W)) //Counter(io.inRow.fire && state === sMoveUp, dim+1)
io.outCol.valid := 0.U
io.inRow.ready := 0.U
switch(state) {
is(sMoveUp) {
io.inRow.ready := upCounter <= dim.U
io.outCol.valid := leftCounter > 0.U
when(io.inRow.fire) {
upCounter := upCounter + 1.U
}
when(upCounter === (dim-1).U) {
state := sMoveLeft
leftCounter := 0.U
}
when(io.outCol.fire) {
leftCounter := leftCounter - 1.U
}
}
is(sMoveLeft) {
io.inRow.ready := leftCounter <= dim.U // TODO: this is naive
io.outCol.valid := upCounter > 0.U
when(leftCounter === (dim-1).U) {
state := sMoveUp
}
when(io.inRow.fire) {
leftCounter := leftCounter + 1.U
upCounter := 0.U
}
when(io.outCol.fire) {
upCounter := upCounter - 1.U
}
}
}
// Propagate input from bottom row to top row systolically in the move up phase
// TODO: need to iterate over columns to connect Chisel values of type T
// Should be able to operate directly on the Vec, but Seq and Vec don't mix (try Array?)
for (colIdx <- 0 until dim) {
regArray.foldRight(io.inRow.bits(colIdx)) {
case (regRow, prevReg) =>
when (state === sMoveUp) {
regRow(colIdx) := prevReg
}
regRow(colIdx)
}
}
// Propagate input from right side to left side systolically in the move left phase
for (rowIdx <- 0 until dim) {
regArrayT.foldRight(io.inRow.bits(rowIdx)) {
case (regCol, prevReg) =>
when (state === sMoveLeft) {
regCol(rowIdx) := prevReg
}
regCol(rowIdx)
}
}
// Pull from the left side or the top side based on the state
for (idx <- 0 until dim) {
when (state === sMoveUp) {
io.outCol.bits(idx) := regArray(0)(idx)
}.elsewhen(state === sMoveLeft) {
io.outCol.bits(idx) := regArrayT(0)(idx)
}.otherwise {
io.outCol.bits(idx) := DontCare
}
}
}
class AlwaysOutTransposer[T <: Data](val dim: Int, val dataType: T) extends Transposer[T] {
require(isPow2(dim))
val LEFT_DIR = 0.U(1.W)
val UP_DIR = 1.U(1.W)
class PE extends Module {
val io = IO(new Bundle {
val inR = Input(dataType)
val inD = Input(dataType)
val outL = Output(dataType)
val outU = Output(dataType)
val dir = Input(UInt(1.W))
val en = Input(Bool())
})
val reg = RegEnable(Mux(io.dir === LEFT_DIR, io.inR, io.inD), io.en)
io.outU := reg
io.outL := reg
}
val pes = Seq.fill(dim,dim)(Module(new PE))
val counter = RegInit(0.U((log2Ceil(dim) max 1).W)) // TODO replace this with a standard Chisel counter
val dir = RegInit(LEFT_DIR)
// Wire up horizontal signals
for (row <- 0 until dim; col <- 0 until dim) {
val right_in = if (col == dim-1) io.inRow.bits(row) else pes(row)(col+1).io.outL
pes(row)(col).io.inR := right_in
}
// Wire up vertical signals
for (row <- 0 until dim; col <- 0 until dim) {
val down_in = if (row == dim-1) io.inRow.bits(col) else pes(row+1)(col).io.outU
pes(row)(col).io.inD := down_in
}
// Wire up global signals
pes.flatten.foreach(_.io.dir := dir)
pes.flatten.foreach(_.io.en := io.inRow.fire)
io.outCol.valid := true.B
io.inRow.ready := true.B
val left_out = VecInit(pes.transpose.head.map(_.io.outL))
val up_out = VecInit(pes.head.map(_.io.outU))
io.outCol.bits := Mux(dir === LEFT_DIR, left_out, up_out)
when (io.inRow.fire) {
counter := wrappingAdd(counter, 1.U, dim)
}
when (counter === (dim-1).U && io.inRow.fire) {
dir := ~dir
}
}
class NaiveTransposer[T <: Data](val dim: Int, val dataType: T) extends Transposer[T] {
val regArray = Seq.fill(dim, dim)(Reg(dataType))
val regArrayT = regArray.transpose
// state = 0 => filling regArray row-wise, state = 1 => draining regArray column-wise
val state = RegInit(0.U(1.W))
val countInc = io.inRow.fire || io.outCol.fire
val (countValue, countWrap) = Counter(countInc, dim)
io.inRow.ready := state === 0.U
io.outCol.valid := state === 1.U
for (i <- 0 until dim) {
for (j <- 0 until dim) {
when(countValue === i.U && io.inRow.fire) {
regArray(i)(j) := io.inRow.bits(j)
}
}
}
for (i <- 0 until dim) {
io.outCol.bits(i) := 0.U
for (j <- 0 until dim) {
when(countValue === j.U) {
io.outCol.bits(i) := regArrayT(j)(i)
}
}
}
when (io.inRow.fire && countWrap) {
state := 1.U
}
when (io.outCol.fire && countWrap) {
state := 0.U
}
assert(!(state === 0.U) || !io.outCol.fire)
assert(!(state === 1.U) || !io.inRow.fire)
}
| module PE_87( // @[Transposer.scala:100:9]
input clock, // @[Transposer.scala:100:9]
input reset, // @[Transposer.scala:100:9]
input [7:0] io_inR, // @[Transposer.scala:101:16]
input [7:0] io_inD, // @[Transposer.scala:101:16]
output [7:0] io_outL, // @[Transposer.scala:101:16]
output [7:0] io_outU, // @[Transposer.scala:101:16]
input io_dir, // @[Transposer.scala:101:16]
input io_en // @[Transposer.scala:101:16]
);
wire [7:0] io_inR_0 = io_inR; // @[Transposer.scala:100:9]
wire [7:0] io_inD_0 = io_inD; // @[Transposer.scala:100:9]
wire io_dir_0 = io_dir; // @[Transposer.scala:100:9]
wire io_en_0 = io_en; // @[Transposer.scala:100:9]
wire [7:0] io_outL_0; // @[Transposer.scala:100:9]
wire [7:0] io_outU_0; // @[Transposer.scala:100:9]
wire _reg_T = ~io_dir_0; // @[Transposer.scala:100:9, :110:36]
wire [7:0] _reg_T_1 = _reg_T ? io_inR_0 : io_inD_0; // @[Transposer.scala:100:9, :110:{28,36}]
reg [7:0] reg_0; // @[Transposer.scala:110:24]
assign io_outL_0 = reg_0; // @[Transposer.scala:100:9, :110:24]
assign io_outU_0 = reg_0; // @[Transposer.scala:100:9, :110:24]
always @(posedge clock) begin // @[Transposer.scala:100:9]
if (io_en_0) // @[Transposer.scala:100:9]
reg_0 <= _reg_T_1; // @[Transposer.scala:110:{24,28}]
end // always @(posedge)
assign io_outL = io_outL_0; // @[Transposer.scala:100:9]
assign io_outU = io_outU_0; // @[Transposer.scala:100:9]
endmodule |
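PE_87 above is one storage cell of the AlwaysOutTransposer: a single RegEnable that latches either the right-hand (`io_inR`) or downward (`io_inD`) neighbour depending on `dir`, and drives both outputs from the same register. As a hedged usage sketch, a wrapper instantiating the full transposer with 8-bit elements might look like the following (the 4x4 size and the wrapper name are assumptions for illustration, and it presumes the gemmini package above is on the classpath):

import chisel3._
import chisel3.util._

// Hypothetical wrapper: stream rows into a 4x4 AlwaysOutTransposer and expose the columns.
class TransposeExample extends Module {
  val io = IO(new Bundle {
    val in  = Flipped(Decoupled(Vec(4, UInt(8.W))))
    val out = Decoupled(Vec(4, UInt(8.W)))
  })
  val t = Module(new AlwaysOutTransposer(4, UInt(8.W))) // builds a 4x4 grid of the PE above
  t.io.inRow <> io.in
  io.out <> t.io.outCol
}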
Generate the Verilog code corresponding to the following Chisel files.
File ShiftReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
// Similar to the Chisel ShiftRegister but allows the user to suggest a
// name to the registers that get instantiated, and
// to provide a reset value.
object ShiftRegInit {
def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T =
(0 until n).foldRight(in) {
case (i, next) => {
val r = RegNext(next, init)
name.foreach { na => r.suggestName(s"${na}_${i}") }
r
}
}
}
/** These wrap behavioral
* shift registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
* The different types vary in their reset behavior:
* AsyncResetShiftReg -- Asynchronously reset register array
* A W(width) x D(depth) sized array is constructed from D instantiations of a
* W-wide register vector. Functionally identical to AsyncResetSynchronizerShiftReg,
* but only used for timing applications
*/
abstract class AbstractPipelineReg(w: Int = 1) extends Module {
val io = IO(new Bundle {
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
}
)
}
object AbstractPipelineReg {
def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = {
val chain = Module(gen)
name.foreach{ chain.suggestName(_) }
chain.io.d := in.asUInt
chain.io.q.asTypeOf(in)
}
}
class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) {
require(depth > 0, "Depth must be greater than 0.")
override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}"
val chain = List.tabulate(depth) { i =>
Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}")
}
chain.last.io.d := io.d
chain.last.io.en := true.B
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink.io.d := source.io.q
sink.io.en := true.B
}
io.q := chain.head.io.q
}
object AsyncResetShiftReg {
def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name)
def apply [T <: Data](in: T, depth: Int, name: Option[String]): T =
apply(in, depth, 0, name)
def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T =
apply(in, depth, init.litValue.toInt, name)
def apply [T <: Data](in: T, depth: Int, init: T): T =
apply (in, depth, init.litValue.toInt, None)
}
File AsyncQueue.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util._
case class AsyncQueueParams(
depth: Int = 8,
sync: Int = 3,
safe: Boolean = true,
// If safe is true, then effort is made to resynchronize the crossing indices when either side is reset.
// This makes it safe/possible to reset one side of the crossing (but not the other) when the queue is empty.
narrow: Boolean = false)
// If narrow is true then the read mux is moved to the source side of the crossing.
// This reduces the number of level shifters in the case where the clock crossing is also a voltage crossing,
// at the expense of a combinational path from the sink to the source and back to the sink.
{
require (depth > 0 && isPow2(depth))
require (sync >= 2)
val bits = log2Ceil(depth)
val wires = if (narrow) 1 else depth
}
object AsyncQueueParams {
// When there is only one entry, we don't need narrow.
def singleton(sync: Int = 3, safe: Boolean = true) = AsyncQueueParams(1, sync, safe, false)
}
class AsyncBundleSafety extends Bundle {
val ridx_valid = Input (Bool())
val widx_valid = Output(Bool())
val source_reset_n = Output(Bool())
val sink_reset_n = Input (Bool())
}
class AsyncBundle[T <: Data](private val gen: T, val params: AsyncQueueParams = AsyncQueueParams()) extends Bundle {
// Data-path synchronization
val mem = Output(Vec(params.wires, gen))
val ridx = Input (UInt((params.bits+1).W))
val widx = Output(UInt((params.bits+1).W))
val index = params.narrow.option(Input(UInt(params.bits.W)))
// Signals used to self-stabilize a safe AsyncQueue
val safe = params.safe.option(new AsyncBundleSafety)
}
object GrayCounter {
def apply(bits: Int, increment: Bool = true.B, clear: Bool = false.B, name: String = "binary"): UInt = {
val incremented = Wire(UInt(bits.W))
val binary = RegNext(next=incremented, init=0.U).suggestName(name)
incremented := Mux(clear, 0.U, binary + increment.asUInt)
incremented ^ (incremented >> 1)
}
}
class AsyncValidSync(sync: Int, desc: String) extends RawModule {
val io = IO(new Bundle {
val in = Input(Bool())
val out = Output(Bool())
})
val clock = IO(Input(Clock()))
val reset = IO(Input(AsyncReset()))
withClockAndReset(clock, reset){
io.out := AsyncResetSynchronizerShiftReg(io.in, sync, Some(desc))
}
}
class AsyncQueueSource[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module {
override def desiredName = s"AsyncQueueSource_${gen.typeName}"
val io = IO(new Bundle {
// These come from the source domain
val enq = Flipped(Decoupled(gen))
// These cross to the sink clock domain
val async = new AsyncBundle(gen, params)
})
val bits = params.bits
val sink_ready = WireInit(true.B)
val mem = Reg(Vec(params.depth, gen)) // This does NOT need to be reset at all.
val widx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.enq.fire, !sink_ready, "widx_bin"))
val ridx = AsyncResetSynchronizerShiftReg(io.async.ridx, params.sync, Some("ridx_gray"))
val ready = sink_ready && widx =/= (ridx ^ (params.depth | params.depth >> 1).U)
val index = if (bits == 0) 0.U else io.async.widx(bits-1, 0) ^ (io.async.widx(bits, bits) << (bits-1))
when (io.enq.fire) { mem(index) := io.enq.bits }
val ready_reg = withReset(reset.asAsyncReset)(RegNext(next=ready, init=false.B).suggestName("ready_reg"))
io.enq.ready := ready_reg && sink_ready
val widx_reg = withReset(reset.asAsyncReset)(RegNext(next=widx, init=0.U).suggestName("widx_gray"))
io.async.widx := widx_reg
io.async.index match {
case Some(index) => io.async.mem(0) := mem(index)
case None => io.async.mem := mem
}
io.async.safe.foreach { sio =>
val source_valid_0 = Module(new AsyncValidSync(params.sync, "source_valid_0"))
val source_valid_1 = Module(new AsyncValidSync(params.sync, "source_valid_1"))
val sink_extend = Module(new AsyncValidSync(params.sync, "sink_extend"))
val sink_valid = Module(new AsyncValidSync(params.sync, "sink_valid"))
source_valid_0.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset
source_valid_1.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset
sink_extend .reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset
sink_valid .reset := reset.asAsyncReset
source_valid_0.clock := clock
source_valid_1.clock := clock
sink_extend .clock := clock
sink_valid .clock := clock
source_valid_0.io.in := true.B
source_valid_1.io.in := source_valid_0.io.out
sio.widx_valid := source_valid_1.io.out
sink_extend.io.in := sio.ridx_valid
sink_valid.io.in := sink_extend.io.out
sink_ready := sink_valid.io.out
sio.source_reset_n := !reset.asBool
// Assert that if there is stuff in the queue, then reset cannot happen
// Impossible to write because dequeue can occur on the receiving side,
// then reset allowed to happen, but write side cannot know that dequeue
// occurred.
// TODO: write some sort of sanity check assertion for users
// that denote don't reset when there is activity
// assert (!(reset || !sio.sink_reset_n) || !io.enq.valid, "Enqueue while sink is reset and AsyncQueueSource is unprotected")
// assert (!reset_rise || prev_idx_match.asBool, "Sink reset while AsyncQueueSource not empty")
}
}
class AsyncQueueSink[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module {
override def desiredName = s"AsyncQueueSink_${gen.typeName}"
val io = IO(new Bundle {
// These come from the sink domain
val deq = Decoupled(gen)
// These cross to the source clock domain
val async = Flipped(new AsyncBundle(gen, params))
})
val bits = params.bits
val source_ready = WireInit(true.B)
val ridx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.deq.fire, !source_ready, "ridx_bin"))
val widx = AsyncResetSynchronizerShiftReg(io.async.widx, params.sync, Some("widx_gray"))
val valid = source_ready && ridx =/= widx
// The mux is safe because timing analysis ensures ridx has reached the register
// On an ASIC, changes to the unread location cannot affect the selected value
// On an FPGA, only one input changes at a time => mem updates don't cause glitches
// The register only latches when the selected valued is not being written
val index = if (bits == 0) 0.U else ridx(bits-1, 0) ^ (ridx(bits, bits) << (bits-1))
io.async.index.foreach { _ := index }
// This register does not NEED to be reset, as its contents will not
// be considered unless the asynchronously reset deq valid register is set.
// It is possible that bits latches when the source domain is reset / has power cut
// This is safe, because isolation gates brought mem low before the zeroed widx reached us
val deq_bits_nxt = io.async.mem(if (params.narrow) 0.U else index)
io.deq.bits := ClockCrossingReg(deq_bits_nxt, en = valid, doInit = false, name = Some("deq_bits_reg"))
val valid_reg = withReset(reset.asAsyncReset)(RegNext(next=valid, init=false.B).suggestName("valid_reg"))
io.deq.valid := valid_reg && source_ready
val ridx_reg = withReset(reset.asAsyncReset)(RegNext(next=ridx, init=0.U).suggestName("ridx_gray"))
io.async.ridx := ridx_reg
io.async.safe.foreach { sio =>
val sink_valid_0 = Module(new AsyncValidSync(params.sync, "sink_valid_0"))
val sink_valid_1 = Module(new AsyncValidSync(params.sync, "sink_valid_1"))
val source_extend = Module(new AsyncValidSync(params.sync, "source_extend"))
val source_valid = Module(new AsyncValidSync(params.sync, "source_valid"))
sink_valid_0 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset
sink_valid_1 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset
source_extend.reset := (reset.asBool || !sio.source_reset_n).asAsyncReset
source_valid .reset := reset.asAsyncReset
sink_valid_0 .clock := clock
sink_valid_1 .clock := clock
source_extend.clock := clock
source_valid .clock := clock
sink_valid_0.io.in := true.B
sink_valid_1.io.in := sink_valid_0.io.out
sio.ridx_valid := sink_valid_1.io.out
source_extend.io.in := sio.widx_valid
source_valid.io.in := source_extend.io.out
source_ready := source_valid.io.out
sio.sink_reset_n := !reset.asBool
// TODO: write some sort of sanity check assertion for users
// that denote don't reset when there is activity
//
// val reset_and_extend = !source_ready || !sio.source_reset_n || reset.asBool
// val reset_and_extend_prev = RegNext(reset_and_extend, true.B)
// val reset_rise = !reset_and_extend_prev && reset_and_extend
// val prev_idx_match = AsyncResetReg(updateData=(io.async.widx===io.async.ridx), resetData=0)
// assert (!reset_rise || prev_idx_match.asBool, "Source reset while AsyncQueueSink not empty")
}
}
object FromAsyncBundle
{
// Sometimes it makes sense for the sink to have different sync than the source
def apply[T <: Data](x: AsyncBundle[T]): DecoupledIO[T] = apply(x, x.params.sync)
def apply[T <: Data](x: AsyncBundle[T], sync: Int): DecoupledIO[T] = {
val sink = Module(new AsyncQueueSink(chiselTypeOf(x.mem(0)), x.params.copy(sync = sync)))
sink.io.async <> x
sink.io.deq
}
}
object ToAsyncBundle
{
def apply[T <: Data](x: ReadyValidIO[T], params: AsyncQueueParams = AsyncQueueParams()): AsyncBundle[T] = {
val source = Module(new AsyncQueueSource(chiselTypeOf(x.bits), params))
source.io.enq <> x
source.io.async
}
}
class AsyncQueue[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Crossing[T] {
val io = IO(new CrossingIO(gen))
val source = withClockAndReset(io.enq_clock, io.enq_reset) { Module(new AsyncQueueSource(gen, params)) }
val sink = withClockAndReset(io.deq_clock, io.deq_reset) { Module(new AsyncQueueSink (gen, params)) }
source.io.enq <> io.enq
io.deq <> sink.io.deq
sink.io.async <> source.io.async
}
| module AsyncValidSync_108( // @[AsyncQueue.scala:58:7]
output io_out, // @[AsyncQueue.scala:59:14]
input clock, // @[AsyncQueue.scala:63:17]
input reset // @[AsyncQueue.scala:64:17]
);
wire io_in = 1'h1; // @[ShiftReg.scala:45:23]
wire _io_out_WIRE; // @[ShiftReg.scala:48:24]
wire io_out_0; // @[AsyncQueue.scala:58:7]
assign io_out_0 = _io_out_WIRE; // @[ShiftReg.scala:48:24]
AsyncResetSynchronizerShiftReg_w1_d3_i0_122 io_out_sink_valid_0 ( // @[ShiftReg.scala:45:23]
.clock (clock),
.reset (reset),
.io_q (_io_out_WIRE)
); // @[ShiftReg.scala:45:23]
assign io_out = io_out_0; // @[AsyncQueue.scala:58:7]
endmodule |
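AsyncValidSync_108 feeds a constant-true input through a three-deep asynchronously reset synchronizer, so its output rises a few destination-domain cycles after reset deasserts and falls immediately on reset. A hedged sketch of the same idea built from plain Chisel primitives (the module name and depth are illustrative, not the library's implementation):

import chisel3._
import chisel3.util._

// Hypothetical 3-flop valid synchronizer with an asynchronous reset.
class ValidSyncSketch extends RawModule {
  val clock = IO(Input(Clock()))
  val reset = IO(Input(AsyncReset()))
  val out   = IO(Output(Bool()))
  withClockAndReset(clock, reset) {
    // Each stage resets to false and shifts a constant true toward the output.
    out := ShiftRegister(true.B, 3, false.B, true.B)
  }
}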
Generate the Verilog code corresponding to the following Chisel files.
File PE.scala:
// See README.md for license details.
package gemmini
import chisel3._
import chisel3.util._
class PEControl[T <: Data : Arithmetic](accType: T) extends Bundle {
val dataflow = UInt(1.W) // TODO make this an Enum
val propagate = UInt(1.W) // Which register should be propagated (and which should be accumulated)?
val shift = UInt(log2Up(accType.getWidth).W) // TODO this isn't correct for Floats
}
class MacUnit[T <: Data](inputType: T, cType: T, dType: T) (implicit ev: Arithmetic[T]) extends Module {
import ev._
val io = IO(new Bundle {
val in_a = Input(inputType)
val in_b = Input(inputType)
val in_c = Input(cType)
val out_d = Output(dType)
})
io.out_d := io.in_c.mac(io.in_a, io.in_b)
}
// TODO update documentation
/**
* A PE implementing a MAC operation. Configured as fully combinational when integrated into a Mesh.
* @param width Data width of operands
*/
class PE[T <: Data](inputType: T, outputType: T, accType: T, df: Dataflow.Value, max_simultaneous_matmuls: Int)
(implicit ev: Arithmetic[T]) extends Module { // Debugging variables
import ev._
val io = IO(new Bundle {
val in_a = Input(inputType)
val in_b = Input(outputType)
val in_d = Input(outputType)
val out_a = Output(inputType)
val out_b = Output(outputType)
val out_c = Output(outputType)
val in_control = Input(new PEControl(accType))
val out_control = Output(new PEControl(accType))
val in_id = Input(UInt(log2Up(max_simultaneous_matmuls).W))
val out_id = Output(UInt(log2Up(max_simultaneous_matmuls).W))
val in_last = Input(Bool())
val out_last = Output(Bool())
val in_valid = Input(Bool())
val out_valid = Output(Bool())
val bad_dataflow = Output(Bool())
})
val cType = if (df == Dataflow.WS) inputType else accType
// When creating PEs that support multiple dataflows, the
// elaboration/synthesis tools often fail to consolidate and de-duplicate
// MAC units. To force mac circuitry to be re-used, we create a "mac_unit"
// module here which just performs a single MAC operation
val mac_unit = Module(new MacUnit(inputType,
if (df == Dataflow.WS) outputType else accType, outputType))
val a = io.in_a
val b = io.in_b
val d = io.in_d
val c1 = Reg(cType)
val c2 = Reg(cType)
val dataflow = io.in_control.dataflow
val prop = io.in_control.propagate
val shift = io.in_control.shift
val id = io.in_id
val last = io.in_last
val valid = io.in_valid
io.out_a := a
io.out_control.dataflow := dataflow
io.out_control.propagate := prop
io.out_control.shift := shift
io.out_id := id
io.out_last := last
io.out_valid := valid
mac_unit.io.in_a := a
val last_s = RegEnable(prop, valid)
val flip = last_s =/= prop
val shift_offset = Mux(flip, shift, 0.U)
// Which dataflow are we using?
val OUTPUT_STATIONARY = Dataflow.OS.id.U(1.W)
val WEIGHT_STATIONARY = Dataflow.WS.id.U(1.W)
// Is c1 being computed on, or propagated forward (in the output-stationary dataflow)?
val COMPUTE = 0.U(1.W)
val PROPAGATE = 1.U(1.W)
io.bad_dataflow := false.B
when ((df == Dataflow.OS).B || ((df == Dataflow.BOTH).B && dataflow === OUTPUT_STATIONARY)) {
when(prop === PROPAGATE) {
io.out_c := (c1 >> shift_offset).clippedToWidthOf(outputType)
io.out_b := b
mac_unit.io.in_b := b.asTypeOf(inputType)
mac_unit.io.in_c := c2
c2 := mac_unit.io.out_d
c1 := d.withWidthOf(cType)
}.otherwise {
io.out_c := (c2 >> shift_offset).clippedToWidthOf(outputType)
io.out_b := b
mac_unit.io.in_b := b.asTypeOf(inputType)
mac_unit.io.in_c := c1
c1 := mac_unit.io.out_d
c2 := d.withWidthOf(cType)
}
}.elsewhen ((df == Dataflow.WS).B || ((df == Dataflow.BOTH).B && dataflow === WEIGHT_STATIONARY)) {
when(prop === PROPAGATE) {
io.out_c := c1
mac_unit.io.in_b := c2.asTypeOf(inputType)
mac_unit.io.in_c := b
io.out_b := mac_unit.io.out_d
c1 := d
}.otherwise {
io.out_c := c2
mac_unit.io.in_b := c1.asTypeOf(inputType)
mac_unit.io.in_c := b
io.out_b := mac_unit.io.out_d
c2 := d
}
}.otherwise {
io.bad_dataflow := true.B
//assert(false.B, "unknown dataflow")
io.out_c := DontCare
io.out_b := DontCare
mac_unit.io.in_b := b.asTypeOf(inputType)
mac_unit.io.in_c := c2
}
when (!valid) {
c1 := c1
c2 := c2
mac_unit.io.in_b := DontCare
mac_unit.io.in_c := DontCare
}
}
File Arithmetic.scala:
// A simple type class for Chisel datatypes that can add and multiply. To add your own type, simply create your own:
// implicit MyTypeArithmetic extends Arithmetic[MyType] { ... }
package gemmini
import chisel3._
import chisel3.util._
import hardfloat._
// Bundles that represent the raw bits of custom datatypes
case class Float(expWidth: Int, sigWidth: Int) extends Bundle {
val bits = UInt((expWidth + sigWidth).W)
val bias: Int = (1 << (expWidth-1)) - 1
}
case class DummySInt(w: Int) extends Bundle {
val bits = UInt(w.W)
def dontCare: DummySInt = {
val o = Wire(new DummySInt(w))
o.bits := 0.U
o
}
}
// The Arithmetic typeclass which implements various arithmetic operations on custom datatypes
abstract class Arithmetic[T <: Data] {
implicit def cast(t: T): ArithmeticOps[T]
}
abstract class ArithmeticOps[T <: Data](self: T) {
def *(t: T): T
def mac(m1: T, m2: T): T // Returns (m1 * m2 + self)
def +(t: T): T
def -(t: T): T
def >>(u: UInt): T // This is a rounding shift! Rounds away from 0
def >(t: T): Bool
def identity: T
def withWidthOf(t: T): T
def clippedToWidthOf(t: T): T // Like "withWidthOf", except that it saturates
def relu: T
def zero: T
def minimum: T
// Optional parameters, which only need to be defined if you want to enable various optimizations for transformers
def divider(denom_t: UInt, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[T])] = None
def sqrt: Option[(DecoupledIO[UInt], DecoupledIO[T])] = None
def reciprocal[U <: Data](u: U, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[U])] = None
def mult_with_reciprocal[U <: Data](reciprocal: U) = self
}
object Arithmetic {
implicit object UIntArithmetic extends Arithmetic[UInt] {
override implicit def cast(self: UInt) = new ArithmeticOps(self) {
override def *(t: UInt) = self * t
override def mac(m1: UInt, m2: UInt) = m1 * m2 + self
override def +(t: UInt) = self + t
override def -(t: UInt) = self - t
override def >>(u: UInt) = {
// The equation we use can be found here: https://riscv.github.io/documents/riscv-v-spec/#_vector_fixed_point_rounding_mode_register_vxrm
// TODO Do we need to explicitly handle the cases where "u" is a small number (like 0)? What is the default behavior here?
val point_five = Mux(u === 0.U, 0.U, self(u - 1.U))
val zeros = Mux(u <= 1.U, 0.U, self.asUInt & ((1.U << (u - 1.U)).asUInt - 1.U)) =/= 0.U
val ones_digit = self(u)
val r = point_five & (zeros | ones_digit)
(self >> u).asUInt + r
}
override def >(t: UInt): Bool = self > t
override def withWidthOf(t: UInt) = self.asTypeOf(t)
override def clippedToWidthOf(t: UInt) = {
val sat = ((1 << (t.getWidth-1))-1).U
Mux(self > sat, sat, self)(t.getWidth-1, 0)
}
override def relu: UInt = self
override def zero: UInt = 0.U
override def identity: UInt = 1.U
override def minimum: UInt = 0.U
}
}
implicit object SIntArithmetic extends Arithmetic[SInt] {
override implicit def cast(self: SInt) = new ArithmeticOps(self) {
override def *(t: SInt) = self * t
override def mac(m1: SInt, m2: SInt) = m1 * m2 + self
override def +(t: SInt) = self + t
override def -(t: SInt) = self - t
override def >>(u: UInt) = {
// The equation we use can be found here: https://riscv.github.io/documents/riscv-v-spec/#_vector_fixed_point_rounding_mode_register_vxrm
// TODO Do we need to explicitly handle the cases where "u" is a small number (like 0)? What is the default behavior here?
val point_five = Mux(u === 0.U, 0.U, self(u - 1.U))
val zeros = Mux(u <= 1.U, 0.U, self.asUInt & ((1.U << (u - 1.U)).asUInt - 1.U)) =/= 0.U
val ones_digit = self(u)
val r = (point_five & (zeros | ones_digit)).asBool
(self >> u).asSInt + Mux(r, 1.S, 0.S)
}
override def >(t: SInt): Bool = self > t
override def withWidthOf(t: SInt) = {
if (self.getWidth >= t.getWidth)
self(t.getWidth-1, 0).asSInt
else {
val sign_bits = t.getWidth - self.getWidth
val sign = self(self.getWidth-1)
Cat(Cat(Seq.fill(sign_bits)(sign)), self).asTypeOf(t)
}
}
override def clippedToWidthOf(t: SInt): SInt = {
val maxsat = ((1 << (t.getWidth-1))-1).S
val minsat = (-(1 << (t.getWidth-1))).S
MuxCase(self, Seq((self > maxsat) -> maxsat, (self < minsat) -> minsat))(t.getWidth-1, 0).asSInt
}
override def relu: SInt = Mux(self >= 0.S, self, 0.S)
override def zero: SInt = 0.S
override def identity: SInt = 1.S
override def minimum: SInt = (-(1 << (self.getWidth-1))).S
override def divider(denom_t: UInt, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[SInt])] = {
// TODO this uses a floating point divider, but we should use an integer divider instead
val input = Wire(Decoupled(denom_t.cloneType))
val output = Wire(Decoupled(self.cloneType))
// We translate our integer to floating-point form so that we can use the hardfloat divider
val expWidth = log2Up(self.getWidth) + 1
val sigWidth = self.getWidth
def sin_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def uin_to_float(x: UInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := false.B
in_to_rec_fn.io.in := x
in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def float_to_in(x: UInt) = {
val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth))
rec_fn_to_in.io.signedOut := true.B
rec_fn_to_in.io.in := x
rec_fn_to_in.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
rec_fn_to_in.io.out.asSInt
}
val self_rec = sin_to_float(self)
val denom_rec = uin_to_float(input.bits)
// Instantiate the hardfloat divider
val divider = Module(new DivSqrtRecFN_small(expWidth, sigWidth, options))
input.ready := divider.io.inReady
divider.io.inValid := input.valid
divider.io.sqrtOp := false.B
divider.io.a := self_rec
divider.io.b := denom_rec
divider.io.roundingMode := consts.round_minMag
divider.io.detectTininess := consts.tininess_afterRounding
output.valid := divider.io.outValid_div
output.bits := float_to_in(divider.io.out)
assert(!output.valid || output.ready)
Some((input, output))
}
override def sqrt: Option[(DecoupledIO[UInt], DecoupledIO[SInt])] = {
// TODO this uses a floating point divider, but we should use an integer divider instead
val input = Wire(Decoupled(UInt(0.W)))
val output = Wire(Decoupled(self.cloneType))
input.bits := DontCare
// We translate our integer to floating-point form so that we can use the hardfloat divider
val expWidth = log2Up(self.getWidth) + 1
val sigWidth = self.getWidth
def in_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def float_to_in(x: UInt) = {
val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth))
rec_fn_to_in.io.signedOut := true.B
rec_fn_to_in.io.in := x
rec_fn_to_in.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
rec_fn_to_in.io.out.asSInt
}
val self_rec = in_to_float(self)
// Instantiate the hardfloat sqrt
val sqrter = Module(new DivSqrtRecFN_small(expWidth, sigWidth, 0))
input.ready := sqrter.io.inReady
sqrter.io.inValid := input.valid
sqrter.io.sqrtOp := true.B
sqrter.io.a := self_rec
sqrter.io.b := DontCare
sqrter.io.roundingMode := consts.round_minMag
sqrter.io.detectTininess := consts.tininess_afterRounding
output.valid := sqrter.io.outValid_sqrt
output.bits := float_to_in(sqrter.io.out)
assert(!output.valid || output.ready)
Some((input, output))
}
override def reciprocal[U <: Data](u: U, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[U])] = u match {
case Float(expWidth, sigWidth) =>
val input = Wire(Decoupled(UInt(0.W)))
val output = Wire(Decoupled(u.cloneType))
input.bits := DontCare
// We translate our integer to floating-point form so that we can use the hardfloat divider
def in_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
val self_rec = in_to_float(self)
val one_rec = in_to_float(1.S)
// Instantiate the hardfloat divider
val divider = Module(new DivSqrtRecFN_small(expWidth, sigWidth, options))
input.ready := divider.io.inReady
divider.io.inValid := input.valid
divider.io.sqrtOp := false.B
divider.io.a := one_rec
divider.io.b := self_rec
divider.io.roundingMode := consts.round_near_even
divider.io.detectTininess := consts.tininess_afterRounding
output.valid := divider.io.outValid_div
output.bits := fNFromRecFN(expWidth, sigWidth, divider.io.out).asTypeOf(u)
assert(!output.valid || output.ready)
Some((input, output))
case _ => None
}
override def mult_with_reciprocal[U <: Data](reciprocal: U): SInt = reciprocal match {
case recip @ Float(expWidth, sigWidth) =>
def in_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def float_to_in(x: UInt) = {
val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth))
rec_fn_to_in.io.signedOut := true.B
rec_fn_to_in.io.in := x
rec_fn_to_in.io.roundingMode := consts.round_minMag
rec_fn_to_in.io.out.asSInt
}
val self_rec = in_to_float(self)
val reciprocal_rec = recFNFromFN(expWidth, sigWidth, recip.bits)
// Instantiate the hardfloat multiplier
val muladder = Module(new MulRecFN(expWidth, sigWidth))
muladder.io.roundingMode := consts.round_near_even
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := self_rec
muladder.io.b := reciprocal_rec
float_to_in(muladder.io.out)
case _ => self
}
}
}
implicit object FloatArithmetic extends Arithmetic[Float] {
// TODO Floating point arithmetic currently switches between recoded and standard formats for every operation. However, it should stay in the recoded format as it travels through the systolic array
override implicit def cast(self: Float): ArithmeticOps[Float] = new ArithmeticOps(self) {
override def *(t: Float): Float = {
val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth))
t_resizer.io.in := t_rec
t_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
t_resizer.io.detectTininess := consts.tininess_afterRounding
val t_rec_resized = t_resizer.io.out
val muladder = Module(new MulRecFN(self.expWidth, self.sigWidth))
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := self_rec
muladder.io.b := t_rec_resized
val out = Wire(Float(self.expWidth, self.sigWidth))
out.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
out
}
override def mac(m1: Float, m2: Float): Float = {
// Recode all operands
val m1_rec = recFNFromFN(m1.expWidth, m1.sigWidth, m1.bits)
val m2_rec = recFNFromFN(m2.expWidth, m2.sigWidth, m2.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Resize m1 to self's width
val m1_resizer = Module(new RecFNToRecFN(m1.expWidth, m1.sigWidth, self.expWidth, self.sigWidth))
m1_resizer.io.in := m1_rec
m1_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
m1_resizer.io.detectTininess := consts.tininess_afterRounding
val m1_rec_resized = m1_resizer.io.out
// Resize m2 to self's width
val m2_resizer = Module(new RecFNToRecFN(m2.expWidth, m2.sigWidth, self.expWidth, self.sigWidth))
m2_resizer.io.in := m2_rec
m2_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
m2_resizer.io.detectTininess := consts.tininess_afterRounding
val m2_rec_resized = m2_resizer.io.out
// Perform multiply-add
val muladder = Module(new MulAddRecFN(self.expWidth, self.sigWidth))
muladder.io.op := 0.U
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := m1_rec_resized
muladder.io.b := m2_rec_resized
muladder.io.c := self_rec
// Convert result to standard format // TODO remove these intermediate recodings
val out = Wire(Float(self.expWidth, self.sigWidth))
out.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
out
}
override def +(t: Float): Float = {
require(self.getWidth >= t.getWidth) // This just makes it easier to write the resizing code
// Recode all operands
val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Generate 1 as a float
val in_to_rec_fn = Module(new INToRecFN(1, self.expWidth, self.sigWidth))
in_to_rec_fn.io.signedIn := false.B
in_to_rec_fn.io.in := 1.U
in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
val one_rec = in_to_rec_fn.io.out
// Resize t
val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth))
t_resizer.io.in := t_rec
t_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
t_resizer.io.detectTininess := consts.tininess_afterRounding
val t_rec_resized = t_resizer.io.out
// Perform addition
val muladder = Module(new MulAddRecFN(self.expWidth, self.sigWidth))
muladder.io.op := 0.U
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := t_rec_resized
muladder.io.b := one_rec
muladder.io.c := self_rec
val result = Wire(Float(self.expWidth, self.sigWidth))
result.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
result
}
override def -(t: Float): Float = {
val t_sgn = t.bits(t.getWidth-1)
val neg_t = Cat(~t_sgn, t.bits(t.getWidth-2,0)).asTypeOf(t)
self + neg_t
}
override def >>(u: UInt): Float = {
// Recode self
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Get 2^(-u) as a recoded float
val shift_exp = Wire(UInt(self.expWidth.W))
shift_exp := self.bias.U - u
val shift_fn = Cat(0.U(1.W), shift_exp, 0.U((self.sigWidth-1).W))
val shift_rec = recFNFromFN(self.expWidth, self.sigWidth, shift_fn)
assert(shift_exp =/= 0.U, "scaling by denormalized numbers is not currently supported")
// Multiply self and 2^(-u)
val muladder = Module(new MulRecFN(self.expWidth, self.sigWidth))
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := self_rec
muladder.io.b := shift_rec
val result = Wire(Float(self.expWidth, self.sigWidth))
result.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
result
}
override def >(t: Float): Bool = {
// Recode all operands
val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Resize t to self's width
val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth))
t_resizer.io.in := t_rec
t_resizer.io.roundingMode := consts.round_near_even
t_resizer.io.detectTininess := consts.tininess_afterRounding
val t_rec_resized = t_resizer.io.out
val comparator = Module(new CompareRecFN(self.expWidth, self.sigWidth))
comparator.io.a := self_rec
comparator.io.b := t_rec_resized
comparator.io.signaling := false.B
comparator.io.gt
}
override def withWidthOf(t: Float): Float = {
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
val resizer = Module(new RecFNToRecFN(self.expWidth, self.sigWidth, t.expWidth, t.sigWidth))
resizer.io.in := self_rec
resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
resizer.io.detectTininess := consts.tininess_afterRounding
val result = Wire(Float(t.expWidth, t.sigWidth))
result.bits := fNFromRecFN(t.expWidth, t.sigWidth, resizer.io.out)
result
}
override def clippedToWidthOf(t: Float): Float = {
// TODO check for overflow. Right now, we just assume that overflow doesn't happen
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
val resizer = Module(new RecFNToRecFN(self.expWidth, self.sigWidth, t.expWidth, t.sigWidth))
resizer.io.in := self_rec
resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
resizer.io.detectTininess := consts.tininess_afterRounding
val result = Wire(Float(t.expWidth, t.sigWidth))
result.bits := fNFromRecFN(t.expWidth, t.sigWidth, resizer.io.out)
result
}
override def relu: Float = {
val raw = rawFloatFromFN(self.expWidth, self.sigWidth, self.bits)
val result = Wire(Float(self.expWidth, self.sigWidth))
result.bits := Mux(!raw.isZero && raw.sign, 0.U, self.bits)
result
}
override def zero: Float = 0.U.asTypeOf(self)
override def identity: Float = Cat(0.U(2.W), ~(0.U((self.expWidth-1).W)), 0.U((self.sigWidth-1).W)).asTypeOf(self)
override def minimum: Float = Cat(1.U, ~(0.U(self.expWidth.W)), 0.U((self.sigWidth-1).W)).asTypeOf(self)
}
}
implicit object DummySIntArithmetic extends Arithmetic[DummySInt] {
override implicit def cast(self: DummySInt) = new ArithmeticOps(self) {
override def *(t: DummySInt) = self.dontCare
override def mac(m1: DummySInt, m2: DummySInt) = self.dontCare
override def +(t: DummySInt) = self.dontCare
override def -(t: DummySInt) = self.dontCare
override def >>(t: UInt) = self.dontCare
override def >(t: DummySInt): Bool = false.B
override def identity = self.dontCare
override def withWidthOf(t: DummySInt) = self.dontCare
override def clippedToWidthOf(t: DummySInt) = self.dontCare
override def relu = self.dontCare
override def zero = self.dontCare
override def minimum: DummySInt = self.dontCare
}
}
}
| module MacUnit_68( // @[PE.scala:14:7]
input clock, // @[PE.scala:14:7]
input reset, // @[PE.scala:14:7]
input [7:0] io_in_a, // @[PE.scala:16:14]
input [7:0] io_in_b, // @[PE.scala:16:14]
input [31:0] io_in_c, // @[PE.scala:16:14]
output [19:0] io_out_d // @[PE.scala:16:14]
);
wire [7:0] io_in_a_0 = io_in_a; // @[PE.scala:14:7]
wire [7:0] io_in_b_0 = io_in_b; // @[PE.scala:14:7]
wire [31:0] io_in_c_0 = io_in_c; // @[PE.scala:14:7]
wire [19:0] io_out_d_0; // @[PE.scala:14:7]
wire [15:0] _io_out_d_T = {{8{io_in_a_0[7]}}, io_in_a_0} * {{8{io_in_b_0[7]}}, io_in_b_0}; // @[PE.scala:14:7]
wire [32:0] _io_out_d_T_1 = {{17{_io_out_d_T[15]}}, _io_out_d_T} + {io_in_c_0[31], io_in_c_0}; // @[PE.scala:14:7]
wire [31:0] _io_out_d_T_2 = _io_out_d_T_1[31:0]; // @[Arithmetic.scala:93:54]
wire [31:0] _io_out_d_T_3 = _io_out_d_T_2; // @[Arithmetic.scala:93:54]
assign io_out_d_0 = _io_out_d_T_3[19:0]; // @[PE.scala:14:7, :23:12]
assign io_out_d = io_out_d_0; // @[PE.scala:14:7]
endmodule |
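MacUnit_68 implements `SIntArithmetic.mac`: the signed 8x8 product is sign-extended, added to the 32-bit `in_c`, and the low 20 bits of the sum drive `out_d`, matching the `[19:0]` select in the generated code. A hedged stand-alone restatement of that datapath (the module name is an assumption):

import chisel3._

// Hypothetical restatement of the generated datapath: out_d = trunc20(in_a * in_b + in_c).
class MacSketch extends Module {
  val io = IO(new Bundle {
    val a = Input(SInt(8.W))
    val b = Input(SInt(8.W))
    val c = Input(SInt(32.W))
    val d = Output(SInt(20.W))
  })
  val sum = io.a * io.b + io.c  // 33-bit intermediate, like _io_out_d_T_1
  io.d := sum(19, 0).asSInt     // low 20 bits, like the [19:0] select on io_out_d_0
}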
Generate the Verilog code corresponding to the following Chisel files.
File DescribedSRAM.scala:
// See LICENSE.Berkeley for license details.
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3.{Data, SyncReadMem, Vec}
import chisel3.util.log2Ceil
object DescribedSRAM {
def apply[T <: Data](
name: String,
desc: String,
size: BigInt, // depth
data: T
): SyncReadMem[T] = {
val mem = SyncReadMem(size, data)
mem.suggestName(name)
val granWidth = data match {
case v: Vec[_] => v.head.getWidth
case d => d.getWidth
}
val uid = 0
Annotated.srams(
component = mem,
name = name,
address_width = log2Ceil(size),
data_width = data.getWidth,
depth = size,
description = desc,
write_mask_granularity = granWidth
)
mem
}
}
| module rockettile_icache_data_arrays_1( // @[DescribedSRAM.scala:17:26]
input [6:0] RW0_addr,
input RW0_en,
input RW0_clk,
input RW0_wmode,
input [255:0] RW0_wdata,
output [255:0] RW0_rdata,
input [7:0] RW0_wmask
);
rockettile_icache_data_arrays_0_ext rockettile_icache_data_arrays_0_ext ( // @[DescribedSRAM.scala:17:26]
.RW0_addr (RW0_addr),
.RW0_en (RW0_en),
.RW0_clk (RW0_clk),
.RW0_wmode (RW0_wmode),
.RW0_wdata (RW0_wdata),
.RW0_rdata (RW0_rdata),
.RW0_wmask (RW0_wmask)
); // @[DescribedSRAM.scala:17:26]
endmodule |
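DescribedSRAM only builds a SyncReadMem, suggests a name, and attaches an SRAM annotation, which the backend then lowers to the single read/write-port macro instantiated above (128 entries x 256 bits with an 8-lane write mask). A hedged usage sketch of how such an array is typically declared (the wrapper name, the 8x32-bit lane split, and the port names are assumptions for illustration):

import chisel3._
import chisel3.util._
import freechips.rocketchip.util.DescribedSRAM

// Hypothetical wrapper showing how such an icache data array is typically declared.
class ICacheDataArraySketch extends Module {
  val io = IO(new Bundle {
    val addr  = Input(UInt(7.W))           // 128 entries -> 7 address bits
    val wen   = Input(Bool())
    val wdata = Input(Vec(8, UInt(32.W)))  // 8 x 32 = 256 bits per entry
    val wmask = Input(Vec(8, Bool()))
    val rdata = Output(Vec(8, UInt(32.W)))
  })
  val mem = DescribedSRAM("data_array_sketch", "example icache data array", 128, Vec(8, UInt(32.W)))
  io.rdata := mem.read(io.addr, !io.wen)
  when(io.wen) { mem.write(io.addr, io.wdata, io.wmask) }
}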
Generate the Verilog code corresponding to the following Chisel files.
File PE.scala:
// See README.md for license details.
package gemmini
import chisel3._
import chisel3.util._
class PEControl[T <: Data : Arithmetic](accType: T) extends Bundle {
val dataflow = UInt(1.W) // TODO make this an Enum
val propagate = UInt(1.W) // Which register should be propagated (and which should be accumulated)?
val shift = UInt(log2Up(accType.getWidth).W) // TODO this isn't correct for Floats
}
class MacUnit[T <: Data](inputType: T, cType: T, dType: T) (implicit ev: Arithmetic[T]) extends Module {
import ev._
val io = IO(new Bundle {
val in_a = Input(inputType)
val in_b = Input(inputType)
val in_c = Input(cType)
val out_d = Output(dType)
})
io.out_d := io.in_c.mac(io.in_a, io.in_b)
}
// TODO update documentation
/**
* A PE implementing a MAC operation. Configured as fully combinational when integrated into a Mesh.
* @param width Data width of operands
*/
class PE[T <: Data](inputType: T, outputType: T, accType: T, df: Dataflow.Value, max_simultaneous_matmuls: Int)
(implicit ev: Arithmetic[T]) extends Module { // Debugging variables
import ev._
val io = IO(new Bundle {
val in_a = Input(inputType)
val in_b = Input(outputType)
val in_d = Input(outputType)
val out_a = Output(inputType)
val out_b = Output(outputType)
val out_c = Output(outputType)
val in_control = Input(new PEControl(accType))
val out_control = Output(new PEControl(accType))
val in_id = Input(UInt(log2Up(max_simultaneous_matmuls).W))
val out_id = Output(UInt(log2Up(max_simultaneous_matmuls).W))
val in_last = Input(Bool())
val out_last = Output(Bool())
val in_valid = Input(Bool())
val out_valid = Output(Bool())
val bad_dataflow = Output(Bool())
})
val cType = if (df == Dataflow.WS) inputType else accType
// When creating PEs that support multiple dataflows, the
// elaboration/synthesis tools often fail to consolidate and de-duplicate
// MAC units. To force mac circuitry to be re-used, we create a "mac_unit"
// module here which just performs a single MAC operation
val mac_unit = Module(new MacUnit(inputType,
if (df == Dataflow.WS) outputType else accType, outputType))
val a = io.in_a
val b = io.in_b
val d = io.in_d
val c1 = Reg(cType)
val c2 = Reg(cType)
val dataflow = io.in_control.dataflow
val prop = io.in_control.propagate
val shift = io.in_control.shift
val id = io.in_id
val last = io.in_last
val valid = io.in_valid
io.out_a := a
io.out_control.dataflow := dataflow
io.out_control.propagate := prop
io.out_control.shift := shift
io.out_id := id
io.out_last := last
io.out_valid := valid
mac_unit.io.in_a := a
val last_s = RegEnable(prop, valid)
val flip = last_s =/= prop
val shift_offset = Mux(flip, shift, 0.U)
// Which dataflow are we using?
val OUTPUT_STATIONARY = Dataflow.OS.id.U(1.W)
val WEIGHT_STATIONARY = Dataflow.WS.id.U(1.W)
// Is c1 being computed on, or propagated forward (in the output-stationary dataflow)?
val COMPUTE = 0.U(1.W)
val PROPAGATE = 1.U(1.W)
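// Illustrative walk-through of the output-stationary case below: while prop === COMPUTE, c1
// accumulates a*b through the shared mac_unit while c2 holds the previously finished tile,
// which is shifted/clipped onto io.out_c and then reloaded from io.in_d; when prop toggles to
// PROPAGATE the two registers swap roles, so one tile drains while the next one accumulates.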
io.bad_dataflow := false.B
when ((df == Dataflow.OS).B || ((df == Dataflow.BOTH).B && dataflow === OUTPUT_STATIONARY)) {
when(prop === PROPAGATE) {
io.out_c := (c1 >> shift_offset).clippedToWidthOf(outputType)
io.out_b := b
mac_unit.io.in_b := b.asTypeOf(inputType)
mac_unit.io.in_c := c2
c2 := mac_unit.io.out_d
c1 := d.withWidthOf(cType)
}.otherwise {
io.out_c := (c2 >> shift_offset).clippedToWidthOf(outputType)
io.out_b := b
mac_unit.io.in_b := b.asTypeOf(inputType)
mac_unit.io.in_c := c1
c1 := mac_unit.io.out_d
c2 := d.withWidthOf(cType)
}
}.elsewhen ((df == Dataflow.WS).B || ((df == Dataflow.BOTH).B && dataflow === WEIGHT_STATIONARY)) {
when(prop === PROPAGATE) {
io.out_c := c1
mac_unit.io.in_b := c2.asTypeOf(inputType)
mac_unit.io.in_c := b
io.out_b := mac_unit.io.out_d
c1 := d
}.otherwise {
io.out_c := c2
mac_unit.io.in_b := c1.asTypeOf(inputType)
mac_unit.io.in_c := b
io.out_b := mac_unit.io.out_d
c2 := d
}
}.otherwise {
io.bad_dataflow := true.B
//assert(false.B, "unknown dataflow")
io.out_c := DontCare
io.out_b := DontCare
mac_unit.io.in_b := b.asTypeOf(inputType)
mac_unit.io.in_c := c2
}
when (!valid) {
c1 := c1
c2 := c2
mac_unit.io.in_b := DontCare
mac_unit.io.in_c := DontCare
}
}
File Arithmetic.scala:
// A simple type class for Chisel datatypes that can add and multiply. To add your own type, simply create your own:
// implicit MyTypeArithmetic extends Arithmetic[MyType] { ... }
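// For instance (illustrative sketch only; MySat and saturate are hypothetical names):
//   implicit object MySatArithmetic extends Arithmetic[MySat] {
//     override implicit def cast(self: MySat) = new ArithmeticOps(self) {
//       override def mac(m1: MySat, m2: MySat) = saturate(m1 * m2 + self)
//       // ... plus the remaining ArithmeticOps members (+, -, >>, relu, zero, minimum, etc.)
//     }
//   }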
package gemmini
import chisel3._
import chisel3.util._
import hardfloat._
// Bundles that represent the raw bits of custom datatypes
case class Float(expWidth: Int, sigWidth: Int) extends Bundle {
val bits = UInt((expWidth + sigWidth).W)
val bias: Int = (1 << (expWidth-1)) - 1
}
case class DummySInt(w: Int) extends Bundle {
val bits = UInt(w.W)
def dontCare: DummySInt = {
val o = Wire(new DummySInt(w))
o.bits := 0.U
o
}
}
// The Arithmetic typeclass which implements various arithmetic operations on custom datatypes
abstract class Arithmetic[T <: Data] {
implicit def cast(t: T): ArithmeticOps[T]
}
abstract class ArithmeticOps[T <: Data](self: T) {
def *(t: T): T
def mac(m1: T, m2: T): T // Returns (m1 * m2 + self)
def +(t: T): T
def -(t: T): T
def >>(u: UInt): T // This is a rounding shift! Rounds away from 0
def >(t: T): Bool
def identity: T
def withWidthOf(t: T): T
def clippedToWidthOf(t: T): T // Like "withWidthOf", except that it saturates
def relu: T
def zero: T
def minimum: T
// Optional parameters, which only need to be defined if you want to enable various optimizations for transformers
def divider(denom_t: UInt, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[T])] = None
def sqrt: Option[(DecoupledIO[UInt], DecoupledIO[T])] = None
def reciprocal[U <: Data](u: U, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[U])] = None
def mult_with_reciprocal[U <: Data](reciprocal: U) = self
}
object Arithmetic {
implicit object UIntArithmetic extends Arithmetic[UInt] {
override implicit def cast(self: UInt) = new ArithmeticOps(self) {
override def *(t: UInt) = self * t
override def mac(m1: UInt, m2: UInt) = m1 * m2 + self
override def +(t: UInt) = self + t
override def -(t: UInt) = self - t
override def >>(u: UInt) = {
// The equation we use can be found here: https://riscv.github.io/documents/riscv-v-spec/#_vector_fixed_point_rounding_mode_register_vxrm
// TODO Do we need to explicitly handle the cases where "u" is a small number (like 0)? What is the default behavior here?
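// Worked example: with self = 6 (0b110) and u = 2, point_five = self(1) = 1, zeros = 0 and
// ones_digit = self(2) = 1, so r = 1 and the result is (6 >> 2) + 1 = 2 (1.5 rounds up to 2);
// with self = 10 (0b1010) and u = 2, point_five = 1 but zeros = 0 and ones_digit = 0, so r = 0
// and the result is 10 >> 2 = 2 (the 2.5 tie is not rounded up here).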
val point_five = Mux(u === 0.U, 0.U, self(u - 1.U))
val zeros = Mux(u <= 1.U, 0.U, self.asUInt & ((1.U << (u - 1.U)).asUInt - 1.U)) =/= 0.U
val ones_digit = self(u)
val r = point_five & (zeros | ones_digit)
(self >> u).asUInt + r
}
override def >(t: UInt): Bool = self > t
override def withWidthOf(t: UInt) = self.asTypeOf(t)
override def clippedToWidthOf(t: UInt) = {
val sat = ((1 << (t.getWidth-1))-1).U
Mux(self > sat, sat, self)(t.getWidth-1, 0)
}
override def relu: UInt = self
override def zero: UInt = 0.U
override def identity: UInt = 1.U
override def minimum: UInt = 0.U
}
}
implicit object SIntArithmetic extends Arithmetic[SInt] {
override implicit def cast(self: SInt) = new ArithmeticOps(self) {
override def *(t: SInt) = self * t
override def mac(m1: SInt, m2: SInt) = m1 * m2 + self
override def +(t: SInt) = self + t
override def -(t: SInt) = self - t
override def >>(u: UInt) = {
// The equation we use can be found here: https://riscv.github.io/documents/riscv-v-spec/#_vector_fixed_point_rounding_mode_register_vxrm
// TODO Do we need to explicitly handle the cases where "u" is a small number (like 0)? What is the default behavior here?
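// Worked example: with self = -5.S and u = 1, point_five = self(0) = 1, zeros = 0 (since
// u <= 1) and ones_digit = self(1) = 1, so r is true and the result is (-5 >> 1) + 1 = -2.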
val point_five = Mux(u === 0.U, 0.U, self(u - 1.U))
val zeros = Mux(u <= 1.U, 0.U, self.asUInt & ((1.U << (u - 1.U)).asUInt - 1.U)) =/= 0.U
val ones_digit = self(u)
val r = (point_five & (zeros | ones_digit)).asBool
(self >> u).asSInt + Mux(r, 1.S, 0.S)
}
override def >(t: SInt): Bool = self > t
override def withWidthOf(t: SInt) = {
if (self.getWidth >= t.getWidth)
self(t.getWidth-1, 0).asSInt
else {
val sign_bits = t.getWidth - self.getWidth
val sign = self(self.getWidth-1)
Cat(Cat(Seq.fill(sign_bits)(sign)), self).asTypeOf(t)
}
}
override def clippedToWidthOf(t: SInt): SInt = {
val maxsat = ((1 << (t.getWidth-1))-1).S
val minsat = (-(1 << (t.getWidth-1))).S
MuxCase(self, Seq((self > maxsat) -> maxsat, (self < minsat) -> minsat))(t.getWidth-1, 0).asSInt
}
override def relu: SInt = Mux(self >= 0.S, self, 0.S)
override def zero: SInt = 0.S
override def identity: SInt = 1.S
override def minimum: SInt = (-(1 << (self.getWidth-1))).S
override def divider(denom_t: UInt, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[SInt])] = {
// TODO this uses a floating point divider, but we should use an integer divider instead
val input = Wire(Decoupled(denom_t.cloneType))
val output = Wire(Decoupled(self.cloneType))
// We translate our integer to floating-point form so that we can use the hardfloat divider
val expWidth = log2Up(self.getWidth) + 1
val sigWidth = self.getWidth
def sin_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def uin_to_float(x: UInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := false.B
in_to_rec_fn.io.in := x
in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def float_to_in(x: UInt) = {
val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth))
rec_fn_to_in.io.signedOut := true.B
rec_fn_to_in.io.in := x
rec_fn_to_in.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
rec_fn_to_in.io.out.asSInt
}
val self_rec = sin_to_float(self)
val denom_rec = uin_to_float(input.bits)
// Instantiate the hardfloat divider
val divider = Module(new DivSqrtRecFN_small(expWidth, sigWidth, options))
input.ready := divider.io.inReady
divider.io.inValid := input.valid
divider.io.sqrtOp := false.B
divider.io.a := self_rec
divider.io.b := denom_rec
divider.io.roundingMode := consts.round_minMag
divider.io.detectTininess := consts.tininess_afterRounding
output.valid := divider.io.outValid_div
output.bits := float_to_in(divider.io.out)
assert(!output.valid || output.ready)
Some((input, output))
}
override def sqrt: Option[(DecoupledIO[UInt], DecoupledIO[SInt])] = {
// TODO this uses a floating point sqrt unit, but we should use an integer sqrt unit instead
val input = Wire(Decoupled(UInt(0.W)))
val output = Wire(Decoupled(self.cloneType))
input.bits := DontCare
// We translate our integer to floating-point form so that we can use the hardfloat divider
val expWidth = log2Up(self.getWidth) + 1
val sigWidth = self.getWidth
def in_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def float_to_in(x: UInt) = {
val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth))
rec_fn_to_in.io.signedOut := true.B
rec_fn_to_in.io.in := x
rec_fn_to_in.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
rec_fn_to_in.io.out.asSInt
}
val self_rec = in_to_float(self)
// Instantiate the hardfloat sqrt
val sqrter = Module(new DivSqrtRecFN_small(expWidth, sigWidth, 0))
input.ready := sqrter.io.inReady
sqrter.io.inValid := input.valid
sqrter.io.sqrtOp := true.B
sqrter.io.a := self_rec
sqrter.io.b := DontCare
sqrter.io.roundingMode := consts.round_minMag
sqrter.io.detectTininess := consts.tininess_afterRounding
output.valid := sqrter.io.outValid_sqrt
output.bits := float_to_in(sqrter.io.out)
assert(!output.valid || output.ready)
Some((input, output))
}
override def reciprocal[U <: Data](u: U, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[U])] = u match {
case Float(expWidth, sigWidth) =>
val input = Wire(Decoupled(UInt(0.W)))
val output = Wire(Decoupled(u.cloneType))
input.bits := DontCare
// We translate our integer to floating-point form so that we can use the hardfloat divider
def in_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
val self_rec = in_to_float(self)
val one_rec = in_to_float(1.S)
// Instantiate the hardfloat divider
val divider = Module(new DivSqrtRecFN_small(expWidth, sigWidth, options))
input.ready := divider.io.inReady
divider.io.inValid := input.valid
divider.io.sqrtOp := false.B
divider.io.a := one_rec
divider.io.b := self_rec
divider.io.roundingMode := consts.round_near_even
divider.io.detectTininess := consts.tininess_afterRounding
output.valid := divider.io.outValid_div
output.bits := fNFromRecFN(expWidth, sigWidth, divider.io.out).asTypeOf(u)
assert(!output.valid || output.ready)
Some((input, output))
case _ => None
}
override def mult_with_reciprocal[U <: Data](reciprocal: U): SInt = reciprocal match {
case recip @ Float(expWidth, sigWidth) =>
def in_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def float_to_in(x: UInt) = {
val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth))
rec_fn_to_in.io.signedOut := true.B
rec_fn_to_in.io.in := x
rec_fn_to_in.io.roundingMode := consts.round_minMag
rec_fn_to_in.io.out.asSInt
}
val self_rec = in_to_float(self)
val reciprocal_rec = recFNFromFN(expWidth, sigWidth, recip.bits)
// Instantiate the hardfloat multiplier
val muladder = Module(new MulRecFN(expWidth, sigWidth))
muladder.io.roundingMode := consts.round_near_even
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := self_rec
muladder.io.b := reciprocal_rec
float_to_in(muladder.io.out)
case _ => self
}
}
}
implicit object FloatArithmetic extends Arithmetic[Float] {
// TODO Floating point arithmetic currently switches between recoded and standard formats for every operation. However, it should stay in the recoded format as it travels through the systolic array
override implicit def cast(self: Float): ArithmeticOps[Float] = new ArithmeticOps(self) {
override def *(t: Float): Float = {
val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth))
t_resizer.io.in := t_rec
t_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
t_resizer.io.detectTininess := consts.tininess_afterRounding
val t_rec_resized = t_resizer.io.out
val muladder = Module(new MulRecFN(self.expWidth, self.sigWidth))
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := self_rec
muladder.io.b := t_rec_resized
val out = Wire(Float(self.expWidth, self.sigWidth))
out.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
out
}
override def mac(m1: Float, m2: Float): Float = {
// Recode all operands
val m1_rec = recFNFromFN(m1.expWidth, m1.sigWidth, m1.bits)
val m2_rec = recFNFromFN(m2.expWidth, m2.sigWidth, m2.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Resize m1 to self's width
val m1_resizer = Module(new RecFNToRecFN(m1.expWidth, m1.sigWidth, self.expWidth, self.sigWidth))
m1_resizer.io.in := m1_rec
m1_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
m1_resizer.io.detectTininess := consts.tininess_afterRounding
val m1_rec_resized = m1_resizer.io.out
// Resize m2 to self's width
val m2_resizer = Module(new RecFNToRecFN(m2.expWidth, m2.sigWidth, self.expWidth, self.sigWidth))
m2_resizer.io.in := m2_rec
m2_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
m2_resizer.io.detectTininess := consts.tininess_afterRounding
val m2_rec_resized = m2_resizer.io.out
// Perform multiply-add
val muladder = Module(new MulAddRecFN(self.expWidth, self.sigWidth))
muladder.io.op := 0.U
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := m1_rec_resized
muladder.io.b := m2_rec_resized
muladder.io.c := self_rec
// Convert result to standard format // TODO remove these intermediate recodings
val out = Wire(Float(self.expWidth, self.sigWidth))
out.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
out
}
override def +(t: Float): Float = {
require(self.getWidth >= t.getWidth) // This just makes it easier to write the resizing code
// Recode all operands
val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Generate 1 as a float
val in_to_rec_fn = Module(new INToRecFN(1, self.expWidth, self.sigWidth))
in_to_rec_fn.io.signedIn := false.B
in_to_rec_fn.io.in := 1.U
in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
val one_rec = in_to_rec_fn.io.out
// Resize t
val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth))
t_resizer.io.in := t_rec
t_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
t_resizer.io.detectTininess := consts.tininess_afterRounding
val t_rec_resized = t_resizer.io.out
// Perform addition
val muladder = Module(new MulAddRecFN(self.expWidth, self.sigWidth))
muladder.io.op := 0.U
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := t_rec_resized
muladder.io.b := one_rec
muladder.io.c := self_rec
val result = Wire(Float(self.expWidth, self.sigWidth))
result.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
result
}
override def -(t: Float): Float = {
val t_sgn = t.bits(t.getWidth-1)
val neg_t = Cat(~t_sgn, t.bits(t.getWidth-2,0)).asTypeOf(t)
self + neg_t
}
override def >>(u: UInt): Float = {
// Recode self
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Get 2^(-u) as a recoded float
val shift_exp = Wire(UInt(self.expWidth.W))
shift_exp := self.bias.U - u
val shift_fn = Cat(0.U(1.W), shift_exp, 0.U((self.sigWidth-1).W))
val shift_rec = recFNFromFN(self.expWidth, self.sigWidth, shift_fn)
assert(shift_exp =/= 0.U, "scaling by denormalized numbers is not currently supported")
// Multiply self and 2^(-u)
val muladder = Module(new MulRecFN(self.expWidth, self.sigWidth))
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := self_rec
muladder.io.b := shift_rec
val result = Wire(Float(self.expWidth, self.sigWidth))
result.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
result
}
override def >(t: Float): Bool = {
// Recode all operands
val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Resize t to self's width
val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth))
t_resizer.io.in := t_rec
t_resizer.io.roundingMode := consts.round_near_even
t_resizer.io.detectTininess := consts.tininess_afterRounding
val t_rec_resized = t_resizer.io.out
val comparator = Module(new CompareRecFN(self.expWidth, self.sigWidth))
comparator.io.a := self_rec
comparator.io.b := t_rec_resized
comparator.io.signaling := false.B
comparator.io.gt
}
override def withWidthOf(t: Float): Float = {
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
val resizer = Module(new RecFNToRecFN(self.expWidth, self.sigWidth, t.expWidth, t.sigWidth))
resizer.io.in := self_rec
resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
resizer.io.detectTininess := consts.tininess_afterRounding
val result = Wire(Float(t.expWidth, t.sigWidth))
result.bits := fNFromRecFN(t.expWidth, t.sigWidth, resizer.io.out)
result
}
override def clippedToWidthOf(t: Float): Float = {
// TODO check for overflow. Right now, we just assume that overflow doesn't happen
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
val resizer = Module(new RecFNToRecFN(self.expWidth, self.sigWidth, t.expWidth, t.sigWidth))
resizer.io.in := self_rec
resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
resizer.io.detectTininess := consts.tininess_afterRounding
val result = Wire(Float(t.expWidth, t.sigWidth))
result.bits := fNFromRecFN(t.expWidth, t.sigWidth, resizer.io.out)
result
}
override def relu: Float = {
val raw = rawFloatFromFN(self.expWidth, self.sigWidth, self.bits)
val result = Wire(Float(self.expWidth, self.sigWidth))
result.bits := Mux(!raw.isZero && raw.sign, 0.U, self.bits)
result
}
override def zero: Float = 0.U.asTypeOf(self)
override def identity: Float = Cat(0.U(2.W), ~(0.U((self.expWidth-1).W)), 0.U((self.sigWidth-1).W)).asTypeOf(self)
override def minimum: Float = Cat(1.U, ~(0.U(self.expWidth.W)), 0.U((self.sigWidth-1).W)).asTypeOf(self)
}
}
implicit object DummySIntArithmetic extends Arithmetic[DummySInt] {
override implicit def cast(self: DummySInt) = new ArithmeticOps(self) {
override def *(t: DummySInt) = self.dontCare
override def mac(m1: DummySInt, m2: DummySInt) = self.dontCare
override def +(t: DummySInt) = self.dontCare
override def -(t: DummySInt) = self.dontCare
override def >>(t: UInt) = self.dontCare
override def >(t: DummySInt): Bool = false.B
override def identity = self.dontCare
override def withWidthOf(t: DummySInt) = self.dontCare
override def clippedToWidthOf(t: DummySInt) = self.dontCare
override def relu = self.dontCare
override def zero = self.dontCare
override def minimum: DummySInt = self.dontCare
}
}
}
| module PE_374( // @[PE.scala:31:7]
input clock, // @[PE.scala:31:7]
input reset, // @[PE.scala:31:7]
input [7:0] io_in_a, // @[PE.scala:35:14]
input [19:0] io_in_b, // @[PE.scala:35:14]
input [19:0] io_in_d, // @[PE.scala:35:14]
output [7:0] io_out_a, // @[PE.scala:35:14]
output [19:0] io_out_b, // @[PE.scala:35:14]
output [19:0] io_out_c, // @[PE.scala:35:14]
input io_in_control_dataflow, // @[PE.scala:35:14]
input io_in_control_propagate, // @[PE.scala:35:14]
input [4:0] io_in_control_shift, // @[PE.scala:35:14]
output io_out_control_dataflow, // @[PE.scala:35:14]
output io_out_control_propagate, // @[PE.scala:35:14]
output [4:0] io_out_control_shift, // @[PE.scala:35:14]
input [2:0] io_in_id, // @[PE.scala:35:14]
output [2:0] io_out_id, // @[PE.scala:35:14]
input io_in_last, // @[PE.scala:35:14]
output io_out_last, // @[PE.scala:35:14]
input io_in_valid, // @[PE.scala:35:14]
output io_out_valid, // @[PE.scala:35:14]
output io_bad_dataflow // @[PE.scala:35:14]
);
wire [19:0] _mac_unit_io_out_d; // @[PE.scala:64:24]
wire [7:0] io_in_a_0 = io_in_a; // @[PE.scala:31:7]
wire [19:0] io_in_b_0 = io_in_b; // @[PE.scala:31:7]
wire [19:0] io_in_d_0 = io_in_d; // @[PE.scala:31:7]
wire io_in_control_dataflow_0 = io_in_control_dataflow; // @[PE.scala:31:7]
wire io_in_control_propagate_0 = io_in_control_propagate; // @[PE.scala:31:7]
wire [4:0] io_in_control_shift_0 = io_in_control_shift; // @[PE.scala:31:7]
wire [2:0] io_in_id_0 = io_in_id; // @[PE.scala:31:7]
wire io_in_last_0 = io_in_last; // @[PE.scala:31:7]
wire io_in_valid_0 = io_in_valid; // @[PE.scala:31:7]
wire io_bad_dataflow_0 = 1'h0; // @[PE.scala:31:7]
wire [7:0] io_out_a_0 = io_in_a_0; // @[PE.scala:31:7]
wire [19:0] _mac_unit_io_in_b_T = io_in_b_0; // @[PE.scala:31:7, :106:37]
wire [19:0] _mac_unit_io_in_b_T_2 = io_in_b_0; // @[PE.scala:31:7, :113:37]
wire [19:0] _mac_unit_io_in_b_T_8 = io_in_b_0; // @[PE.scala:31:7, :137:35]
wire [19:0] c1_lo_1 = io_in_d_0; // @[PE.scala:31:7]
wire [19:0] c2_lo_1 = io_in_d_0; // @[PE.scala:31:7]
wire io_out_control_dataflow_0 = io_in_control_dataflow_0; // @[PE.scala:31:7]
wire io_out_control_propagate_0 = io_in_control_propagate_0; // @[PE.scala:31:7]
wire [4:0] io_out_control_shift_0 = io_in_control_shift_0; // @[PE.scala:31:7]
wire [2:0] io_out_id_0 = io_in_id_0; // @[PE.scala:31:7]
wire io_out_last_0 = io_in_last_0; // @[PE.scala:31:7]
wire io_out_valid_0 = io_in_valid_0; // @[PE.scala:31:7]
wire [19:0] io_out_b_0; // @[PE.scala:31:7]
wire [19:0] io_out_c_0; // @[PE.scala:31:7]
reg [31:0] c1; // @[PE.scala:70:15]
wire [31:0] _io_out_c_zeros_T_1 = c1; // @[PE.scala:70:15]
wire [31:0] _mac_unit_io_in_b_T_6 = c1; // @[PE.scala:70:15, :127:38]
reg [31:0] c2; // @[PE.scala:71:15]
wire [31:0] _io_out_c_zeros_T_10 = c2; // @[PE.scala:71:15]
wire [31:0] _mac_unit_io_in_b_T_4 = c2; // @[PE.scala:71:15, :121:38]
reg last_s; // @[PE.scala:89:25]
wire flip = last_s != io_in_control_propagate_0; // @[PE.scala:31:7, :89:25, :90:21]
wire [4:0] shift_offset = flip ? io_in_control_shift_0 : 5'h0; // @[PE.scala:31:7, :90:21, :91:25]
wire _GEN = shift_offset == 5'h0; // @[PE.scala:91:25]
wire _io_out_c_point_five_T; // @[Arithmetic.scala:101:32]
assign _io_out_c_point_five_T = _GEN; // @[Arithmetic.scala:101:32]
wire _io_out_c_point_five_T_5; // @[Arithmetic.scala:101:32]
assign _io_out_c_point_five_T_5 = _GEN; // @[Arithmetic.scala:101:32]
wire [5:0] _GEN_0 = {1'h0, shift_offset} - 6'h1; // @[PE.scala:91:25]
wire [5:0] _io_out_c_point_five_T_1; // @[Arithmetic.scala:101:53]
assign _io_out_c_point_five_T_1 = _GEN_0; // @[Arithmetic.scala:101:53]
wire [5:0] _io_out_c_zeros_T_2; // @[Arithmetic.scala:102:66]
assign _io_out_c_zeros_T_2 = _GEN_0; // @[Arithmetic.scala:101:53, :102:66]
wire [5:0] _io_out_c_point_five_T_6; // @[Arithmetic.scala:101:53]
assign _io_out_c_point_five_T_6 = _GEN_0; // @[Arithmetic.scala:101:53]
wire [5:0] _io_out_c_zeros_T_11; // @[Arithmetic.scala:102:66]
assign _io_out_c_zeros_T_11 = _GEN_0; // @[Arithmetic.scala:101:53, :102:66]
wire [4:0] _io_out_c_point_five_T_2 = _io_out_c_point_five_T_1[4:0]; // @[Arithmetic.scala:101:53]
wire [31:0] _io_out_c_point_five_T_3 = $signed($signed(c1) >>> _io_out_c_point_five_T_2); // @[PE.scala:70:15]
wire _io_out_c_point_five_T_4 = _io_out_c_point_five_T_3[0]; // @[Arithmetic.scala:101:50]
wire io_out_c_point_five = ~_io_out_c_point_five_T & _io_out_c_point_five_T_4; // @[Arithmetic.scala:101:{29,32,50}]
wire _GEN_1 = shift_offset < 5'h2; // @[PE.scala:91:25]
wire _io_out_c_zeros_T; // @[Arithmetic.scala:102:27]
assign _io_out_c_zeros_T = _GEN_1; // @[Arithmetic.scala:102:27]
wire _io_out_c_zeros_T_9; // @[Arithmetic.scala:102:27]
assign _io_out_c_zeros_T_9 = _GEN_1; // @[Arithmetic.scala:102:27]
wire [4:0] _io_out_c_zeros_T_3 = _io_out_c_zeros_T_2[4:0]; // @[Arithmetic.scala:102:66]
wire [31:0] _io_out_c_zeros_T_4 = 32'h1 << _io_out_c_zeros_T_3; // @[Arithmetic.scala:102:{60,66}]
wire [32:0] _io_out_c_zeros_T_5 = {1'h0, _io_out_c_zeros_T_4} - 33'h1; // @[Arithmetic.scala:102:{60,81}]
wire [31:0] _io_out_c_zeros_T_6 = _io_out_c_zeros_T_5[31:0]; // @[Arithmetic.scala:102:81]
wire [31:0] _io_out_c_zeros_T_7 = _io_out_c_zeros_T_1 & _io_out_c_zeros_T_6; // @[Arithmetic.scala:102:{45,52,81}]
wire [31:0] _io_out_c_zeros_T_8 = _io_out_c_zeros_T ? 32'h0 : _io_out_c_zeros_T_7; // @[Arithmetic.scala:102:{24,27,52}]
wire io_out_c_zeros = |_io_out_c_zeros_T_8; // @[Arithmetic.scala:102:{24,89}]
wire [31:0] _GEN_2 = {27'h0, shift_offset}; // @[PE.scala:91:25]
wire [31:0] _GEN_3 = $signed($signed(c1) >>> _GEN_2); // @[PE.scala:70:15]
wire [31:0] _io_out_c_ones_digit_T; // @[Arithmetic.scala:103:30]
assign _io_out_c_ones_digit_T = _GEN_3; // @[Arithmetic.scala:103:30]
wire [31:0] _io_out_c_T; // @[Arithmetic.scala:107:15]
assign _io_out_c_T = _GEN_3; // @[Arithmetic.scala:103:30, :107:15]
wire io_out_c_ones_digit = _io_out_c_ones_digit_T[0]; // @[Arithmetic.scala:103:30]
wire _io_out_c_r_T = io_out_c_zeros | io_out_c_ones_digit; // @[Arithmetic.scala:102:89, :103:30, :105:38]
wire _io_out_c_r_T_1 = io_out_c_point_five & _io_out_c_r_T; // @[Arithmetic.scala:101:29, :105:{29,38}]
wire io_out_c_r = _io_out_c_r_T_1; // @[Arithmetic.scala:105:{29,53}]
wire [1:0] _io_out_c_T_1 = {1'h0, io_out_c_r}; // @[Arithmetic.scala:105:53, :107:33]
wire [32:0] _io_out_c_T_2 = {_io_out_c_T[31], _io_out_c_T} + {{31{_io_out_c_T_1[1]}}, _io_out_c_T_1}; // @[Arithmetic.scala:107:{15,28,33}]
wire [31:0] _io_out_c_T_3 = _io_out_c_T_2[31:0]; // @[Arithmetic.scala:107:28]
wire [31:0] _io_out_c_T_4 = _io_out_c_T_3; // @[Arithmetic.scala:107:28]
wire _io_out_c_T_5 = $signed(_io_out_c_T_4) > 32'sh7FFFF; // @[Arithmetic.scala:107:28, :125:33]
wire _io_out_c_T_6 = $signed(_io_out_c_T_4) < -32'sh80000; // @[Arithmetic.scala:107:28, :125:60]
wire [31:0] _io_out_c_T_7 = _io_out_c_T_6 ? 32'hFFF80000 : _io_out_c_T_4; // @[Mux.scala:126:16]
wire [31:0] _io_out_c_T_8 = _io_out_c_T_5 ? 32'h7FFFF : _io_out_c_T_7; // @[Mux.scala:126:16]
wire [19:0] _io_out_c_T_9 = _io_out_c_T_8[19:0]; // @[Mux.scala:126:16]
wire [19:0] _io_out_c_T_10 = _io_out_c_T_9; // @[Arithmetic.scala:125:{81,99}]
wire [19:0] _mac_unit_io_in_b_T_1 = _mac_unit_io_in_b_T; // @[PE.scala:106:37]
wire [7:0] _mac_unit_io_in_b_WIRE = _mac_unit_io_in_b_T_1[7:0]; // @[PE.scala:106:37]
wire c1_sign = io_in_d_0[19]; // @[PE.scala:31:7]
wire c2_sign = io_in_d_0[19]; // @[PE.scala:31:7]
wire [1:0] _GEN_4 = {2{c1_sign}}; // @[Arithmetic.scala:117:26, :118:18]
wire [1:0] c1_lo_lo_hi; // @[Arithmetic.scala:118:18]
assign c1_lo_lo_hi = _GEN_4; // @[Arithmetic.scala:118:18]
wire [1:0] c1_lo_hi_hi; // @[Arithmetic.scala:118:18]
assign c1_lo_hi_hi = _GEN_4; // @[Arithmetic.scala:118:18]
wire [1:0] c1_hi_lo_hi; // @[Arithmetic.scala:118:18]
assign c1_hi_lo_hi = _GEN_4; // @[Arithmetic.scala:118:18]
wire [1:0] c1_hi_hi_hi; // @[Arithmetic.scala:118:18]
assign c1_hi_hi_hi = _GEN_4; // @[Arithmetic.scala:118:18]
wire [2:0] c1_lo_lo = {c1_lo_lo_hi, c1_sign}; // @[Arithmetic.scala:117:26, :118:18]
wire [2:0] c1_lo_hi = {c1_lo_hi_hi, c1_sign}; // @[Arithmetic.scala:117:26, :118:18]
wire [5:0] c1_lo = {c1_lo_hi, c1_lo_lo}; // @[Arithmetic.scala:118:18]
wire [2:0] c1_hi_lo = {c1_hi_lo_hi, c1_sign}; // @[Arithmetic.scala:117:26, :118:18]
wire [2:0] c1_hi_hi = {c1_hi_hi_hi, c1_sign}; // @[Arithmetic.scala:117:26, :118:18]
wire [5:0] c1_hi = {c1_hi_hi, c1_hi_lo}; // @[Arithmetic.scala:118:18]
wire [11:0] _c1_T = {c1_hi, c1_lo}; // @[Arithmetic.scala:118:18]
wire [31:0] _c1_T_1 = {_c1_T, c1_lo_1}; // @[Arithmetic.scala:118:{14,18}]
wire [31:0] _c1_T_2 = _c1_T_1; // @[Arithmetic.scala:118:{14,61}]
wire [31:0] _c1_WIRE = _c1_T_2; // @[Arithmetic.scala:118:61]
wire [4:0] _io_out_c_point_five_T_7 = _io_out_c_point_five_T_6[4:0]; // @[Arithmetic.scala:101:53]
wire [31:0] _io_out_c_point_five_T_8 = $signed($signed(c2) >>> _io_out_c_point_five_T_7); // @[PE.scala:71:15]
wire _io_out_c_point_five_T_9 = _io_out_c_point_five_T_8[0]; // @[Arithmetic.scala:101:50]
wire io_out_c_point_five_1 = ~_io_out_c_point_five_T_5 & _io_out_c_point_five_T_9; // @[Arithmetic.scala:101:{29,32,50}]
wire [4:0] _io_out_c_zeros_T_12 = _io_out_c_zeros_T_11[4:0]; // @[Arithmetic.scala:102:66]
wire [31:0] _io_out_c_zeros_T_13 = 32'h1 << _io_out_c_zeros_T_12; // @[Arithmetic.scala:102:{60,66}]
wire [32:0] _io_out_c_zeros_T_14 = {1'h0, _io_out_c_zeros_T_13} - 33'h1; // @[Arithmetic.scala:102:{60,81}]
wire [31:0] _io_out_c_zeros_T_15 = _io_out_c_zeros_T_14[31:0]; // @[Arithmetic.scala:102:81]
wire [31:0] _io_out_c_zeros_T_16 = _io_out_c_zeros_T_10 & _io_out_c_zeros_T_15; // @[Arithmetic.scala:102:{45,52,81}]
wire [31:0] _io_out_c_zeros_T_17 = _io_out_c_zeros_T_9 ? 32'h0 : _io_out_c_zeros_T_16; // @[Arithmetic.scala:102:{24,27,52}]
wire io_out_c_zeros_1 = |_io_out_c_zeros_T_17; // @[Arithmetic.scala:102:{24,89}]
wire [31:0] _GEN_5 = $signed($signed(c2) >>> _GEN_2); // @[PE.scala:71:15]
wire [31:0] _io_out_c_ones_digit_T_1; // @[Arithmetic.scala:103:30]
assign _io_out_c_ones_digit_T_1 = _GEN_5; // @[Arithmetic.scala:103:30]
wire [31:0] _io_out_c_T_11; // @[Arithmetic.scala:107:15]
assign _io_out_c_T_11 = _GEN_5; // @[Arithmetic.scala:103:30, :107:15]
wire io_out_c_ones_digit_1 = _io_out_c_ones_digit_T_1[0]; // @[Arithmetic.scala:103:30]
wire _io_out_c_r_T_2 = io_out_c_zeros_1 | io_out_c_ones_digit_1; // @[Arithmetic.scala:102:89, :103:30, :105:38]
wire _io_out_c_r_T_3 = io_out_c_point_five_1 & _io_out_c_r_T_2; // @[Arithmetic.scala:101:29, :105:{29,38}]
wire io_out_c_r_1 = _io_out_c_r_T_3; // @[Arithmetic.scala:105:{29,53}]
wire [1:0] _io_out_c_T_12 = {1'h0, io_out_c_r_1}; // @[Arithmetic.scala:105:53, :107:33]
wire [32:0] _io_out_c_T_13 = {_io_out_c_T_11[31], _io_out_c_T_11} + {{31{_io_out_c_T_12[1]}}, _io_out_c_T_12}; // @[Arithmetic.scala:107:{15,28,33}]
wire [31:0] _io_out_c_T_14 = _io_out_c_T_13[31:0]; // @[Arithmetic.scala:107:28]
wire [31:0] _io_out_c_T_15 = _io_out_c_T_14; // @[Arithmetic.scala:107:28]
wire _io_out_c_T_16 = $signed(_io_out_c_T_15) > 32'sh7FFFF; // @[Arithmetic.scala:107:28, :125:33]
wire _io_out_c_T_17 = $signed(_io_out_c_T_15) < -32'sh80000; // @[Arithmetic.scala:107:28, :125:60]
wire [31:0] _io_out_c_T_18 = _io_out_c_T_17 ? 32'hFFF80000 : _io_out_c_T_15; // @[Mux.scala:126:16]
wire [31:0] _io_out_c_T_19 = _io_out_c_T_16 ? 32'h7FFFF : _io_out_c_T_18; // @[Mux.scala:126:16]
wire [19:0] _io_out_c_T_20 = _io_out_c_T_19[19:0]; // @[Mux.scala:126:16]
wire [19:0] _io_out_c_T_21 = _io_out_c_T_20; // @[Arithmetic.scala:125:{81,99}]
wire [19:0] _mac_unit_io_in_b_T_3 = _mac_unit_io_in_b_T_2; // @[PE.scala:113:37]
wire [7:0] _mac_unit_io_in_b_WIRE_1 = _mac_unit_io_in_b_T_3[7:0]; // @[PE.scala:113:37]
wire [1:0] _GEN_6 = {2{c2_sign}}; // @[Arithmetic.scala:117:26, :118:18]
wire [1:0] c2_lo_lo_hi; // @[Arithmetic.scala:118:18]
assign c2_lo_lo_hi = _GEN_6; // @[Arithmetic.scala:118:18]
wire [1:0] c2_lo_hi_hi; // @[Arithmetic.scala:118:18]
assign c2_lo_hi_hi = _GEN_6; // @[Arithmetic.scala:118:18]
wire [1:0] c2_hi_lo_hi; // @[Arithmetic.scala:118:18]
assign c2_hi_lo_hi = _GEN_6; // @[Arithmetic.scala:118:18]
wire [1:0] c2_hi_hi_hi; // @[Arithmetic.scala:118:18]
assign c2_hi_hi_hi = _GEN_6; // @[Arithmetic.scala:118:18]
wire [2:0] c2_lo_lo = {c2_lo_lo_hi, c2_sign}; // @[Arithmetic.scala:117:26, :118:18]
wire [2:0] c2_lo_hi = {c2_lo_hi_hi, c2_sign}; // @[Arithmetic.scala:117:26, :118:18]
wire [5:0] c2_lo = {c2_lo_hi, c2_lo_lo}; // @[Arithmetic.scala:118:18]
wire [2:0] c2_hi_lo = {c2_hi_lo_hi, c2_sign}; // @[Arithmetic.scala:117:26, :118:18]
wire [2:0] c2_hi_hi = {c2_hi_hi_hi, c2_sign}; // @[Arithmetic.scala:117:26, :118:18]
wire [5:0] c2_hi = {c2_hi_hi, c2_hi_lo}; // @[Arithmetic.scala:118:18]
wire [11:0] _c2_T = {c2_hi, c2_lo}; // @[Arithmetic.scala:118:18]
wire [31:0] _c2_T_1 = {_c2_T, c2_lo_1}; // @[Arithmetic.scala:118:{14,18}]
wire [31:0] _c2_T_2 = _c2_T_1; // @[Arithmetic.scala:118:{14,61}]
wire [31:0] _c2_WIRE = _c2_T_2; // @[Arithmetic.scala:118:61]
wire [31:0] _mac_unit_io_in_b_T_5 = _mac_unit_io_in_b_T_4; // @[PE.scala:121:38]
wire [7:0] _mac_unit_io_in_b_WIRE_2 = _mac_unit_io_in_b_T_5[7:0]; // @[PE.scala:121:38]
wire [31:0] _mac_unit_io_in_b_T_7 = _mac_unit_io_in_b_T_6; // @[PE.scala:127:38]
wire [7:0] _mac_unit_io_in_b_WIRE_3 = _mac_unit_io_in_b_T_7[7:0]; // @[PE.scala:127:38]
assign io_out_c_0 = io_in_control_dataflow_0 ? (io_in_control_propagate_0 ? c1[19:0] : c2[19:0]) : io_in_control_propagate_0 ? _io_out_c_T_10 : _io_out_c_T_21; // @[PE.scala:31:7, :70:15, :71:15, :102:95, :103:30, :104:16, :111:16, :118:101, :119:30, :120:16, :126:16]
assign io_out_b_0 = io_in_control_dataflow_0 ? _mac_unit_io_out_d : io_in_b_0; // @[PE.scala:31:7, :64:24, :102:95, :103:30, :118:101]
wire [19:0] _mac_unit_io_in_b_T_9 = _mac_unit_io_in_b_T_8; // @[PE.scala:137:35]
wire [7:0] _mac_unit_io_in_b_WIRE_4 = _mac_unit_io_in_b_T_9[7:0]; // @[PE.scala:137:35]
wire [31:0] _GEN_7 = {{12{io_in_d_0[19]}}, io_in_d_0}; // @[PE.scala:31:7, :124:10]
wire [31:0] _GEN_8 = {{12{_mac_unit_io_out_d[19]}}, _mac_unit_io_out_d}; // @[PE.scala:64:24, :108:10]
always @(posedge clock) begin // @[PE.scala:31:7]
if (io_in_valid_0) begin // @[PE.scala:31:7]
if (io_in_control_dataflow_0) begin // @[PE.scala:31:7]
if (io_in_control_dataflow_0 & io_in_control_propagate_0) // @[PE.scala:31:7, :70:15, :118:101, :119:30, :124:10]
c1 <= _GEN_7; // @[PE.scala:70:15, :124:10]
if (~io_in_control_dataflow_0 | io_in_control_propagate_0) begin // @[PE.scala:31:7, :71:15, :118:101, :119:30]
end
else // @[PE.scala:71:15, :118:101, :119:30]
c2 <= _GEN_7; // @[PE.scala:71:15, :124:10]
end
else begin // @[PE.scala:31:7]
c1 <= io_in_control_propagate_0 ? _c1_WIRE : _GEN_8; // @[PE.scala:31:7, :70:15, :103:30, :108:10, :109:10, :115:10]
c2 <= io_in_control_propagate_0 ? _GEN_8 : _c2_WIRE; // @[PE.scala:31:7, :71:15, :103:30, :108:10, :116:10]
end
last_s <= io_in_control_propagate_0; // @[PE.scala:31:7, :89:25]
end
end // always @(posedge clock)
MacUnit_118 mac_unit ( // @[PE.scala:64:24]
.clock (clock),
.reset (reset),
.io_in_a (io_in_a_0), // @[PE.scala:31:7]
.io_in_b (io_in_control_dataflow_0 ? (io_in_control_propagate_0 ? _mac_unit_io_in_b_WIRE_2 : _mac_unit_io_in_b_WIRE_3) : io_in_control_propagate_0 ? _mac_unit_io_in_b_WIRE : _mac_unit_io_in_b_WIRE_1), // @[PE.scala:31:7, :102:95, :103:30, :106:{24,37}, :113:{24,37}, :118:101, :119:30, :121:{24,38}, :127:{24,38}]
.io_in_c (io_in_control_dataflow_0 ? {{12{io_in_b_0[19]}}, io_in_b_0} : io_in_control_propagate_0 ? c2 : c1), // @[PE.scala:31:7, :70:15, :71:15, :102:95, :103:30, :107:24, :114:24, :118:101, :122:24]
.io_out_d (_mac_unit_io_out_d)
); // @[PE.scala:64:24]
assign io_out_a = io_out_a_0; // @[PE.scala:31:7]
assign io_out_b = io_out_b_0; // @[PE.scala:31:7]
assign io_out_c = io_out_c_0; // @[PE.scala:31:7]
assign io_out_control_dataflow = io_out_control_dataflow_0; // @[PE.scala:31:7]
assign io_out_control_propagate = io_out_control_propagate_0; // @[PE.scala:31:7]
assign io_out_control_shift = io_out_control_shift_0; // @[PE.scala:31:7]
assign io_out_id = io_out_id_0; // @[PE.scala:31:7]
assign io_out_last = io_out_last_0; // @[PE.scala:31:7]
assign io_out_valid = io_out_valid_0; // @[PE.scala:31:7]
assign io_bad_dataflow = io_bad_dataflow_0; // @[PE.scala:31:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File LazyModuleImp.scala:
package org.chipsalliance.diplomacy.lazymodule
import chisel3.{withClockAndReset, Module, RawModule, Reset, _}
import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo}
import firrtl.passes.InlineAnnotation
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.nodes.Dangle
import scala.collection.immutable.SortedMap
/** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]].
*
* This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy.
*/
sealed trait LazyModuleImpLike extends RawModule {
/** [[LazyModule]] that contains this instance. */
val wrapper: LazyModule
/** IOs that will be automatically "punched" for this instance. */
val auto: AutoBundle
/** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */
protected[diplomacy] val dangles: Seq[Dangle]
// [[wrapper.module]] had better not be accessed while LazyModules are still being built!
require(
LazyModule.scope.isEmpty,
s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}"
)
/** Set module name. Defaults to the containing LazyModule's desiredName. */
override def desiredName: String = wrapper.desiredName
suggestName(wrapper.suggestedName)
/** [[Parameters]] for chisel [[Module]]s. */
implicit val p: Parameters = wrapper.p
/** instantiate this [[LazyModule]], return [[AutoBundle]] and a unconnected [[Dangle]]s from this module and
* submodules.
*/
protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = {
// 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]],
// 2. return [[Dangle]]s from each module.
val childDangles = wrapper.children.reverse.flatMap { c =>
implicit val sourceInfo: SourceInfo = c.info
c.cloneProto.map { cp =>
// If the child is a clone, then recursively set cloneProto of its children as well
def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = {
require(bases.size == clones.size)
(bases.zip(clones)).map { case (l, r) =>
require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}")
l.cloneProto = Some(r)
assignCloneProtos(l.children, r.children)
}
}
assignCloneProtos(c.children, cp.children)
// Clone the child module as a record, and get its [[AutoBundle]]
val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName)
val clonedAuto = clone("auto").asInstanceOf[AutoBundle]
// Get the empty [[Dangle]]'s of the cloned child
val rawDangles = c.cloneDangles()
require(rawDangles.size == clonedAuto.elements.size)
// Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s
val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) }
dangles
}.getOrElse {
// For non-clones, instantiate the child module
val mod = try {
Module(c.module)
} catch {
case e: ChiselException => {
println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}")
throw e
}
}
mod.dangles
}
}
// Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]].
// This will result in a sequence of [[Dangle]] from these [[BaseNode]]s.
val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate())
// Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]]
val allDangles = nodeDangles ++ childDangles
// Group [[allDangles]] by their [[source]].
val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*)
// For each [[source]] set of [[Dangle]]s of size 2, ensure that these
// can be connected as a source-sink pair (have opposite flipped value).
// Make the connection and mark them as [[done]].
val done = Set() ++ pairing.values.filter(_.size == 2).map {
case Seq(a, b) =>
require(a.flipped != b.flipped)
// @todo <> in chisel3 makes directionless connection.
if (a.flipped) {
a.data <> b.data
} else {
b.data <> a.data
}
a.source
case _ => None
}
// Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module.
val forward = allDangles.filter(d => !done(d.source))
// Generate [[AutoBundle]] IO from [[forward]].
val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*))
// Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]]
val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) =>
if (d.flipped) {
d.data <> io
} else {
io <> d.data
}
d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name)
}
// Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]].
wrapper.inModuleBody.reverse.foreach {
_()
}
if (wrapper.shouldBeInlined) {
chisel3.experimental.annotate(new ChiselAnnotation {
def toFirrtl = InlineAnnotation(toNamed)
})
}
// Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]].
(auto, dangles)
}
}
/** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike {
/** Instantiate hardware of this `Module`. */
val (auto, dangles) = instantiate()
}
/** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike {
// These wires are the default clock+reset for all LazyModule children.
// It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the
// [[LazyRawModuleImp]] children.
// Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly.
/** drive clock explicitly. */
val childClock: Clock = Wire(Clock())
/** drive reset explicitly. */
val childReset: Reset = Wire(Reset())
// the default is that these are disabled
childClock := false.B.asClock
childReset := chisel3.DontCare
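// Illustrative sketch (hypothetical subclass, not required by this trait): a concrete
// LazyRawModuleImp usually overrides these defaults from its own explicit IO, e.g.
//   class MyClockDomainImp(outer: MyClockDomain) extends LazyRawModuleImp(outer) {
//     val clockIn = IO(Input(Clock()))
//     val resetIn = IO(Input(AsyncReset()))
//     childClock := clockIn
//     childReset := resetIn
//   }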
def provideImplicitClockToLazyChildren: Boolean = false
val (auto, dangles) =
if (provideImplicitClockToLazyChildren) {
withClockAndReset(childClock, childReset) { instantiate() }
} else {
instantiate()
}
}
File Xbar.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.interrupts
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy.lazymodule._
class IntXbar()(implicit p: Parameters) extends LazyModule
{
val intnode = new IntNexusNode(
sinkFn = { _ => IntSinkPortParameters(Seq(IntSinkParameters())) },
sourceFn = { seq =>
IntSourcePortParameters((seq zip seq.map(_.num).scanLeft(0)(_+_).init).map {
case (s, o) => s.sources.map(z => z.copy(range = z.range.offset(o)))
}.flatten)
})
{
override def circuitIdentity = outputs == 1 && inputs == 1
}
lazy val module = new Impl
class Impl extends LazyRawModuleImp(this) {
override def desiredName = s"IntXbar_i${intnode.in.size}_o${intnode.out.size}"
val cat = intnode.in.map { case (i, e) => i.take(e.source.num) }.flatten
intnode.out.foreach { case (o, _) => o := cat }
}
}
class IntSyncXbar()(implicit p: Parameters) extends LazyModule
{
val intnode = new IntSyncNexusNode(
sinkFn = { _ => IntSinkPortParameters(Seq(IntSinkParameters())) },
sourceFn = { seq =>
IntSourcePortParameters((seq zip seq.map(_.num).scanLeft(0)(_+_).init).map {
case (s, o) => s.sources.map(z => z.copy(range = z.range.offset(o)))
}.flatten)
})
{
override def circuitIdentity = outputs == 1 && inputs == 1
}
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
override def desiredName = s"IntSyncXbar_i${intnode.in.size}_o${intnode.out.size}"
val cat = intnode.in.map { case (i, e) => i.sync.take(e.source.num) }.flatten
intnode.out.foreach { case (o, _) => o.sync := cat }
}
}
object IntXbar {
def apply()(implicit p: Parameters): IntNode = {
val xbar = LazyModule(new IntXbar)
xbar.intnode
}
}
object IntSyncXbar {
def apply()(implicit p: Parameters): IntSyncNode = {
val xbar = LazyModule(new IntSyncXbar)
xbar.intnode
}
}
| module IntXbar_i4_o1( // @[Xbar.scala:22:9]
input auto_anon_in_3_0, // @[LazyModuleImp.scala:107:25]
input auto_anon_in_2_0, // @[LazyModuleImp.scala:107:25]
input auto_anon_in_1_0, // @[LazyModuleImp.scala:107:25]
input auto_anon_in_1_1, // @[LazyModuleImp.scala:107:25]
input auto_anon_in_0_0, // @[LazyModuleImp.scala:107:25]
output auto_anon_out_0, // @[LazyModuleImp.scala:107:25]
output auto_anon_out_1, // @[LazyModuleImp.scala:107:25]
output auto_anon_out_2, // @[LazyModuleImp.scala:107:25]
output auto_anon_out_3, // @[LazyModuleImp.scala:107:25]
output auto_anon_out_4 // @[LazyModuleImp.scala:107:25]
);
assign auto_anon_out_0 = auto_anon_in_0_0; // @[Xbar.scala:22:9]
assign auto_anon_out_1 = auto_anon_in_1_0; // @[Xbar.scala:22:9]
assign auto_anon_out_2 = auto_anon_in_1_1; // @[Xbar.scala:22:9]
assign auto_anon_out_3 = auto_anon_in_2_0; // @[Xbar.scala:22:9]
assign auto_anon_out_4 = auto_anon_in_3_0; // @[Xbar.scala:22:9]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File Scheduler.scala:
/*
* Copyright 2019 SiFive, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You should have received a copy of LICENSE.Apache2 along with
* this software. If not, you may obtain a copy at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package sifive.blocks.inclusivecache
import chisel3._
import chisel3.util._
import freechips.rocketchip.diplomacy.AddressSet
import freechips.rocketchip.tilelink._
import freechips.rocketchip.util._
import chisel3.experimental.dataview._
class InclusiveCacheBankScheduler(params: InclusiveCacheParameters) extends Module
{
val io = IO(new Bundle {
val in = Flipped(TLBundle(params.inner.bundle))
val out = TLBundle(params.outer.bundle)
// Way permissions
val ways = Flipped(Vec(params.allClients, UInt(params.cache.ways.W)))
val divs = Flipped(Vec(params.allClients, UInt((InclusiveCacheParameters.lfsrBits + 1).W)))
// Control port
val req = Flipped(Decoupled(new SinkXRequest(params)))
val resp = Decoupled(new SourceXRequest(params))
})
val sourceA = Module(new SourceA(params))
val sourceB = Module(new SourceB(params))
val sourceC = Module(new SourceC(params))
val sourceD = Module(new SourceD(params))
val sourceE = Module(new SourceE(params))
val sourceX = Module(new SourceX(params))
io.out.a <> sourceA.io.a
io.out.c <> sourceC.io.c
io.out.e <> sourceE.io.e
io.in.b <> sourceB.io.b
io.in.d <> sourceD.io.d
io.resp <> sourceX.io.x
val sinkA = Module(new SinkA(params))
val sinkC = Module(new SinkC(params))
val sinkD = Module(new SinkD(params))
val sinkE = Module(new SinkE(params))
val sinkX = Module(new SinkX(params))
sinkA.io.a <> io.in.a
sinkC.io.c <> io.in.c
sinkE.io.e <> io.in.e
sinkD.io.d <> io.out.d
sinkX.io.x <> io.req
io.out.b.ready := true.B // disconnected
val directory = Module(new Directory(params))
val bankedStore = Module(new BankedStore(params))
val requests = Module(new ListBuffer(ListBufferParameters(new QueuedRequest(params), 3*params.mshrs, params.secondary, false)))
val mshrs = Seq.fill(params.mshrs) { Module(new MSHR(params)) }
val abc_mshrs = mshrs.init.init
val bc_mshr = mshrs.init.last
val c_mshr = mshrs.last
val nestedwb = Wire(new NestedWriteback(params))
// Deliver messages from Sinks to MSHRs
mshrs.zipWithIndex.foreach { case (m, i) =>
m.io.sinkc.valid := sinkC.io.resp.valid && sinkC.io.resp.bits.set === m.io.status.bits.set
m.io.sinkd.valid := sinkD.io.resp.valid && sinkD.io.resp.bits.source === i.U
m.io.sinke.valid := sinkE.io.resp.valid && sinkE.io.resp.bits.sink === i.U
m.io.sinkc.bits := sinkC.io.resp.bits
m.io.sinkd.bits := sinkD.io.resp.bits
m.io.sinke.bits := sinkE.io.resp.bits
m.io.nestedwb := nestedwb
}
// If the pre-empting BC or C MSHR has a matching set, the normal MSHR must be blocked
val mshr_stall_abc = abc_mshrs.map { m =>
(bc_mshr.io.status.valid && m.io.status.bits.set === bc_mshr.io.status.bits.set) ||
( c_mshr.io.status.valid && m.io.status.bits.set === c_mshr.io.status.bits.set)
}
val mshr_stall_bc =
c_mshr.io.status.valid && bc_mshr.io.status.bits.set === c_mshr.io.status.bits.set
val mshr_stall_c = false.B
val mshr_stall = mshr_stall_abc :+ mshr_stall_bc :+ mshr_stall_c
val stall_abc = (mshr_stall_abc zip abc_mshrs) map { case (s, m) => s && m.io.status.valid }
if (!params.lastLevel || !params.firstLevel)
params.ccover(stall_abc.reduce(_||_), "SCHEDULER_ABC_INTERLOCK", "ABC MSHR interlocked due to pre-emption")
if (!params.lastLevel)
params.ccover(mshr_stall_bc && bc_mshr.io.status.valid, "SCHEDULER_BC_INTERLOCK", "BC MSHR interlocked due to pre-emption")
// Consider scheduling an MSHR only if all the resources it requires are available
val mshr_request = Cat((mshrs zip mshr_stall).map { case (m, s) =>
m.io.schedule.valid && !s &&
(sourceA.io.req.ready || !m.io.schedule.bits.a.valid) &&
(sourceB.io.req.ready || !m.io.schedule.bits.b.valid) &&
(sourceC.io.req.ready || !m.io.schedule.bits.c.valid) &&
(sourceD.io.req.ready || !m.io.schedule.bits.d.valid) &&
(sourceE.io.req.ready || !m.io.schedule.bits.e.valid) &&
(sourceX.io.req.ready || !m.io.schedule.bits.x.valid) &&
(directory.io.write.ready || !m.io.schedule.bits.dir.valid)
}.reverse)
// Round-robin arbitration of MSHRs
val robin_filter = RegInit(0.U(params.mshrs.W))
val robin_request = Cat(mshr_request, mshr_request & robin_filter)
val mshr_selectOH2 = ~(leftOR(robin_request) << 1) & robin_request
val mshr_selectOH = mshr_selectOH2(2*params.mshrs-1, params.mshrs) | mshr_selectOH2(params.mshrs-1, 0)
val mshr_select = OHToUInt(mshr_selectOH)
val schedule = Mux1H(mshr_selectOH, mshrs.map(_.io.schedule.bits))
val scheduleTag = Mux1H(mshr_selectOH, mshrs.map(_.io.status.bits.tag))
val scheduleSet = Mux1H(mshr_selectOH, mshrs.map(_.io.status.bits.set))
// When an MSHR wins the schedule, it has lowest priority next time
when (mshr_request.orR) { robin_filter := ~rightOR(mshr_selectOH) }
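// Worked example (assuming params.mshrs = 4): if MSHR 1 won last time, robin_filter =
// ~rightOR(0b0010) = 0b1100. With mshr_request = 0b1011, robin_request = Cat(0b1011,
// 0b1011 & 0b1100) = 0b1011_1000; keeping only its lowest set bit gives mshr_selectOH2 =
// 0b0000_1000, and folding the two halves yields mshr_selectOH = 0b1000, i.e. MSHR 3: the
// lowest-index requester strictly above the previous winner, wrapping around when none exists.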
// Fill in which MSHR sends the request
schedule.a.bits.source := mshr_select
schedule.c.bits.source := Mux(schedule.c.bits.opcode(1), mshr_select, 0.U) // only set for Release[Data] not ProbeAck[Data]
schedule.d.bits.sink := mshr_select
sourceA.io.req.valid := schedule.a.valid
sourceB.io.req.valid := schedule.b.valid
sourceC.io.req.valid := schedule.c.valid
sourceD.io.req.valid := schedule.d.valid
sourceE.io.req.valid := schedule.e.valid
sourceX.io.req.valid := schedule.x.valid
sourceA.io.req.bits.viewAsSupertype(chiselTypeOf(schedule.a.bits)) := schedule.a.bits
sourceB.io.req.bits.viewAsSupertype(chiselTypeOf(schedule.b.bits)) := schedule.b.bits
sourceC.io.req.bits.viewAsSupertype(chiselTypeOf(schedule.c.bits)) := schedule.c.bits
sourceD.io.req.bits.viewAsSupertype(chiselTypeOf(schedule.d.bits)) := schedule.d.bits
sourceE.io.req.bits.viewAsSupertype(chiselTypeOf(schedule.e.bits)) := schedule.e.bits
sourceX.io.req.bits.viewAsSupertype(chiselTypeOf(schedule.x.bits)) := schedule.x.bits
directory.io.write.valid := schedule.dir.valid
directory.io.write.bits.viewAsSupertype(chiselTypeOf(schedule.dir.bits)) := schedule.dir.bits
// Forward meta-data changes from nested transaction completion
val select_c = mshr_selectOH(params.mshrs-1)
val select_bc = mshr_selectOH(params.mshrs-2)
nestedwb.set := Mux(select_c, c_mshr.io.status.bits.set, bc_mshr.io.status.bits.set)
nestedwb.tag := Mux(select_c, c_mshr.io.status.bits.tag, bc_mshr.io.status.bits.tag)
nestedwb.b_toN := select_bc && bc_mshr.io.schedule.bits.dir.valid && bc_mshr.io.schedule.bits.dir.bits.data.state === MetaData.INVALID
nestedwb.b_toB := select_bc && bc_mshr.io.schedule.bits.dir.valid && bc_mshr.io.schedule.bits.dir.bits.data.state === MetaData.BRANCH
nestedwb.b_clr_dirty := select_bc && bc_mshr.io.schedule.bits.dir.valid
nestedwb.c_set_dirty := select_c && c_mshr.io.schedule.bits.dir.valid && c_mshr.io.schedule.bits.dir.bits.data.dirty
// Pick highest priority request
val request = Wire(Decoupled(new FullRequest(params)))
request.valid := directory.io.ready && (sinkA.io.req.valid || sinkX.io.req.valid || sinkC.io.req.valid)
request.bits := Mux(sinkC.io.req.valid, sinkC.io.req.bits,
Mux(sinkX.io.req.valid, sinkX.io.req.bits, sinkA.io.req.bits))
sinkC.io.req.ready := directory.io.ready && request.ready
sinkX.io.req.ready := directory.io.ready && request.ready && !sinkC.io.req.valid
sinkA.io.req.ready := directory.io.ready && request.ready && !sinkC.io.req.valid && !sinkX.io.req.valid
// If no MSHR has been assigned to this set, we need to allocate one
val setMatches = Cat(mshrs.map { m => m.io.status.valid && m.io.status.bits.set === request.bits.set }.reverse)
val alloc = !setMatches.orR // NOTE: no matches also means no BC or C pre-emption on this set
// If a same-set MSHR says that requests of this type must be blocked (for bounded time), do it
val blockB = Mux1H(setMatches, mshrs.map(_.io.status.bits.blockB)) && request.bits.prio(1)
val blockC = Mux1H(setMatches, mshrs.map(_.io.status.bits.blockC)) && request.bits.prio(2)
// If a same-set MSHR says that requests of this type must be handled out-of-band, use special BC|C MSHR
// ... these special MSHRs interlock the MSHR that said it should be pre-empted.
val nestB = Mux1H(setMatches, mshrs.map(_.io.status.bits.nestB)) && request.bits.prio(1)
val nestC = Mux1H(setMatches, mshrs.map(_.io.status.bits.nestC)) && request.bits.prio(2)
// Prevent priority inversion; we may not queue to MSHRs beyond our level
val prioFilter = Cat(request.bits.prio(2), !request.bits.prio(0), ~0.U((params.mshrs-2).W))
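// For example, with the MSHR ordering used here (generic abc MSHRs, then bc_mshr, then c_mshr):
// an A request (prio = 001) gives prioFilter = 0b00_11...1 (generic MSHRs only), a B request
// (prio = 010) gives 0b01_11...1 (generic MSHRs plus bc_mshr), and a C request (prio = 100)
// gives 0b11_11...1 (any MSHR, including the dedicated c_mshr).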
val lowerMatches = setMatches & prioFilter
// If we match an MSHR <= our priority that neither blocks nor nests us, queue to it.
val queue = lowerMatches.orR && !nestB && !nestC && !blockB && !blockC
if (!params.lastLevel) {
params.ccover(request.valid && blockB, "SCHEDULER_BLOCKB", "Interlock B request while resolving set conflict")
params.ccover(request.valid && nestB, "SCHEDULER_NESTB", "Priority escalation from channel B")
}
if (!params.firstLevel) {
params.ccover(request.valid && blockC, "SCHEDULER_BLOCKC", "Interlock C request while resolving set conflict")
params.ccover(request.valid && nestC, "SCHEDULER_NESTC", "Priority escalation from channel C")
}
params.ccover(request.valid && queue, "SCHEDULER_SECONDARY", "Enqueue secondary miss")
// It might happen that lowerMatches has >1 bit if the two special MSHRs are in-use
// We want to Q to the highest matching priority MSHR.
val lowerMatches1 =
Mux(lowerMatches(params.mshrs-1), 1.U << (params.mshrs-1),
Mux(lowerMatches(params.mshrs-2), 1.U << (params.mshrs-2),
lowerMatches))
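// E.g. if both c_mshr and an abc MSHR match, lowerMatches1 keeps only the c_mshr bit, so a
// queued secondary request always lands in the most-privileged matching MSHR.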
// If this goes to the scheduled MSHR, it may need to be bypassed
// Alternatively, the MSHR may be refilled from a request queued in the ListBuffer
val selected_requests = Cat(mshr_selectOH, mshr_selectOH, mshr_selectOH) & requests.io.valid
val a_pop = selected_requests((0 + 1) * params.mshrs - 1, 0 * params.mshrs).orR
val b_pop = selected_requests((1 + 1) * params.mshrs - 1, 1 * params.mshrs).orR
val c_pop = selected_requests((2 + 1) * params.mshrs - 1, 2 * params.mshrs).orR
val bypassMatches = (mshr_selectOH & lowerMatches1).orR &&
Mux(c_pop || request.bits.prio(2), !c_pop, Mux(b_pop || request.bits.prio(1), !b_pop, !a_pop))
val may_pop = a_pop || b_pop || c_pop
val bypass = request.valid && queue && bypassMatches
val will_reload = schedule.reload && (may_pop || bypass)
val will_pop = schedule.reload && may_pop && !bypass
params.ccover(mshr_selectOH.orR && bypass, "SCHEDULER_BYPASS", "Bypass new request directly to conflicting MSHR")
params.ccover(mshr_selectOH.orR && will_reload, "SCHEDULER_RELOAD", "Back-to-back service of two requests")
params.ccover(mshr_selectOH.orR && will_pop, "SCHEDULER_POP", "Service of a secondary miss")
// Repeat the above logic, but without the fan-in
mshrs.zipWithIndex.foreach { case (m, i) =>
val sel = mshr_selectOH(i)
m.io.schedule.ready := sel
val a_pop = requests.io.valid(params.mshrs * 0 + i)
val b_pop = requests.io.valid(params.mshrs * 1 + i)
val c_pop = requests.io.valid(params.mshrs * 2 + i)
val bypassMatches = lowerMatches1(i) &&
Mux(c_pop || request.bits.prio(2), !c_pop, Mux(b_pop || request.bits.prio(1), !b_pop, !a_pop))
val may_pop = a_pop || b_pop || c_pop
val bypass = request.valid && queue && bypassMatches
val will_reload = m.io.schedule.bits.reload && (may_pop || bypass)
m.io.allocate.bits.viewAsSupertype(chiselTypeOf(requests.io.data)) := Mux(bypass, WireInit(new QueuedRequest(params), init = request.bits), requests.io.data)
m.io.allocate.bits.set := m.io.status.bits.set
m.io.allocate.bits.repeat := m.io.allocate.bits.tag === m.io.status.bits.tag
m.io.allocate.valid := sel && will_reload
}
// Determine which of the queued requests to pop (supposing will_pop)
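  // Mask out A entries when the same MSHR also has a queued B or C request, and B entries when a C
  // request is queued, so the pop always services the highest-priority channel (C > B > A)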
val prio_requests = ~(~requests.io.valid | (requests.io.valid >> params.mshrs) | (requests.io.valid >> 2*params.mshrs))
val pop_index = OHToUInt(Cat(mshr_selectOH, mshr_selectOH, mshr_selectOH) & prio_requests)
requests.io.pop.valid := will_pop
requests.io.pop.bits := pop_index
// Reload from the Directory if the next MSHR operation changes tags
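  // A reload only needs a fresh Directory lookup when the queued (or bypassed) request targets a
  // different tag than the one the MSHR was just working on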
val lb_tag_mismatch = scheduleTag =/= requests.io.data.tag
val mshr_uses_directory_assuming_no_bypass = schedule.reload && may_pop && lb_tag_mismatch
val mshr_uses_directory_for_lb = will_pop && lb_tag_mismatch
val mshr_uses_directory = will_reload && scheduleTag =/= Mux(bypass, request.bits.tag, requests.io.data.tag)
// Is there an MSHR free for this request?
val mshr_validOH = Cat(mshrs.map(_.io.status.valid).reverse)
val mshr_free = (~mshr_validOH & prioFilter).orR
// Fanout the request to the appropriate handler (if any)
val bypassQueue = schedule.reload && bypassMatches
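  // A new request is accepted by allocating a free generic MSHR, or by pre-empting into the BC/C MSHR
  // for a nested B/C request; every case yields to an MSHR that currently needs the Directory port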
val request_alloc_cases =
(alloc && !mshr_uses_directory_assuming_no_bypass && mshr_free) ||
(nestB && !mshr_uses_directory_assuming_no_bypass && !bc_mshr.io.status.valid && !c_mshr.io.status.valid) ||
(nestC && !mshr_uses_directory_assuming_no_bypass && !c_mshr.io.status.valid)
request.ready := request_alloc_cases || (queue && (bypassQueue || requests.io.push.ready))
val alloc_uses_directory = request.valid && request_alloc_cases
// When a request goes through, it will need to hit the Directory
directory.io.read.valid := mshr_uses_directory || alloc_uses_directory
directory.io.read.bits.set := Mux(mshr_uses_directory_for_lb, scheduleSet, request.bits.set)
directory.io.read.bits.tag := Mux(mshr_uses_directory_for_lb, requests.io.data.tag, request.bits.tag)
// Enqueue the request if not bypassed directly into an MSHR
requests.io.push.valid := request.valid && queue && !bypassQueue
requests.io.push.bits.data := request.bits
requests.io.push.bits.index := Mux1H(
request.bits.prio, Seq(
OHToUInt(lowerMatches1 << params.mshrs*0),
OHToUInt(lowerMatches1 << params.mshrs*1),
OHToUInt(lowerMatches1 << params.mshrs*2)))
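  // One-hot select of the lowest-numbered free MSHR this request is permitted to use
  // (leftOR finds the first free slot; prioFilter reserves the special MSHRs for B/C traffic)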
val mshr_insertOH = ~(leftOR(~mshr_validOH) << 1) & ~mshr_validOH & prioFilter
(mshr_insertOH.asBools zip mshrs) map { case (s, m) =>
when (request.valid && alloc && s && !mshr_uses_directory_assuming_no_bypass) {
m.io.allocate.valid := true.B
m.io.allocate.bits.viewAsSupertype(chiselTypeOf(request.bits)) := request.bits
m.io.allocate.bits.repeat := false.B
}
}
when (request.valid && nestB && !bc_mshr.io.status.valid && !c_mshr.io.status.valid && !mshr_uses_directory_assuming_no_bypass) {
bc_mshr.io.allocate.valid := true.B
bc_mshr.io.allocate.bits.viewAsSupertype(chiselTypeOf(request.bits)) := request.bits
bc_mshr.io.allocate.bits.repeat := false.B
assert (!request.bits.prio(0))
}
bc_mshr.io.allocate.bits.prio(0) := false.B
when (request.valid && nestC && !c_mshr.io.status.valid && !mshr_uses_directory_assuming_no_bypass) {
c_mshr.io.allocate.valid := true.B
c_mshr.io.allocate.bits.viewAsSupertype(chiselTypeOf(request.bits)) := request.bits
c_mshr.io.allocate.bits.repeat := false.B
assert (!request.bits.prio(0))
assert (!request.bits.prio(1))
}
c_mshr.io.allocate.bits.prio(0) := false.B
c_mshr.io.allocate.bits.prio(1) := false.B
// Fanout the result of the Directory lookup
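  // The Directory result is delivered to the freshly allocated MSHR, or to the BC/C MSHR when the
  // request nests an existing one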
  val dirTarget = Mux(alloc, mshr_insertOH, Mux(nestB, (BigInt(1) << (params.mshrs-2)).U, (BigInt(1) << (params.mshrs-1)).U))
val directoryFanout = params.dirReg(RegNext(Mux(mshr_uses_directory, mshr_selectOH, Mux(alloc_uses_directory, dirTarget, 0.U))))
mshrs.zipWithIndex.foreach { case (m, i) =>
m.io.directory.valid := directoryFanout(i)
m.io.directory.bits := directory.io.result.bits
}
// MSHR response meta-data fetch
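  // A C-channel response carries no MSHR index, so the owning MSHR is found by matching its set;
  // the BC MSHR takes precedence over the generic MSHRs (D responses are routed by source instead)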
sinkC.io.way :=
Mux(bc_mshr.io.status.valid && bc_mshr.io.status.bits.set === sinkC.io.set,
bc_mshr.io.status.bits.way,
Mux1H(abc_mshrs.map(m => m.io.status.valid && m.io.status.bits.set === sinkC.io.set),
abc_mshrs.map(_.io.status.bits.way)))
sinkD.io.way := VecInit(mshrs.map(_.io.status.bits.way))(sinkD.io.source)
sinkD.io.set := VecInit(mshrs.map(_.io.status.bits.set))(sinkD.io.source)
// Beat buffer connections between components
sinkA.io.pb_pop <> sourceD.io.pb_pop
sourceD.io.pb_beat := sinkA.io.pb_beat
sinkC.io.rel_pop <> sourceD.io.rel_pop
sourceD.io.rel_beat := sinkC.io.rel_beat
// BankedStore ports
bankedStore.io.sinkC_adr <> sinkC.io.bs_adr
bankedStore.io.sinkC_dat := sinkC.io.bs_dat
bankedStore.io.sinkD_adr <> sinkD.io.bs_adr
bankedStore.io.sinkD_dat := sinkD.io.bs_dat
bankedStore.io.sourceC_adr <> sourceC.io.bs_adr
bankedStore.io.sourceD_radr <> sourceD.io.bs_radr
bankedStore.io.sourceD_wadr <> sourceD.io.bs_wadr
bankedStore.io.sourceD_wdat := sourceD.io.bs_wdat
sourceC.io.bs_dat := bankedStore.io.sourceC_dat
sourceD.io.bs_rdat := bankedStore.io.sourceD_rdat
// SourceD data hazard interlock
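  // SourceD must not access a way that SourceC is still evicting or that SinkD is still refilling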
sourceD.io.evict_req := sourceC.io.evict_req
sourceD.io.grant_req := sinkD .io.grant_req
sourceC.io.evict_safe := sourceD.io.evict_safe
sinkD .io.grant_safe := sourceD.io.grant_safe
private def afmt(x: AddressSet) = s"""{"base":${x.base},"mask":${x.mask}}"""
private def addresses = params.inner.manager.managers.flatMap(_.address).map(afmt _).mkString(",")
private def setBits = params.addressMapping.drop(params.offsetBits).take(params.setBits).mkString(",")
private def tagBits = params.addressMapping.drop(params.offsetBits + params.setBits).take(params.tagBits).mkString(",")
private def simple = s""""reset":"${reset.pathName}","tagBits":[${tagBits}],"setBits":[${setBits}],"blockBytes":${params.cache.blockBytes},"ways":${params.cache.ways}"""
def json: String = s"""{"addresses":[${addresses}],${simple},"directory":${directory.json},"subbanks":${bankedStore.json}}"""
}
| module InclusiveCacheBankScheduler( // @[Scheduler.scala:27:7]
input clock, // @[Scheduler.scala:27:7]
input reset, // @[Scheduler.scala:27:7]
output io_in_a_ready, // @[Scheduler.scala:29:14]
input io_in_a_valid, // @[Scheduler.scala:29:14]
input [2:0] io_in_a_bits_opcode, // @[Scheduler.scala:29:14]
input [2:0] io_in_a_bits_param, // @[Scheduler.scala:29:14]
input [2:0] io_in_a_bits_size, // @[Scheduler.scala:29:14]
input [5:0] io_in_a_bits_source, // @[Scheduler.scala:29:14]
input [31:0] io_in_a_bits_address, // @[Scheduler.scala:29:14]
input [15:0] io_in_a_bits_mask, // @[Scheduler.scala:29:14]
input [127:0] io_in_a_bits_data, // @[Scheduler.scala:29:14]
input io_in_a_bits_corrupt, // @[Scheduler.scala:29:14]
input io_in_b_ready, // @[Scheduler.scala:29:14]
output io_in_b_valid, // @[Scheduler.scala:29:14]
output [1:0] io_in_b_bits_param, // @[Scheduler.scala:29:14]
output [31:0] io_in_b_bits_address, // @[Scheduler.scala:29:14]
output io_in_c_ready, // @[Scheduler.scala:29:14]
input io_in_c_valid, // @[Scheduler.scala:29:14]
input [2:0] io_in_c_bits_opcode, // @[Scheduler.scala:29:14]
input [2:0] io_in_c_bits_param, // @[Scheduler.scala:29:14]
input [2:0] io_in_c_bits_size, // @[Scheduler.scala:29:14]
input [5:0] io_in_c_bits_source, // @[Scheduler.scala:29:14]
input [31:0] io_in_c_bits_address, // @[Scheduler.scala:29:14]
input [127:0] io_in_c_bits_data, // @[Scheduler.scala:29:14]
input io_in_c_bits_corrupt, // @[Scheduler.scala:29:14]
input io_in_d_ready, // @[Scheduler.scala:29:14]
output io_in_d_valid, // @[Scheduler.scala:29:14]
output [2:0] io_in_d_bits_opcode, // @[Scheduler.scala:29:14]
output [1:0] io_in_d_bits_param, // @[Scheduler.scala:29:14]
output [2:0] io_in_d_bits_size, // @[Scheduler.scala:29:14]
output [5:0] io_in_d_bits_source, // @[Scheduler.scala:29:14]
output [3:0] io_in_d_bits_sink, // @[Scheduler.scala:29:14]
output io_in_d_bits_denied, // @[Scheduler.scala:29:14]
output [127:0] io_in_d_bits_data, // @[Scheduler.scala:29:14]
output io_in_d_bits_corrupt, // @[Scheduler.scala:29:14]
input io_in_e_valid, // @[Scheduler.scala:29:14]
input [3:0] io_in_e_bits_sink, // @[Scheduler.scala:29:14]
input io_out_a_ready, // @[Scheduler.scala:29:14]
output io_out_a_valid, // @[Scheduler.scala:29:14]
output [2:0] io_out_a_bits_opcode, // @[Scheduler.scala:29:14]
output [2:0] io_out_a_bits_param, // @[Scheduler.scala:29:14]
output [2:0] io_out_a_bits_size, // @[Scheduler.scala:29:14]
output [3:0] io_out_a_bits_source, // @[Scheduler.scala:29:14]
output [31:0] io_out_a_bits_address, // @[Scheduler.scala:29:14]
output [7:0] io_out_a_bits_mask, // @[Scheduler.scala:29:14]
output [63:0] io_out_a_bits_data, // @[Scheduler.scala:29:14]
output io_out_a_bits_corrupt, // @[Scheduler.scala:29:14]
input io_out_c_ready, // @[Scheduler.scala:29:14]
output io_out_c_valid, // @[Scheduler.scala:29:14]
output [2:0] io_out_c_bits_opcode, // @[Scheduler.scala:29:14]
output [2:0] io_out_c_bits_param, // @[Scheduler.scala:29:14]
output [2:0] io_out_c_bits_size, // @[Scheduler.scala:29:14]
output [3:0] io_out_c_bits_source, // @[Scheduler.scala:29:14]
output [31:0] io_out_c_bits_address, // @[Scheduler.scala:29:14]
output [63:0] io_out_c_bits_data, // @[Scheduler.scala:29:14]
output io_out_c_bits_corrupt, // @[Scheduler.scala:29:14]
output io_out_d_ready, // @[Scheduler.scala:29:14]
input io_out_d_valid, // @[Scheduler.scala:29:14]
input [2:0] io_out_d_bits_opcode, // @[Scheduler.scala:29:14]
input [1:0] io_out_d_bits_param, // @[Scheduler.scala:29:14]
input [2:0] io_out_d_bits_size, // @[Scheduler.scala:29:14]
input [3:0] io_out_d_bits_source, // @[Scheduler.scala:29:14]
input [2:0] io_out_d_bits_sink, // @[Scheduler.scala:29:14]
input io_out_d_bits_denied, // @[Scheduler.scala:29:14]
input [63:0] io_out_d_bits_data, // @[Scheduler.scala:29:14]
input io_out_d_bits_corrupt, // @[Scheduler.scala:29:14]
output io_out_e_valid, // @[Scheduler.scala:29:14]
output [2:0] io_out_e_bits_sink, // @[Scheduler.scala:29:14]
output io_req_ready, // @[Scheduler.scala:29:14]
input io_req_valid, // @[Scheduler.scala:29:14]
input [31:0] io_req_bits_address, // @[Scheduler.scala:29:14]
output io_resp_valid // @[Scheduler.scala:29:14]
);
wire [8:0] mshrs_11_io_allocate_bits_tag; // @[Scheduler.scala:233:72, :280:83, :282:70, :295:103, :297:73]
wire [8:0] mshrs_10_io_allocate_bits_tag; // @[Scheduler.scala:233:72, :280:83, :282:70, :287:131, :289:74]
wire [8:0] mshrs_9_io_allocate_bits_tag; // @[Scheduler.scala:233:72, :280:83, :282:70]
wire [8:0] mshrs_8_io_allocate_bits_tag; // @[Scheduler.scala:233:72, :280:83, :282:70]
wire [8:0] mshrs_7_io_allocate_bits_tag; // @[Scheduler.scala:233:72, :280:83, :282:70]
wire [8:0] mshrs_6_io_allocate_bits_tag; // @[Scheduler.scala:233:72, :280:83, :282:70]
wire [8:0] mshrs_5_io_allocate_bits_tag; // @[Scheduler.scala:233:72, :280:83, :282:70]
wire [8:0] mshrs_4_io_allocate_bits_tag; // @[Scheduler.scala:233:72, :280:83, :282:70]
wire [8:0] mshrs_3_io_allocate_bits_tag; // @[Scheduler.scala:233:72, :280:83, :282:70]
wire [8:0] mshrs_2_io_allocate_bits_tag; // @[Scheduler.scala:233:72, :280:83, :282:70]
wire [8:0] mshrs_1_io_allocate_bits_tag; // @[Scheduler.scala:233:72, :280:83, :282:70]
wire [8:0] mshrs_0_io_allocate_bits_tag; // @[Scheduler.scala:233:72, :280:83, :282:70]
wire [5:0] request_bits_put; // @[Scheduler.scala:163:21]
wire [5:0] request_bits_offset; // @[Scheduler.scala:163:21]
wire [8:0] request_bits_tag; // @[Scheduler.scala:163:21]
wire [5:0] request_bits_source; // @[Scheduler.scala:163:21]
wire [2:0] request_bits_size; // @[Scheduler.scala:163:21]
wire [2:0] request_bits_param; // @[Scheduler.scala:163:21]
wire [2:0] request_bits_opcode; // @[Scheduler.scala:163:21]
wire request_bits_control; // @[Scheduler.scala:163:21]
wire request_bits_prio_2; // @[Scheduler.scala:163:21]
wire request_bits_prio_0; // @[Scheduler.scala:163:21]
wire _mshrs_11_io_status_valid; // @[Scheduler.scala:71:46]
wire [10:0] _mshrs_11_io_status_bits_set; // @[Scheduler.scala:71:46]
wire [8:0] _mshrs_11_io_status_bits_tag; // @[Scheduler.scala:71:46]
wire _mshrs_11_io_status_bits_blockB; // @[Scheduler.scala:71:46]
wire _mshrs_11_io_status_bits_nestB; // @[Scheduler.scala:71:46]
wire _mshrs_11_io_status_bits_blockC; // @[Scheduler.scala:71:46]
wire _mshrs_11_io_status_bits_nestC; // @[Scheduler.scala:71:46]
wire _mshrs_11_io_schedule_bits_a_valid; // @[Scheduler.scala:71:46]
wire [8:0] _mshrs_11_io_schedule_bits_a_bits_tag; // @[Scheduler.scala:71:46]
wire [10:0] _mshrs_11_io_schedule_bits_a_bits_set; // @[Scheduler.scala:71:46]
wire [2:0] _mshrs_11_io_schedule_bits_a_bits_param; // @[Scheduler.scala:71:46]
wire _mshrs_11_io_schedule_bits_a_bits_block; // @[Scheduler.scala:71:46]
wire _mshrs_11_io_schedule_bits_b_valid; // @[Scheduler.scala:71:46]
wire [2:0] _mshrs_11_io_schedule_bits_b_bits_param; // @[Scheduler.scala:71:46]
wire [8:0] _mshrs_11_io_schedule_bits_b_bits_tag; // @[Scheduler.scala:71:46]
wire [10:0] _mshrs_11_io_schedule_bits_b_bits_set; // @[Scheduler.scala:71:46]
wire _mshrs_11_io_schedule_bits_b_bits_clients; // @[Scheduler.scala:71:46]
wire _mshrs_11_io_schedule_bits_c_valid; // @[Scheduler.scala:71:46]
wire [2:0] _mshrs_11_io_schedule_bits_c_bits_opcode; // @[Scheduler.scala:71:46]
wire [2:0] _mshrs_11_io_schedule_bits_c_bits_param; // @[Scheduler.scala:71:46]
wire [8:0] _mshrs_11_io_schedule_bits_c_bits_tag; // @[Scheduler.scala:71:46]
wire [10:0] _mshrs_11_io_schedule_bits_c_bits_set; // @[Scheduler.scala:71:46]
wire [3:0] _mshrs_11_io_schedule_bits_c_bits_way; // @[Scheduler.scala:71:46]
wire _mshrs_11_io_schedule_bits_c_bits_dirty; // @[Scheduler.scala:71:46]
wire _mshrs_11_io_schedule_bits_d_valid; // @[Scheduler.scala:71:46]
wire _mshrs_11_io_schedule_bits_d_bits_prio_2; // @[Scheduler.scala:71:46]
wire _mshrs_11_io_schedule_bits_d_bits_control; // @[Scheduler.scala:71:46]
wire [2:0] _mshrs_11_io_schedule_bits_d_bits_opcode; // @[Scheduler.scala:71:46]
wire [2:0] _mshrs_11_io_schedule_bits_d_bits_param; // @[Scheduler.scala:71:46]
wire [2:0] _mshrs_11_io_schedule_bits_d_bits_size; // @[Scheduler.scala:71:46]
wire [5:0] _mshrs_11_io_schedule_bits_d_bits_source; // @[Scheduler.scala:71:46]
wire [8:0] _mshrs_11_io_schedule_bits_d_bits_tag; // @[Scheduler.scala:71:46]
wire [5:0] _mshrs_11_io_schedule_bits_d_bits_offset; // @[Scheduler.scala:71:46]
wire [5:0] _mshrs_11_io_schedule_bits_d_bits_put; // @[Scheduler.scala:71:46]
wire [10:0] _mshrs_11_io_schedule_bits_d_bits_set; // @[Scheduler.scala:71:46]
wire [3:0] _mshrs_11_io_schedule_bits_d_bits_way; // @[Scheduler.scala:71:46]
wire _mshrs_11_io_schedule_bits_d_bits_bad; // @[Scheduler.scala:71:46]
wire _mshrs_11_io_schedule_bits_e_valid; // @[Scheduler.scala:71:46]
wire [2:0] _mshrs_11_io_schedule_bits_e_bits_sink; // @[Scheduler.scala:71:46]
wire _mshrs_11_io_schedule_bits_x_valid; // @[Scheduler.scala:71:46]
wire _mshrs_11_io_schedule_bits_dir_valid; // @[Scheduler.scala:71:46]
wire [10:0] _mshrs_11_io_schedule_bits_dir_bits_set; // @[Scheduler.scala:71:46]
wire [3:0] _mshrs_11_io_schedule_bits_dir_bits_way; // @[Scheduler.scala:71:46]
wire _mshrs_11_io_schedule_bits_dir_bits_data_dirty; // @[Scheduler.scala:71:46]
wire [1:0] _mshrs_11_io_schedule_bits_dir_bits_data_state; // @[Scheduler.scala:71:46]
wire _mshrs_11_io_schedule_bits_dir_bits_data_clients; // @[Scheduler.scala:71:46]
wire [8:0] _mshrs_11_io_schedule_bits_dir_bits_data_tag; // @[Scheduler.scala:71:46]
wire _mshrs_11_io_schedule_bits_reload; // @[Scheduler.scala:71:46]
wire _mshrs_10_io_status_valid; // @[Scheduler.scala:71:46]
wire [10:0] _mshrs_10_io_status_bits_set; // @[Scheduler.scala:71:46]
wire [8:0] _mshrs_10_io_status_bits_tag; // @[Scheduler.scala:71:46]
wire [3:0] _mshrs_10_io_status_bits_way; // @[Scheduler.scala:71:46]
wire _mshrs_10_io_status_bits_blockB; // @[Scheduler.scala:71:46]
wire _mshrs_10_io_status_bits_nestB; // @[Scheduler.scala:71:46]
wire _mshrs_10_io_status_bits_blockC; // @[Scheduler.scala:71:46]
wire _mshrs_10_io_status_bits_nestC; // @[Scheduler.scala:71:46]
wire _mshrs_10_io_schedule_valid; // @[Scheduler.scala:71:46]
wire _mshrs_10_io_schedule_bits_a_valid; // @[Scheduler.scala:71:46]
wire [8:0] _mshrs_10_io_schedule_bits_a_bits_tag; // @[Scheduler.scala:71:46]
wire [10:0] _mshrs_10_io_schedule_bits_a_bits_set; // @[Scheduler.scala:71:46]
wire [2:0] _mshrs_10_io_schedule_bits_a_bits_param; // @[Scheduler.scala:71:46]
wire _mshrs_10_io_schedule_bits_a_bits_block; // @[Scheduler.scala:71:46]
wire _mshrs_10_io_schedule_bits_b_valid; // @[Scheduler.scala:71:46]
wire [2:0] _mshrs_10_io_schedule_bits_b_bits_param; // @[Scheduler.scala:71:46]
wire [8:0] _mshrs_10_io_schedule_bits_b_bits_tag; // @[Scheduler.scala:71:46]
wire [10:0] _mshrs_10_io_schedule_bits_b_bits_set; // @[Scheduler.scala:71:46]
wire _mshrs_10_io_schedule_bits_b_bits_clients; // @[Scheduler.scala:71:46]
wire _mshrs_10_io_schedule_bits_c_valid; // @[Scheduler.scala:71:46]
wire [2:0] _mshrs_10_io_schedule_bits_c_bits_opcode; // @[Scheduler.scala:71:46]
wire [2:0] _mshrs_10_io_schedule_bits_c_bits_param; // @[Scheduler.scala:71:46]
wire [8:0] _mshrs_10_io_schedule_bits_c_bits_tag; // @[Scheduler.scala:71:46]
wire [10:0] _mshrs_10_io_schedule_bits_c_bits_set; // @[Scheduler.scala:71:46]
wire [3:0] _mshrs_10_io_schedule_bits_c_bits_way; // @[Scheduler.scala:71:46]
wire _mshrs_10_io_schedule_bits_c_bits_dirty; // @[Scheduler.scala:71:46]
wire _mshrs_10_io_schedule_bits_d_valid; // @[Scheduler.scala:71:46]
wire _mshrs_10_io_schedule_bits_d_bits_prio_1; // @[Scheduler.scala:71:46]
wire _mshrs_10_io_schedule_bits_d_bits_prio_2; // @[Scheduler.scala:71:46]
wire _mshrs_10_io_schedule_bits_d_bits_control; // @[Scheduler.scala:71:46]
wire [2:0] _mshrs_10_io_schedule_bits_d_bits_opcode; // @[Scheduler.scala:71:46]
wire [2:0] _mshrs_10_io_schedule_bits_d_bits_param; // @[Scheduler.scala:71:46]
wire [2:0] _mshrs_10_io_schedule_bits_d_bits_size; // @[Scheduler.scala:71:46]
wire [5:0] _mshrs_10_io_schedule_bits_d_bits_source; // @[Scheduler.scala:71:46]
wire [8:0] _mshrs_10_io_schedule_bits_d_bits_tag; // @[Scheduler.scala:71:46]
wire [5:0] _mshrs_10_io_schedule_bits_d_bits_offset; // @[Scheduler.scala:71:46]
wire [5:0] _mshrs_10_io_schedule_bits_d_bits_put; // @[Scheduler.scala:71:46]
wire [10:0] _mshrs_10_io_schedule_bits_d_bits_set; // @[Scheduler.scala:71:46]
wire [3:0] _mshrs_10_io_schedule_bits_d_bits_way; // @[Scheduler.scala:71:46]
wire _mshrs_10_io_schedule_bits_d_bits_bad; // @[Scheduler.scala:71:46]
wire _mshrs_10_io_schedule_bits_e_valid; // @[Scheduler.scala:71:46]
wire [2:0] _mshrs_10_io_schedule_bits_e_bits_sink; // @[Scheduler.scala:71:46]
wire _mshrs_10_io_schedule_bits_x_valid; // @[Scheduler.scala:71:46]
wire _mshrs_10_io_schedule_bits_dir_valid; // @[Scheduler.scala:71:46]
wire [10:0] _mshrs_10_io_schedule_bits_dir_bits_set; // @[Scheduler.scala:71:46]
wire [3:0] _mshrs_10_io_schedule_bits_dir_bits_way; // @[Scheduler.scala:71:46]
wire _mshrs_10_io_schedule_bits_dir_bits_data_dirty; // @[Scheduler.scala:71:46]
wire [1:0] _mshrs_10_io_schedule_bits_dir_bits_data_state; // @[Scheduler.scala:71:46]
wire _mshrs_10_io_schedule_bits_dir_bits_data_clients; // @[Scheduler.scala:71:46]
wire [8:0] _mshrs_10_io_schedule_bits_dir_bits_data_tag; // @[Scheduler.scala:71:46]
wire _mshrs_10_io_schedule_bits_reload; // @[Scheduler.scala:71:46]
wire _mshrs_9_io_status_valid; // @[Scheduler.scala:71:46]
wire [10:0] _mshrs_9_io_status_bits_set; // @[Scheduler.scala:71:46]
wire [8:0] _mshrs_9_io_status_bits_tag; // @[Scheduler.scala:71:46]
wire [3:0] _mshrs_9_io_status_bits_way; // @[Scheduler.scala:71:46]
wire _mshrs_9_io_status_bits_blockB; // @[Scheduler.scala:71:46]
wire _mshrs_9_io_status_bits_nestB; // @[Scheduler.scala:71:46]
wire _mshrs_9_io_status_bits_blockC; // @[Scheduler.scala:71:46]
wire _mshrs_9_io_status_bits_nestC; // @[Scheduler.scala:71:46]
wire _mshrs_9_io_schedule_valid; // @[Scheduler.scala:71:46]
wire _mshrs_9_io_schedule_bits_a_valid; // @[Scheduler.scala:71:46]
wire [8:0] _mshrs_9_io_schedule_bits_a_bits_tag; // @[Scheduler.scala:71:46]
wire [10:0] _mshrs_9_io_schedule_bits_a_bits_set; // @[Scheduler.scala:71:46]
wire [2:0] _mshrs_9_io_schedule_bits_a_bits_param; // @[Scheduler.scala:71:46]
wire _mshrs_9_io_schedule_bits_a_bits_block; // @[Scheduler.scala:71:46]
wire _mshrs_9_io_schedule_bits_b_valid; // @[Scheduler.scala:71:46]
wire [2:0] _mshrs_9_io_schedule_bits_b_bits_param; // @[Scheduler.scala:71:46]
wire [8:0] _mshrs_9_io_schedule_bits_b_bits_tag; // @[Scheduler.scala:71:46]
wire [10:0] _mshrs_9_io_schedule_bits_b_bits_set; // @[Scheduler.scala:71:46]
wire _mshrs_9_io_schedule_bits_b_bits_clients; // @[Scheduler.scala:71:46]
wire _mshrs_9_io_schedule_bits_c_valid; // @[Scheduler.scala:71:46]
wire [2:0] _mshrs_9_io_schedule_bits_c_bits_opcode; // @[Scheduler.scala:71:46]
wire [2:0] _mshrs_9_io_schedule_bits_c_bits_param; // @[Scheduler.scala:71:46]
wire [8:0] _mshrs_9_io_schedule_bits_c_bits_tag; // @[Scheduler.scala:71:46]
wire [10:0] _mshrs_9_io_schedule_bits_c_bits_set; // @[Scheduler.scala:71:46]
wire [3:0] _mshrs_9_io_schedule_bits_c_bits_way; // @[Scheduler.scala:71:46]
wire _mshrs_9_io_schedule_bits_c_bits_dirty; // @[Scheduler.scala:71:46]
wire _mshrs_9_io_schedule_bits_d_valid; // @[Scheduler.scala:71:46]
wire _mshrs_9_io_schedule_bits_d_bits_prio_0; // @[Scheduler.scala:71:46]
wire _mshrs_9_io_schedule_bits_d_bits_prio_1; // @[Scheduler.scala:71:46]
wire _mshrs_9_io_schedule_bits_d_bits_prio_2; // @[Scheduler.scala:71:46]
wire _mshrs_9_io_schedule_bits_d_bits_control; // @[Scheduler.scala:71:46]
wire [2:0] _mshrs_9_io_schedule_bits_d_bits_opcode; // @[Scheduler.scala:71:46]
wire [2:0] _mshrs_9_io_schedule_bits_d_bits_param; // @[Scheduler.scala:71:46]
wire [2:0] _mshrs_9_io_schedule_bits_d_bits_size; // @[Scheduler.scala:71:46]
wire [5:0] _mshrs_9_io_schedule_bits_d_bits_source; // @[Scheduler.scala:71:46]
wire [8:0] _mshrs_9_io_schedule_bits_d_bits_tag; // @[Scheduler.scala:71:46]
wire [5:0] _mshrs_9_io_schedule_bits_d_bits_offset; // @[Scheduler.scala:71:46]
wire [5:0] _mshrs_9_io_schedule_bits_d_bits_put; // @[Scheduler.scala:71:46]
wire [10:0] _mshrs_9_io_schedule_bits_d_bits_set; // @[Scheduler.scala:71:46]
wire [3:0] _mshrs_9_io_schedule_bits_d_bits_way; // @[Scheduler.scala:71:46]
wire _mshrs_9_io_schedule_bits_d_bits_bad; // @[Scheduler.scala:71:46]
wire _mshrs_9_io_schedule_bits_e_valid; // @[Scheduler.scala:71:46]
wire [2:0] _mshrs_9_io_schedule_bits_e_bits_sink; // @[Scheduler.scala:71:46]
wire _mshrs_9_io_schedule_bits_x_valid; // @[Scheduler.scala:71:46]
wire _mshrs_9_io_schedule_bits_dir_valid; // @[Scheduler.scala:71:46]
wire [10:0] _mshrs_9_io_schedule_bits_dir_bits_set; // @[Scheduler.scala:71:46]
wire [3:0] _mshrs_9_io_schedule_bits_dir_bits_way; // @[Scheduler.scala:71:46]
wire _mshrs_9_io_schedule_bits_dir_bits_data_dirty; // @[Scheduler.scala:71:46]
wire [1:0] _mshrs_9_io_schedule_bits_dir_bits_data_state; // @[Scheduler.scala:71:46]
wire _mshrs_9_io_schedule_bits_dir_bits_data_clients; // @[Scheduler.scala:71:46]
wire [8:0] _mshrs_9_io_schedule_bits_dir_bits_data_tag; // @[Scheduler.scala:71:46]
wire _mshrs_9_io_schedule_bits_reload; // @[Scheduler.scala:71:46]
wire _mshrs_8_io_status_valid; // @[Scheduler.scala:71:46]
wire [10:0] _mshrs_8_io_status_bits_set; // @[Scheduler.scala:71:46]
wire [8:0] _mshrs_8_io_status_bits_tag; // @[Scheduler.scala:71:46]
wire [3:0] _mshrs_8_io_status_bits_way; // @[Scheduler.scala:71:46]
wire _mshrs_8_io_status_bits_blockB; // @[Scheduler.scala:71:46]
wire _mshrs_8_io_status_bits_nestB; // @[Scheduler.scala:71:46]
wire _mshrs_8_io_status_bits_blockC; // @[Scheduler.scala:71:46]
wire _mshrs_8_io_status_bits_nestC; // @[Scheduler.scala:71:46]
wire _mshrs_8_io_schedule_valid; // @[Scheduler.scala:71:46]
wire _mshrs_8_io_schedule_bits_a_valid; // @[Scheduler.scala:71:46]
wire [8:0] _mshrs_8_io_schedule_bits_a_bits_tag; // @[Scheduler.scala:71:46]
wire [10:0] _mshrs_8_io_schedule_bits_a_bits_set; // @[Scheduler.scala:71:46]
wire [2:0] _mshrs_8_io_schedule_bits_a_bits_param; // @[Scheduler.scala:71:46]
wire _mshrs_8_io_schedule_bits_a_bits_block; // @[Scheduler.scala:71:46]
wire _mshrs_8_io_schedule_bits_b_valid; // @[Scheduler.scala:71:46]
wire [2:0] _mshrs_8_io_schedule_bits_b_bits_param; // @[Scheduler.scala:71:46]
wire [8:0] _mshrs_8_io_schedule_bits_b_bits_tag; // @[Scheduler.scala:71:46]
wire [10:0] _mshrs_8_io_schedule_bits_b_bits_set; // @[Scheduler.scala:71:46]
wire _mshrs_8_io_schedule_bits_b_bits_clients; // @[Scheduler.scala:71:46]
wire _mshrs_8_io_schedule_bits_c_valid; // @[Scheduler.scala:71:46]
wire [2:0] _mshrs_8_io_schedule_bits_c_bits_opcode; // @[Scheduler.scala:71:46]
wire [2:0] _mshrs_8_io_schedule_bits_c_bits_param; // @[Scheduler.scala:71:46]
wire [8:0] _mshrs_8_io_schedule_bits_c_bits_tag; // @[Scheduler.scala:71:46]
wire [10:0] _mshrs_8_io_schedule_bits_c_bits_set; // @[Scheduler.scala:71:46]
wire [3:0] _mshrs_8_io_schedule_bits_c_bits_way; // @[Scheduler.scala:71:46]
wire _mshrs_8_io_schedule_bits_c_bits_dirty; // @[Scheduler.scala:71:46]
wire _mshrs_8_io_schedule_bits_d_valid; // @[Scheduler.scala:71:46]
wire _mshrs_8_io_schedule_bits_d_bits_prio_0; // @[Scheduler.scala:71:46]
wire _mshrs_8_io_schedule_bits_d_bits_prio_1; // @[Scheduler.scala:71:46]
wire _mshrs_8_io_schedule_bits_d_bits_prio_2; // @[Scheduler.scala:71:46]
wire _mshrs_8_io_schedule_bits_d_bits_control; // @[Scheduler.scala:71:46]
wire [2:0] _mshrs_8_io_schedule_bits_d_bits_opcode; // @[Scheduler.scala:71:46]
wire [2:0] _mshrs_8_io_schedule_bits_d_bits_param; // @[Scheduler.scala:71:46]
wire [2:0] _mshrs_8_io_schedule_bits_d_bits_size; // @[Scheduler.scala:71:46]
wire [5:0] _mshrs_8_io_schedule_bits_d_bits_source; // @[Scheduler.scala:71:46]
wire [8:0] _mshrs_8_io_schedule_bits_d_bits_tag; // @[Scheduler.scala:71:46]
wire [5:0] _mshrs_8_io_schedule_bits_d_bits_offset; // @[Scheduler.scala:71:46]
wire [5:0] _mshrs_8_io_schedule_bits_d_bits_put; // @[Scheduler.scala:71:46]
wire [10:0] _mshrs_8_io_schedule_bits_d_bits_set; // @[Scheduler.scala:71:46]
wire [3:0] _mshrs_8_io_schedule_bits_d_bits_way; // @[Scheduler.scala:71:46]
wire _mshrs_8_io_schedule_bits_d_bits_bad; // @[Scheduler.scala:71:46]
wire _mshrs_8_io_schedule_bits_e_valid; // @[Scheduler.scala:71:46]
wire [2:0] _mshrs_8_io_schedule_bits_e_bits_sink; // @[Scheduler.scala:71:46]
wire _mshrs_8_io_schedule_bits_x_valid; // @[Scheduler.scala:71:46]
wire _mshrs_8_io_schedule_bits_dir_valid; // @[Scheduler.scala:71:46]
wire [10:0] _mshrs_8_io_schedule_bits_dir_bits_set; // @[Scheduler.scala:71:46]
wire [3:0] _mshrs_8_io_schedule_bits_dir_bits_way; // @[Scheduler.scala:71:46]
wire _mshrs_8_io_schedule_bits_dir_bits_data_dirty; // @[Scheduler.scala:71:46]
wire [1:0] _mshrs_8_io_schedule_bits_dir_bits_data_state; // @[Scheduler.scala:71:46]
wire _mshrs_8_io_schedule_bits_dir_bits_data_clients; // @[Scheduler.scala:71:46]
wire [8:0] _mshrs_8_io_schedule_bits_dir_bits_data_tag; // @[Scheduler.scala:71:46]
wire _mshrs_8_io_schedule_bits_reload; // @[Scheduler.scala:71:46]
wire _mshrs_7_io_status_valid; // @[Scheduler.scala:71:46]
wire [10:0] _mshrs_7_io_status_bits_set; // @[Scheduler.scala:71:46]
wire [8:0] _mshrs_7_io_status_bits_tag; // @[Scheduler.scala:71:46]
wire [3:0] _mshrs_7_io_status_bits_way; // @[Scheduler.scala:71:46]
wire _mshrs_7_io_status_bits_blockB; // @[Scheduler.scala:71:46]
wire _mshrs_7_io_status_bits_nestB; // @[Scheduler.scala:71:46]
wire _mshrs_7_io_status_bits_blockC; // @[Scheduler.scala:71:46]
wire _mshrs_7_io_status_bits_nestC; // @[Scheduler.scala:71:46]
wire _mshrs_7_io_schedule_valid; // @[Scheduler.scala:71:46]
wire _mshrs_7_io_schedule_bits_a_valid; // @[Scheduler.scala:71:46]
wire [8:0] _mshrs_7_io_schedule_bits_a_bits_tag; // @[Scheduler.scala:71:46]
wire [10:0] _mshrs_7_io_schedule_bits_a_bits_set; // @[Scheduler.scala:71:46]
wire [2:0] _mshrs_7_io_schedule_bits_a_bits_param; // @[Scheduler.scala:71:46]
wire _mshrs_7_io_schedule_bits_a_bits_block; // @[Scheduler.scala:71:46]
wire _mshrs_7_io_schedule_bits_b_valid; // @[Scheduler.scala:71:46]
wire [2:0] _mshrs_7_io_schedule_bits_b_bits_param; // @[Scheduler.scala:71:46]
wire [8:0] _mshrs_7_io_schedule_bits_b_bits_tag; // @[Scheduler.scala:71:46]
wire [10:0] _mshrs_7_io_schedule_bits_b_bits_set; // @[Scheduler.scala:71:46]
wire _mshrs_7_io_schedule_bits_b_bits_clients; // @[Scheduler.scala:71:46]
wire _mshrs_7_io_schedule_bits_c_valid; // @[Scheduler.scala:71:46]
wire [2:0] _mshrs_7_io_schedule_bits_c_bits_opcode; // @[Scheduler.scala:71:46]
wire [2:0] _mshrs_7_io_schedule_bits_c_bits_param; // @[Scheduler.scala:71:46]
wire [8:0] _mshrs_7_io_schedule_bits_c_bits_tag; // @[Scheduler.scala:71:46]
wire [10:0] _mshrs_7_io_schedule_bits_c_bits_set; // @[Scheduler.scala:71:46]
wire [3:0] _mshrs_7_io_schedule_bits_c_bits_way; // @[Scheduler.scala:71:46]
wire _mshrs_7_io_schedule_bits_c_bits_dirty; // @[Scheduler.scala:71:46]
wire _mshrs_7_io_schedule_bits_d_valid; // @[Scheduler.scala:71:46]
wire _mshrs_7_io_schedule_bits_d_bits_prio_0; // @[Scheduler.scala:71:46]
wire _mshrs_7_io_schedule_bits_d_bits_prio_1; // @[Scheduler.scala:71:46]
wire _mshrs_7_io_schedule_bits_d_bits_prio_2; // @[Scheduler.scala:71:46]
wire _mshrs_7_io_schedule_bits_d_bits_control; // @[Scheduler.scala:71:46]
wire [2:0] _mshrs_7_io_schedule_bits_d_bits_opcode; // @[Scheduler.scala:71:46]
wire [2:0] _mshrs_7_io_schedule_bits_d_bits_param; // @[Scheduler.scala:71:46]
wire [2:0] _mshrs_7_io_schedule_bits_d_bits_size; // @[Scheduler.scala:71:46]
wire [5:0] _mshrs_7_io_schedule_bits_d_bits_source; // @[Scheduler.scala:71:46]
wire [8:0] _mshrs_7_io_schedule_bits_d_bits_tag; // @[Scheduler.scala:71:46]
wire [5:0] _mshrs_7_io_schedule_bits_d_bits_offset; // @[Scheduler.scala:71:46]
wire [5:0] _mshrs_7_io_schedule_bits_d_bits_put; // @[Scheduler.scala:71:46]
wire [10:0] _mshrs_7_io_schedule_bits_d_bits_set; // @[Scheduler.scala:71:46]
wire [3:0] _mshrs_7_io_schedule_bits_d_bits_way; // @[Scheduler.scala:71:46]
wire _mshrs_7_io_schedule_bits_d_bits_bad; // @[Scheduler.scala:71:46]
wire _mshrs_7_io_schedule_bits_e_valid; // @[Scheduler.scala:71:46]
wire [2:0] _mshrs_7_io_schedule_bits_e_bits_sink; // @[Scheduler.scala:71:46]
wire _mshrs_7_io_schedule_bits_x_valid; // @[Scheduler.scala:71:46]
wire _mshrs_7_io_schedule_bits_dir_valid; // @[Scheduler.scala:71:46]
wire [10:0] _mshrs_7_io_schedule_bits_dir_bits_set; // @[Scheduler.scala:71:46]
wire [3:0] _mshrs_7_io_schedule_bits_dir_bits_way; // @[Scheduler.scala:71:46]
wire _mshrs_7_io_schedule_bits_dir_bits_data_dirty; // @[Scheduler.scala:71:46]
wire [1:0] _mshrs_7_io_schedule_bits_dir_bits_data_state; // @[Scheduler.scala:71:46]
wire _mshrs_7_io_schedule_bits_dir_bits_data_clients; // @[Scheduler.scala:71:46]
wire [8:0] _mshrs_7_io_schedule_bits_dir_bits_data_tag; // @[Scheduler.scala:71:46]
wire _mshrs_7_io_schedule_bits_reload; // @[Scheduler.scala:71:46]
wire _mshrs_6_io_status_valid; // @[Scheduler.scala:71:46]
wire [10:0] _mshrs_6_io_status_bits_set; // @[Scheduler.scala:71:46]
wire [8:0] _mshrs_6_io_status_bits_tag; // @[Scheduler.scala:71:46]
wire [3:0] _mshrs_6_io_status_bits_way; // @[Scheduler.scala:71:46]
wire _mshrs_6_io_status_bits_blockB; // @[Scheduler.scala:71:46]
wire _mshrs_6_io_status_bits_nestB; // @[Scheduler.scala:71:46]
wire _mshrs_6_io_status_bits_blockC; // @[Scheduler.scala:71:46]
wire _mshrs_6_io_status_bits_nestC; // @[Scheduler.scala:71:46]
wire _mshrs_6_io_schedule_valid; // @[Scheduler.scala:71:46]
wire _mshrs_6_io_schedule_bits_a_valid; // @[Scheduler.scala:71:46]
wire [8:0] _mshrs_6_io_schedule_bits_a_bits_tag; // @[Scheduler.scala:71:46]
wire [10:0] _mshrs_6_io_schedule_bits_a_bits_set; // @[Scheduler.scala:71:46]
wire [2:0] _mshrs_6_io_schedule_bits_a_bits_param; // @[Scheduler.scala:71:46]
wire _mshrs_6_io_schedule_bits_a_bits_block; // @[Scheduler.scala:71:46]
wire _mshrs_6_io_schedule_bits_b_valid; // @[Scheduler.scala:71:46]
wire [2:0] _mshrs_6_io_schedule_bits_b_bits_param; // @[Scheduler.scala:71:46]
wire [8:0] _mshrs_6_io_schedule_bits_b_bits_tag; // @[Scheduler.scala:71:46]
wire [10:0] _mshrs_6_io_schedule_bits_b_bits_set; // @[Scheduler.scala:71:46]
wire _mshrs_6_io_schedule_bits_b_bits_clients; // @[Scheduler.scala:71:46]
wire _mshrs_6_io_schedule_bits_c_valid; // @[Scheduler.scala:71:46]
wire [2:0] _mshrs_6_io_schedule_bits_c_bits_opcode; // @[Scheduler.scala:71:46]
wire [2:0] _mshrs_6_io_schedule_bits_c_bits_param; // @[Scheduler.scala:71:46]
wire [8:0] _mshrs_6_io_schedule_bits_c_bits_tag; // @[Scheduler.scala:71:46]
wire [10:0] _mshrs_6_io_schedule_bits_c_bits_set; // @[Scheduler.scala:71:46]
wire [3:0] _mshrs_6_io_schedule_bits_c_bits_way; // @[Scheduler.scala:71:46]
wire _mshrs_6_io_schedule_bits_c_bits_dirty; // @[Scheduler.scala:71:46]
wire _mshrs_6_io_schedule_bits_d_valid; // @[Scheduler.scala:71:46]
wire _mshrs_6_io_schedule_bits_d_bits_prio_0; // @[Scheduler.scala:71:46]
wire _mshrs_6_io_schedule_bits_d_bits_prio_1; // @[Scheduler.scala:71:46]
wire _mshrs_6_io_schedule_bits_d_bits_prio_2; // @[Scheduler.scala:71:46]
wire _mshrs_6_io_schedule_bits_d_bits_control; // @[Scheduler.scala:71:46]
wire [2:0] _mshrs_6_io_schedule_bits_d_bits_opcode; // @[Scheduler.scala:71:46]
wire [2:0] _mshrs_6_io_schedule_bits_d_bits_param; // @[Scheduler.scala:71:46]
wire [2:0] _mshrs_6_io_schedule_bits_d_bits_size; // @[Scheduler.scala:71:46]
wire [5:0] _mshrs_6_io_schedule_bits_d_bits_source; // @[Scheduler.scala:71:46]
wire [8:0] _mshrs_6_io_schedule_bits_d_bits_tag; // @[Scheduler.scala:71:46]
wire [5:0] _mshrs_6_io_schedule_bits_d_bits_offset; // @[Scheduler.scala:71:46]
wire [5:0] _mshrs_6_io_schedule_bits_d_bits_put; // @[Scheduler.scala:71:46]
wire [10:0] _mshrs_6_io_schedule_bits_d_bits_set; // @[Scheduler.scala:71:46]
wire [3:0] _mshrs_6_io_schedule_bits_d_bits_way; // @[Scheduler.scala:71:46]
wire _mshrs_6_io_schedule_bits_d_bits_bad; // @[Scheduler.scala:71:46]
wire _mshrs_6_io_schedule_bits_e_valid; // @[Scheduler.scala:71:46]
wire [2:0] _mshrs_6_io_schedule_bits_e_bits_sink; // @[Scheduler.scala:71:46]
wire _mshrs_6_io_schedule_bits_x_valid; // @[Scheduler.scala:71:46]
wire _mshrs_6_io_schedule_bits_dir_valid; // @[Scheduler.scala:71:46]
wire [10:0] _mshrs_6_io_schedule_bits_dir_bits_set; // @[Scheduler.scala:71:46]
wire [3:0] _mshrs_6_io_schedule_bits_dir_bits_way; // @[Scheduler.scala:71:46]
wire _mshrs_6_io_schedule_bits_dir_bits_data_dirty; // @[Scheduler.scala:71:46]
wire [1:0] _mshrs_6_io_schedule_bits_dir_bits_data_state; // @[Scheduler.scala:71:46]
wire _mshrs_6_io_schedule_bits_dir_bits_data_clients; // @[Scheduler.scala:71:46]
wire [8:0] _mshrs_6_io_schedule_bits_dir_bits_data_tag; // @[Scheduler.scala:71:46]
wire _mshrs_6_io_schedule_bits_reload; // @[Scheduler.scala:71:46]
wire _mshrs_5_io_status_valid; // @[Scheduler.scala:71:46]
wire [10:0] _mshrs_5_io_status_bits_set; // @[Scheduler.scala:71:46]
wire [8:0] _mshrs_5_io_status_bits_tag; // @[Scheduler.scala:71:46]
wire [3:0] _mshrs_5_io_status_bits_way; // @[Scheduler.scala:71:46]
wire _mshrs_5_io_status_bits_blockB; // @[Scheduler.scala:71:46]
wire _mshrs_5_io_status_bits_nestB; // @[Scheduler.scala:71:46]
wire _mshrs_5_io_status_bits_blockC; // @[Scheduler.scala:71:46]
wire _mshrs_5_io_status_bits_nestC; // @[Scheduler.scala:71:46]
wire _mshrs_5_io_schedule_valid; // @[Scheduler.scala:71:46]
wire _mshrs_5_io_schedule_bits_a_valid; // @[Scheduler.scala:71:46]
wire [8:0] _mshrs_5_io_schedule_bits_a_bits_tag; // @[Scheduler.scala:71:46]
wire [10:0] _mshrs_5_io_schedule_bits_a_bits_set; // @[Scheduler.scala:71:46]
wire [2:0] _mshrs_5_io_schedule_bits_a_bits_param; // @[Scheduler.scala:71:46]
wire _mshrs_5_io_schedule_bits_a_bits_block; // @[Scheduler.scala:71:46]
wire _mshrs_5_io_schedule_bits_b_valid; // @[Scheduler.scala:71:46]
wire [2:0] _mshrs_5_io_schedule_bits_b_bits_param; // @[Scheduler.scala:71:46]
wire [8:0] _mshrs_5_io_schedule_bits_b_bits_tag; // @[Scheduler.scala:71:46]
wire [10:0] _mshrs_5_io_schedule_bits_b_bits_set; // @[Scheduler.scala:71:46]
wire _mshrs_5_io_schedule_bits_b_bits_clients; // @[Scheduler.scala:71:46]
wire _mshrs_5_io_schedule_bits_c_valid; // @[Scheduler.scala:71:46]
wire [2:0] _mshrs_5_io_schedule_bits_c_bits_opcode; // @[Scheduler.scala:71:46]
wire [2:0] _mshrs_5_io_schedule_bits_c_bits_param; // @[Scheduler.scala:71:46]
wire [8:0] _mshrs_5_io_schedule_bits_c_bits_tag; // @[Scheduler.scala:71:46]
wire [10:0] _mshrs_5_io_schedule_bits_c_bits_set; // @[Scheduler.scala:71:46]
wire [3:0] _mshrs_5_io_schedule_bits_c_bits_way; // @[Scheduler.scala:71:46]
wire _mshrs_5_io_schedule_bits_c_bits_dirty; // @[Scheduler.scala:71:46]
wire _mshrs_5_io_schedule_bits_d_valid; // @[Scheduler.scala:71:46]
wire _mshrs_5_io_schedule_bits_d_bits_prio_0; // @[Scheduler.scala:71:46]
wire _mshrs_5_io_schedule_bits_d_bits_prio_1; // @[Scheduler.scala:71:46]
wire _mshrs_5_io_schedule_bits_d_bits_prio_2; // @[Scheduler.scala:71:46]
wire _mshrs_5_io_schedule_bits_d_bits_control; // @[Scheduler.scala:71:46]
wire [2:0] _mshrs_5_io_schedule_bits_d_bits_opcode; // @[Scheduler.scala:71:46]
wire [2:0] _mshrs_5_io_schedule_bits_d_bits_param; // @[Scheduler.scala:71:46]
wire [2:0] _mshrs_5_io_schedule_bits_d_bits_size; // @[Scheduler.scala:71:46]
wire [5:0] _mshrs_5_io_schedule_bits_d_bits_source; // @[Scheduler.scala:71:46]
wire [8:0] _mshrs_5_io_schedule_bits_d_bits_tag; // @[Scheduler.scala:71:46]
wire [5:0] _mshrs_5_io_schedule_bits_d_bits_offset; // @[Scheduler.scala:71:46]
wire [5:0] _mshrs_5_io_schedule_bits_d_bits_put; // @[Scheduler.scala:71:46]
wire [10:0] _mshrs_5_io_schedule_bits_d_bits_set; // @[Scheduler.scala:71:46]
wire [3:0] _mshrs_5_io_schedule_bits_d_bits_way; // @[Scheduler.scala:71:46]
wire _mshrs_5_io_schedule_bits_d_bits_bad; // @[Scheduler.scala:71:46]
wire _mshrs_5_io_schedule_bits_e_valid; // @[Scheduler.scala:71:46]
wire [2:0] _mshrs_5_io_schedule_bits_e_bits_sink; // @[Scheduler.scala:71:46]
wire _mshrs_5_io_schedule_bits_x_valid; // @[Scheduler.scala:71:46]
wire _mshrs_5_io_schedule_bits_dir_valid; // @[Scheduler.scala:71:46]
wire [10:0] _mshrs_5_io_schedule_bits_dir_bits_set; // @[Scheduler.scala:71:46]
wire [3:0] _mshrs_5_io_schedule_bits_dir_bits_way; // @[Scheduler.scala:71:46]
wire _mshrs_5_io_schedule_bits_dir_bits_data_dirty; // @[Scheduler.scala:71:46]
wire [1:0] _mshrs_5_io_schedule_bits_dir_bits_data_state; // @[Scheduler.scala:71:46]
wire _mshrs_5_io_schedule_bits_dir_bits_data_clients; // @[Scheduler.scala:71:46]
wire [8:0] _mshrs_5_io_schedule_bits_dir_bits_data_tag; // @[Scheduler.scala:71:46]
wire _mshrs_5_io_schedule_bits_reload; // @[Scheduler.scala:71:46]
wire _mshrs_4_io_status_valid; // @[Scheduler.scala:71:46]
wire [10:0] _mshrs_4_io_status_bits_set; // @[Scheduler.scala:71:46]
wire [8:0] _mshrs_4_io_status_bits_tag; // @[Scheduler.scala:71:46]
wire [3:0] _mshrs_4_io_status_bits_way; // @[Scheduler.scala:71:46]
wire _mshrs_4_io_status_bits_blockB; // @[Scheduler.scala:71:46]
wire _mshrs_4_io_status_bits_nestB; // @[Scheduler.scala:71:46]
wire _mshrs_4_io_status_bits_blockC; // @[Scheduler.scala:71:46]
wire _mshrs_4_io_status_bits_nestC; // @[Scheduler.scala:71:46]
wire _mshrs_4_io_schedule_valid; // @[Scheduler.scala:71:46]
wire _mshrs_4_io_schedule_bits_a_valid; // @[Scheduler.scala:71:46]
wire [8:0] _mshrs_4_io_schedule_bits_a_bits_tag; // @[Scheduler.scala:71:46]
wire [10:0] _mshrs_4_io_schedule_bits_a_bits_set; // @[Scheduler.scala:71:46]
wire [2:0] _mshrs_4_io_schedule_bits_a_bits_param; // @[Scheduler.scala:71:46]
wire _mshrs_4_io_schedule_bits_a_bits_block; // @[Scheduler.scala:71:46]
wire _mshrs_4_io_schedule_bits_b_valid; // @[Scheduler.scala:71:46]
wire [2:0] _mshrs_4_io_schedule_bits_b_bits_param; // @[Scheduler.scala:71:46]
wire [8:0] _mshrs_4_io_schedule_bits_b_bits_tag; // @[Scheduler.scala:71:46]
wire [10:0] _mshrs_4_io_schedule_bits_b_bits_set; // @[Scheduler.scala:71:46]
wire _mshrs_4_io_schedule_bits_b_bits_clients; // @[Scheduler.scala:71:46]
wire _mshrs_4_io_schedule_bits_c_valid; // @[Scheduler.scala:71:46]
wire [2:0] _mshrs_4_io_schedule_bits_c_bits_opcode; // @[Scheduler.scala:71:46]
wire [2:0] _mshrs_4_io_schedule_bits_c_bits_param; // @[Scheduler.scala:71:46]
wire [8:0] _mshrs_4_io_schedule_bits_c_bits_tag; // @[Scheduler.scala:71:46]
wire [10:0] _mshrs_4_io_schedule_bits_c_bits_set; // @[Scheduler.scala:71:46]
wire [3:0] _mshrs_4_io_schedule_bits_c_bits_way; // @[Scheduler.scala:71:46]
wire _mshrs_4_io_schedule_bits_c_bits_dirty; // @[Scheduler.scala:71:46]
wire _mshrs_4_io_schedule_bits_d_valid; // @[Scheduler.scala:71:46]
wire _mshrs_4_io_schedule_bits_d_bits_prio_0; // @[Scheduler.scala:71:46]
wire _mshrs_4_io_schedule_bits_d_bits_prio_1; // @[Scheduler.scala:71:46]
wire _mshrs_4_io_schedule_bits_d_bits_prio_2; // @[Scheduler.scala:71:46]
wire _mshrs_4_io_schedule_bits_d_bits_control; // @[Scheduler.scala:71:46]
wire [2:0] _mshrs_4_io_schedule_bits_d_bits_opcode; // @[Scheduler.scala:71:46]
wire [2:0] _mshrs_4_io_schedule_bits_d_bits_param; // @[Scheduler.scala:71:46]
wire [2:0] _mshrs_4_io_schedule_bits_d_bits_size; // @[Scheduler.scala:71:46]
wire [5:0] _mshrs_4_io_schedule_bits_d_bits_source; // @[Scheduler.scala:71:46]
wire [8:0] _mshrs_4_io_schedule_bits_d_bits_tag; // @[Scheduler.scala:71:46]
wire [5:0] _mshrs_4_io_schedule_bits_d_bits_offset; // @[Scheduler.scala:71:46]
wire [5:0] _mshrs_4_io_schedule_bits_d_bits_put; // @[Scheduler.scala:71:46]
wire [10:0] _mshrs_4_io_schedule_bits_d_bits_set; // @[Scheduler.scala:71:46]
wire [3:0] _mshrs_4_io_schedule_bits_d_bits_way; // @[Scheduler.scala:71:46]
wire _mshrs_4_io_schedule_bits_d_bits_bad; // @[Scheduler.scala:71:46]
wire _mshrs_4_io_schedule_bits_e_valid; // @[Scheduler.scala:71:46]
wire [2:0] _mshrs_4_io_schedule_bits_e_bits_sink; // @[Scheduler.scala:71:46]
wire _mshrs_4_io_schedule_bits_x_valid; // @[Scheduler.scala:71:46]
wire _mshrs_4_io_schedule_bits_dir_valid; // @[Scheduler.scala:71:46]
wire [10:0] _mshrs_4_io_schedule_bits_dir_bits_set; // @[Scheduler.scala:71:46]
wire [3:0] _mshrs_4_io_schedule_bits_dir_bits_way; // @[Scheduler.scala:71:46]
wire _mshrs_4_io_schedule_bits_dir_bits_data_dirty; // @[Scheduler.scala:71:46]
wire [1:0] _mshrs_4_io_schedule_bits_dir_bits_data_state; // @[Scheduler.scala:71:46]
wire _mshrs_4_io_schedule_bits_dir_bits_data_clients; // @[Scheduler.scala:71:46]
wire [8:0] _mshrs_4_io_schedule_bits_dir_bits_data_tag; // @[Scheduler.scala:71:46]
wire _mshrs_4_io_schedule_bits_reload; // @[Scheduler.scala:71:46]
wire _mshrs_3_io_status_valid; // @[Scheduler.scala:71:46]
wire [10:0] _mshrs_3_io_status_bits_set; // @[Scheduler.scala:71:46]
wire [8:0] _mshrs_3_io_status_bits_tag; // @[Scheduler.scala:71:46]
wire [3:0] _mshrs_3_io_status_bits_way; // @[Scheduler.scala:71:46]
wire _mshrs_3_io_status_bits_blockB; // @[Scheduler.scala:71:46]
wire _mshrs_3_io_status_bits_nestB; // @[Scheduler.scala:71:46]
wire _mshrs_3_io_status_bits_blockC; // @[Scheduler.scala:71:46]
wire _mshrs_3_io_status_bits_nestC; // @[Scheduler.scala:71:46]
wire _mshrs_3_io_schedule_valid; // @[Scheduler.scala:71:46]
wire _mshrs_3_io_schedule_bits_a_valid; // @[Scheduler.scala:71:46]
wire [8:0] _mshrs_3_io_schedule_bits_a_bits_tag; // @[Scheduler.scala:71:46]
wire [10:0] _mshrs_3_io_schedule_bits_a_bits_set; // @[Scheduler.scala:71:46]
wire [2:0] _mshrs_3_io_schedule_bits_a_bits_param; // @[Scheduler.scala:71:46]
wire _mshrs_3_io_schedule_bits_a_bits_block; // @[Scheduler.scala:71:46]
wire _mshrs_3_io_schedule_bits_b_valid; // @[Scheduler.scala:71:46]
wire [2:0] _mshrs_3_io_schedule_bits_b_bits_param; // @[Scheduler.scala:71:46]
wire [8:0] _mshrs_3_io_schedule_bits_b_bits_tag; // @[Scheduler.scala:71:46]
wire [10:0] _mshrs_3_io_schedule_bits_b_bits_set; // @[Scheduler.scala:71:46]
wire _mshrs_3_io_schedule_bits_b_bits_clients; // @[Scheduler.scala:71:46]
wire _mshrs_3_io_schedule_bits_c_valid; // @[Scheduler.scala:71:46]
wire [2:0] _mshrs_3_io_schedule_bits_c_bits_opcode; // @[Scheduler.scala:71:46]
wire [2:0] _mshrs_3_io_schedule_bits_c_bits_param; // @[Scheduler.scala:71:46]
wire [8:0] _mshrs_3_io_schedule_bits_c_bits_tag; // @[Scheduler.scala:71:46]
wire [10:0] _mshrs_3_io_schedule_bits_c_bits_set; // @[Scheduler.scala:71:46]
wire [3:0] _mshrs_3_io_schedule_bits_c_bits_way; // @[Scheduler.scala:71:46]
wire _mshrs_3_io_schedule_bits_c_bits_dirty; // @[Scheduler.scala:71:46]
wire _mshrs_3_io_schedule_bits_d_valid; // @[Scheduler.scala:71:46]
wire _mshrs_3_io_schedule_bits_d_bits_prio_0; // @[Scheduler.scala:71:46]
wire _mshrs_3_io_schedule_bits_d_bits_prio_1; // @[Scheduler.scala:71:46]
wire _mshrs_3_io_schedule_bits_d_bits_prio_2; // @[Scheduler.scala:71:46]
wire _mshrs_3_io_schedule_bits_d_bits_control; // @[Scheduler.scala:71:46]
wire [2:0] _mshrs_3_io_schedule_bits_d_bits_opcode; // @[Scheduler.scala:71:46]
wire [2:0] _mshrs_3_io_schedule_bits_d_bits_param; // @[Scheduler.scala:71:46]
wire [2:0] _mshrs_3_io_schedule_bits_d_bits_size; // @[Scheduler.scala:71:46]
wire [5:0] _mshrs_3_io_schedule_bits_d_bits_source; // @[Scheduler.scala:71:46]
wire [8:0] _mshrs_3_io_schedule_bits_d_bits_tag; // @[Scheduler.scala:71:46]
wire [5:0] _mshrs_3_io_schedule_bits_d_bits_offset; // @[Scheduler.scala:71:46]
wire [5:0] _mshrs_3_io_schedule_bits_d_bits_put; // @[Scheduler.scala:71:46]
wire [10:0] _mshrs_3_io_schedule_bits_d_bits_set; // @[Scheduler.scala:71:46]
wire [3:0] _mshrs_3_io_schedule_bits_d_bits_way; // @[Scheduler.scala:71:46]
wire _mshrs_3_io_schedule_bits_d_bits_bad; // @[Scheduler.scala:71:46]
wire _mshrs_3_io_schedule_bits_e_valid; // @[Scheduler.scala:71:46]
wire [2:0] _mshrs_3_io_schedule_bits_e_bits_sink; // @[Scheduler.scala:71:46]
wire _mshrs_3_io_schedule_bits_x_valid; // @[Scheduler.scala:71:46]
wire _mshrs_3_io_schedule_bits_dir_valid; // @[Scheduler.scala:71:46]
wire [10:0] _mshrs_3_io_schedule_bits_dir_bits_set; // @[Scheduler.scala:71:46]
wire [3:0] _mshrs_3_io_schedule_bits_dir_bits_way; // @[Scheduler.scala:71:46]
wire _mshrs_3_io_schedule_bits_dir_bits_data_dirty; // @[Scheduler.scala:71:46]
wire [1:0] _mshrs_3_io_schedule_bits_dir_bits_data_state; // @[Scheduler.scala:71:46]
wire _mshrs_3_io_schedule_bits_dir_bits_data_clients; // @[Scheduler.scala:71:46]
wire [8:0] _mshrs_3_io_schedule_bits_dir_bits_data_tag; // @[Scheduler.scala:71:46]
wire _mshrs_3_io_schedule_bits_reload; // @[Scheduler.scala:71:46]
wire _mshrs_2_io_status_valid; // @[Scheduler.scala:71:46]
wire [10:0] _mshrs_2_io_status_bits_set; // @[Scheduler.scala:71:46]
wire [8:0] _mshrs_2_io_status_bits_tag; // @[Scheduler.scala:71:46]
wire [3:0] _mshrs_2_io_status_bits_way; // @[Scheduler.scala:71:46]
wire _mshrs_2_io_status_bits_blockB; // @[Scheduler.scala:71:46]
wire _mshrs_2_io_status_bits_nestB; // @[Scheduler.scala:71:46]
wire _mshrs_2_io_status_bits_blockC; // @[Scheduler.scala:71:46]
wire _mshrs_2_io_status_bits_nestC; // @[Scheduler.scala:71:46]
wire _mshrs_2_io_schedule_valid; // @[Scheduler.scala:71:46]
wire _mshrs_2_io_schedule_bits_a_valid; // @[Scheduler.scala:71:46]
wire [8:0] _mshrs_2_io_schedule_bits_a_bits_tag; // @[Scheduler.scala:71:46]
wire [10:0] _mshrs_2_io_schedule_bits_a_bits_set; // @[Scheduler.scala:71:46]
wire [2:0] _mshrs_2_io_schedule_bits_a_bits_param; // @[Scheduler.scala:71:46]
wire _mshrs_2_io_schedule_bits_a_bits_block; // @[Scheduler.scala:71:46]
wire _mshrs_2_io_schedule_bits_b_valid; // @[Scheduler.scala:71:46]
wire [2:0] _mshrs_2_io_schedule_bits_b_bits_param; // @[Scheduler.scala:71:46]
wire [8:0] _mshrs_2_io_schedule_bits_b_bits_tag; // @[Scheduler.scala:71:46]
wire [10:0] _mshrs_2_io_schedule_bits_b_bits_set; // @[Scheduler.scala:71:46]
wire _mshrs_2_io_schedule_bits_b_bits_clients; // @[Scheduler.scala:71:46]
wire _mshrs_2_io_schedule_bits_c_valid; // @[Scheduler.scala:71:46]
wire [2:0] _mshrs_2_io_schedule_bits_c_bits_opcode; // @[Scheduler.scala:71:46]
wire [2:0] _mshrs_2_io_schedule_bits_c_bits_param; // @[Scheduler.scala:71:46]
wire [8:0] _mshrs_2_io_schedule_bits_c_bits_tag; // @[Scheduler.scala:71:46]
wire [10:0] _mshrs_2_io_schedule_bits_c_bits_set; // @[Scheduler.scala:71:46]
wire [3:0] _mshrs_2_io_schedule_bits_c_bits_way; // @[Scheduler.scala:71:46]
wire _mshrs_2_io_schedule_bits_c_bits_dirty; // @[Scheduler.scala:71:46]
wire _mshrs_2_io_schedule_bits_d_valid; // @[Scheduler.scala:71:46]
wire _mshrs_2_io_schedule_bits_d_bits_prio_0; // @[Scheduler.scala:71:46]
wire _mshrs_2_io_schedule_bits_d_bits_prio_1; // @[Scheduler.scala:71:46]
wire _mshrs_2_io_schedule_bits_d_bits_prio_2; // @[Scheduler.scala:71:46]
wire _mshrs_2_io_schedule_bits_d_bits_control; // @[Scheduler.scala:71:46]
wire [2:0] _mshrs_2_io_schedule_bits_d_bits_opcode; // @[Scheduler.scala:71:46]
wire [2:0] _mshrs_2_io_schedule_bits_d_bits_param; // @[Scheduler.scala:71:46]
wire [2:0] _mshrs_2_io_schedule_bits_d_bits_size; // @[Scheduler.scala:71:46]
wire [5:0] _mshrs_2_io_schedule_bits_d_bits_source; // @[Scheduler.scala:71:46]
wire [8:0] _mshrs_2_io_schedule_bits_d_bits_tag; // @[Scheduler.scala:71:46]
wire [5:0] _mshrs_2_io_schedule_bits_d_bits_offset; // @[Scheduler.scala:71:46]
wire [5:0] _mshrs_2_io_schedule_bits_d_bits_put; // @[Scheduler.scala:71:46]
wire [10:0] _mshrs_2_io_schedule_bits_d_bits_set; // @[Scheduler.scala:71:46]
wire [3:0] _mshrs_2_io_schedule_bits_d_bits_way; // @[Scheduler.scala:71:46]
wire _mshrs_2_io_schedule_bits_d_bits_bad; // @[Scheduler.scala:71:46]
wire _mshrs_2_io_schedule_bits_e_valid; // @[Scheduler.scala:71:46]
wire [2:0] _mshrs_2_io_schedule_bits_e_bits_sink; // @[Scheduler.scala:71:46]
wire _mshrs_2_io_schedule_bits_x_valid; // @[Scheduler.scala:71:46]
wire _mshrs_2_io_schedule_bits_dir_valid; // @[Scheduler.scala:71:46]
wire [10:0] _mshrs_2_io_schedule_bits_dir_bits_set; // @[Scheduler.scala:71:46]
wire [3:0] _mshrs_2_io_schedule_bits_dir_bits_way; // @[Scheduler.scala:71:46]
wire _mshrs_2_io_schedule_bits_dir_bits_data_dirty; // @[Scheduler.scala:71:46]
wire [1:0] _mshrs_2_io_schedule_bits_dir_bits_data_state; // @[Scheduler.scala:71:46]
wire _mshrs_2_io_schedule_bits_dir_bits_data_clients; // @[Scheduler.scala:71:46]
wire [8:0] _mshrs_2_io_schedule_bits_dir_bits_data_tag; // @[Scheduler.scala:71:46]
wire _mshrs_2_io_schedule_bits_reload; // @[Scheduler.scala:71:46]
wire _mshrs_1_io_status_valid; // @[Scheduler.scala:71:46]
wire [10:0] _mshrs_1_io_status_bits_set; // @[Scheduler.scala:71:46]
wire [8:0] _mshrs_1_io_status_bits_tag; // @[Scheduler.scala:71:46]
wire [3:0] _mshrs_1_io_status_bits_way; // @[Scheduler.scala:71:46]
wire _mshrs_1_io_status_bits_blockB; // @[Scheduler.scala:71:46]
wire _mshrs_1_io_status_bits_nestB; // @[Scheduler.scala:71:46]
wire _mshrs_1_io_status_bits_blockC; // @[Scheduler.scala:71:46]
wire _mshrs_1_io_status_bits_nestC; // @[Scheduler.scala:71:46]
wire _mshrs_1_io_schedule_valid; // @[Scheduler.scala:71:46]
wire _mshrs_1_io_schedule_bits_a_valid; // @[Scheduler.scala:71:46]
wire [8:0] _mshrs_1_io_schedule_bits_a_bits_tag; // @[Scheduler.scala:71:46]
wire [10:0] _mshrs_1_io_schedule_bits_a_bits_set; // @[Scheduler.scala:71:46]
wire [2:0] _mshrs_1_io_schedule_bits_a_bits_param; // @[Scheduler.scala:71:46]
wire _mshrs_1_io_schedule_bits_a_bits_block; // @[Scheduler.scala:71:46]
wire _mshrs_1_io_schedule_bits_b_valid; // @[Scheduler.scala:71:46]
wire [2:0] _mshrs_1_io_schedule_bits_b_bits_param; // @[Scheduler.scala:71:46]
wire [8:0] _mshrs_1_io_schedule_bits_b_bits_tag; // @[Scheduler.scala:71:46]
wire [10:0] _mshrs_1_io_schedule_bits_b_bits_set; // @[Scheduler.scala:71:46]
wire _mshrs_1_io_schedule_bits_b_bits_clients; // @[Scheduler.scala:71:46]
wire _mshrs_1_io_schedule_bits_c_valid; // @[Scheduler.scala:71:46]
wire [2:0] _mshrs_1_io_schedule_bits_c_bits_opcode; // @[Scheduler.scala:71:46]
wire [2:0] _mshrs_1_io_schedule_bits_c_bits_param; // @[Scheduler.scala:71:46]
wire [8:0] _mshrs_1_io_schedule_bits_c_bits_tag; // @[Scheduler.scala:71:46]
wire [10:0] _mshrs_1_io_schedule_bits_c_bits_set; // @[Scheduler.scala:71:46]
wire [3:0] _mshrs_1_io_schedule_bits_c_bits_way; // @[Scheduler.scala:71:46]
wire _mshrs_1_io_schedule_bits_c_bits_dirty; // @[Scheduler.scala:71:46]
wire _mshrs_1_io_schedule_bits_d_valid; // @[Scheduler.scala:71:46]
wire _mshrs_1_io_schedule_bits_d_bits_prio_0; // @[Scheduler.scala:71:46]
wire _mshrs_1_io_schedule_bits_d_bits_prio_1; // @[Scheduler.scala:71:46]
wire _mshrs_1_io_schedule_bits_d_bits_prio_2; // @[Scheduler.scala:71:46]
wire _mshrs_1_io_schedule_bits_d_bits_control; // @[Scheduler.scala:71:46]
wire [2:0] _mshrs_1_io_schedule_bits_d_bits_opcode; // @[Scheduler.scala:71:46]
wire [2:0] _mshrs_1_io_schedule_bits_d_bits_param; // @[Scheduler.scala:71:46]
wire [2:0] _mshrs_1_io_schedule_bits_d_bits_size; // @[Scheduler.scala:71:46]
wire [5:0] _mshrs_1_io_schedule_bits_d_bits_source; // @[Scheduler.scala:71:46]
wire [8:0] _mshrs_1_io_schedule_bits_d_bits_tag; // @[Scheduler.scala:71:46]
wire [5:0] _mshrs_1_io_schedule_bits_d_bits_offset; // @[Scheduler.scala:71:46]
wire [5:0] _mshrs_1_io_schedule_bits_d_bits_put; // @[Scheduler.scala:71:46]
wire [10:0] _mshrs_1_io_schedule_bits_d_bits_set; // @[Scheduler.scala:71:46]
wire [3:0] _mshrs_1_io_schedule_bits_d_bits_way; // @[Scheduler.scala:71:46]
wire _mshrs_1_io_schedule_bits_d_bits_bad; // @[Scheduler.scala:71:46]
wire _mshrs_1_io_schedule_bits_e_valid; // @[Scheduler.scala:71:46]
wire [2:0] _mshrs_1_io_schedule_bits_e_bits_sink; // @[Scheduler.scala:71:46]
wire _mshrs_1_io_schedule_bits_x_valid; // @[Scheduler.scala:71:46]
wire _mshrs_1_io_schedule_bits_dir_valid; // @[Scheduler.scala:71:46]
wire [10:0] _mshrs_1_io_schedule_bits_dir_bits_set; // @[Scheduler.scala:71:46]
wire [3:0] _mshrs_1_io_schedule_bits_dir_bits_way; // @[Scheduler.scala:71:46]
wire _mshrs_1_io_schedule_bits_dir_bits_data_dirty; // @[Scheduler.scala:71:46]
wire [1:0] _mshrs_1_io_schedule_bits_dir_bits_data_state; // @[Scheduler.scala:71:46]
wire _mshrs_1_io_schedule_bits_dir_bits_data_clients; // @[Scheduler.scala:71:46]
wire [8:0] _mshrs_1_io_schedule_bits_dir_bits_data_tag; // @[Scheduler.scala:71:46]
wire _mshrs_1_io_schedule_bits_reload; // @[Scheduler.scala:71:46]
wire _mshrs_0_io_status_valid; // @[Scheduler.scala:71:46]
wire [10:0] _mshrs_0_io_status_bits_set; // @[Scheduler.scala:71:46]
wire [8:0] _mshrs_0_io_status_bits_tag; // @[Scheduler.scala:71:46]
wire [3:0] _mshrs_0_io_status_bits_way; // @[Scheduler.scala:71:46]
wire _mshrs_0_io_status_bits_blockB; // @[Scheduler.scala:71:46]
wire _mshrs_0_io_status_bits_nestB; // @[Scheduler.scala:71:46]
wire _mshrs_0_io_status_bits_blockC; // @[Scheduler.scala:71:46]
wire _mshrs_0_io_status_bits_nestC; // @[Scheduler.scala:71:46]
wire _mshrs_0_io_schedule_valid; // @[Scheduler.scala:71:46]
wire _mshrs_0_io_schedule_bits_a_valid; // @[Scheduler.scala:71:46]
wire [8:0] _mshrs_0_io_schedule_bits_a_bits_tag; // @[Scheduler.scala:71:46]
wire [10:0] _mshrs_0_io_schedule_bits_a_bits_set; // @[Scheduler.scala:71:46]
wire [2:0] _mshrs_0_io_schedule_bits_a_bits_param; // @[Scheduler.scala:71:46]
wire _mshrs_0_io_schedule_bits_a_bits_block; // @[Scheduler.scala:71:46]
wire _mshrs_0_io_schedule_bits_b_valid; // @[Scheduler.scala:71:46]
wire [2:0] _mshrs_0_io_schedule_bits_b_bits_param; // @[Scheduler.scala:71:46]
wire [8:0] _mshrs_0_io_schedule_bits_b_bits_tag; // @[Scheduler.scala:71:46]
wire [10:0] _mshrs_0_io_schedule_bits_b_bits_set; // @[Scheduler.scala:71:46]
wire _mshrs_0_io_schedule_bits_b_bits_clients; // @[Scheduler.scala:71:46]
wire _mshrs_0_io_schedule_bits_c_valid; // @[Scheduler.scala:71:46]
wire [2:0] _mshrs_0_io_schedule_bits_c_bits_opcode; // @[Scheduler.scala:71:46]
wire [2:0] _mshrs_0_io_schedule_bits_c_bits_param; // @[Scheduler.scala:71:46]
wire [8:0] _mshrs_0_io_schedule_bits_c_bits_tag; // @[Scheduler.scala:71:46]
wire [10:0] _mshrs_0_io_schedule_bits_c_bits_set; // @[Scheduler.scala:71:46]
wire [3:0] _mshrs_0_io_schedule_bits_c_bits_way; // @[Scheduler.scala:71:46]
wire _mshrs_0_io_schedule_bits_c_bits_dirty; // @[Scheduler.scala:71:46]
wire _mshrs_0_io_schedule_bits_d_valid; // @[Scheduler.scala:71:46]
wire _mshrs_0_io_schedule_bits_d_bits_prio_0; // @[Scheduler.scala:71:46]
wire _mshrs_0_io_schedule_bits_d_bits_prio_1; // @[Scheduler.scala:71:46]
wire _mshrs_0_io_schedule_bits_d_bits_prio_2; // @[Scheduler.scala:71:46]
wire _mshrs_0_io_schedule_bits_d_bits_control; // @[Scheduler.scala:71:46]
wire [2:0] _mshrs_0_io_schedule_bits_d_bits_opcode; // @[Scheduler.scala:71:46]
wire [2:0] _mshrs_0_io_schedule_bits_d_bits_param; // @[Scheduler.scala:71:46]
wire [2:0] _mshrs_0_io_schedule_bits_d_bits_size; // @[Scheduler.scala:71:46]
wire [5:0] _mshrs_0_io_schedule_bits_d_bits_source; // @[Scheduler.scala:71:46]
wire [8:0] _mshrs_0_io_schedule_bits_d_bits_tag; // @[Scheduler.scala:71:46]
wire [5:0] _mshrs_0_io_schedule_bits_d_bits_offset; // @[Scheduler.scala:71:46]
wire [5:0] _mshrs_0_io_schedule_bits_d_bits_put; // @[Scheduler.scala:71:46]
wire [10:0] _mshrs_0_io_schedule_bits_d_bits_set; // @[Scheduler.scala:71:46]
wire [3:0] _mshrs_0_io_schedule_bits_d_bits_way; // @[Scheduler.scala:71:46]
wire _mshrs_0_io_schedule_bits_d_bits_bad; // @[Scheduler.scala:71:46]
wire _mshrs_0_io_schedule_bits_e_valid; // @[Scheduler.scala:71:46]
wire [2:0] _mshrs_0_io_schedule_bits_e_bits_sink; // @[Scheduler.scala:71:46]
wire _mshrs_0_io_schedule_bits_x_valid; // @[Scheduler.scala:71:46]
wire _mshrs_0_io_schedule_bits_dir_valid; // @[Scheduler.scala:71:46]
wire [10:0] _mshrs_0_io_schedule_bits_dir_bits_set; // @[Scheduler.scala:71:46]
wire [3:0] _mshrs_0_io_schedule_bits_dir_bits_way; // @[Scheduler.scala:71:46]
wire _mshrs_0_io_schedule_bits_dir_bits_data_dirty; // @[Scheduler.scala:71:46]
wire [1:0] _mshrs_0_io_schedule_bits_dir_bits_data_state; // @[Scheduler.scala:71:46]
wire _mshrs_0_io_schedule_bits_dir_bits_data_clients; // @[Scheduler.scala:71:46]
wire [8:0] _mshrs_0_io_schedule_bits_dir_bits_data_tag; // @[Scheduler.scala:71:46]
wire _mshrs_0_io_schedule_bits_reload; // @[Scheduler.scala:71:46]
wire _requests_io_push_ready; // @[Scheduler.scala:70:24]
wire [35:0] _requests_io_valid; // @[Scheduler.scala:70:24]
wire _requests_io_data_prio_0; // @[Scheduler.scala:70:24]
wire _requests_io_data_prio_1; // @[Scheduler.scala:70:24]
wire _requests_io_data_prio_2; // @[Scheduler.scala:70:24]
wire _requests_io_data_control; // @[Scheduler.scala:70:24]
wire [2:0] _requests_io_data_opcode; // @[Scheduler.scala:70:24]
wire [2:0] _requests_io_data_param; // @[Scheduler.scala:70:24]
wire [2:0] _requests_io_data_size; // @[Scheduler.scala:70:24]
wire [5:0] _requests_io_data_source; // @[Scheduler.scala:70:24]
wire [8:0] _requests_io_data_tag; // @[Scheduler.scala:70:24]
wire [5:0] _requests_io_data_offset; // @[Scheduler.scala:70:24]
wire [5:0] _requests_io_data_put; // @[Scheduler.scala:70:24]
wire _bankedStore_io_sinkC_adr_ready; // @[Scheduler.scala:69:27]
wire _bankedStore_io_sinkD_adr_ready; // @[Scheduler.scala:69:27]
wire _bankedStore_io_sourceC_adr_ready; // @[Scheduler.scala:69:27]
wire [63:0] _bankedStore_io_sourceC_dat_data; // @[Scheduler.scala:69:27]
wire _bankedStore_io_sourceD_radr_ready; // @[Scheduler.scala:69:27]
wire [127:0] _bankedStore_io_sourceD_rdat_data; // @[Scheduler.scala:69:27]
wire _bankedStore_io_sourceD_wadr_ready; // @[Scheduler.scala:69:27]
wire _directory_io_write_ready; // @[Scheduler.scala:68:25]
wire _directory_io_result_bits_dirty; // @[Scheduler.scala:68:25]
wire [1:0] _directory_io_result_bits_state; // @[Scheduler.scala:68:25]
wire _directory_io_result_bits_clients; // @[Scheduler.scala:68:25]
wire [8:0] _directory_io_result_bits_tag; // @[Scheduler.scala:68:25]
wire _directory_io_result_bits_hit; // @[Scheduler.scala:68:25]
wire [3:0] _directory_io_result_bits_way; // @[Scheduler.scala:68:25]
wire _directory_io_ready; // @[Scheduler.scala:68:25]
wire _sinkX_io_req_valid; // @[Scheduler.scala:58:21]
wire [8:0] _sinkX_io_req_bits_tag; // @[Scheduler.scala:58:21]
wire [10:0] _sinkX_io_req_bits_set; // @[Scheduler.scala:58:21]
wire _sinkE_io_resp_valid; // @[Scheduler.scala:57:21]
wire [3:0] _sinkE_io_resp_bits_sink; // @[Scheduler.scala:57:21]
wire _sinkD_io_resp_valid; // @[Scheduler.scala:56:21]
wire _sinkD_io_resp_bits_last; // @[Scheduler.scala:56:21]
wire [2:0] _sinkD_io_resp_bits_opcode; // @[Scheduler.scala:56:21]
wire [2:0] _sinkD_io_resp_bits_param; // @[Scheduler.scala:56:21]
wire [3:0] _sinkD_io_resp_bits_source; // @[Scheduler.scala:56:21]
wire [2:0] _sinkD_io_resp_bits_sink; // @[Scheduler.scala:56:21]
wire _sinkD_io_resp_bits_denied; // @[Scheduler.scala:56:21]
wire [3:0] _sinkD_io_source; // @[Scheduler.scala:56:21]
wire _sinkD_io_bs_adr_valid; // @[Scheduler.scala:56:21]
wire _sinkD_io_bs_adr_bits_noop; // @[Scheduler.scala:56:21]
wire [3:0] _sinkD_io_bs_adr_bits_way; // @[Scheduler.scala:56:21]
wire [10:0] _sinkD_io_bs_adr_bits_set; // @[Scheduler.scala:56:21]
wire [2:0] _sinkD_io_bs_adr_bits_beat; // @[Scheduler.scala:56:21]
wire [63:0] _sinkD_io_bs_dat_data; // @[Scheduler.scala:56:21]
wire [10:0] _sinkD_io_grant_req_set; // @[Scheduler.scala:56:21]
wire [3:0] _sinkD_io_grant_req_way; // @[Scheduler.scala:56:21]
wire _sinkC_io_req_valid; // @[Scheduler.scala:55:21]
wire [2:0] _sinkC_io_req_bits_opcode; // @[Scheduler.scala:55:21]
wire [2:0] _sinkC_io_req_bits_param; // @[Scheduler.scala:55:21]
wire [2:0] _sinkC_io_req_bits_size; // @[Scheduler.scala:55:21]
wire [5:0] _sinkC_io_req_bits_source; // @[Scheduler.scala:55:21]
wire [8:0] _sinkC_io_req_bits_tag; // @[Scheduler.scala:55:21]
wire [5:0] _sinkC_io_req_bits_offset; // @[Scheduler.scala:55:21]
wire [5:0] _sinkC_io_req_bits_put; // @[Scheduler.scala:55:21]
wire [10:0] _sinkC_io_req_bits_set; // @[Scheduler.scala:55:21]
wire _sinkC_io_resp_valid; // @[Scheduler.scala:55:21]
wire _sinkC_io_resp_bits_last; // @[Scheduler.scala:55:21]
wire [10:0] _sinkC_io_resp_bits_set; // @[Scheduler.scala:55:21]
wire [8:0] _sinkC_io_resp_bits_tag; // @[Scheduler.scala:55:21]
wire [5:0] _sinkC_io_resp_bits_source; // @[Scheduler.scala:55:21]
wire [2:0] _sinkC_io_resp_bits_param; // @[Scheduler.scala:55:21]
wire _sinkC_io_resp_bits_data; // @[Scheduler.scala:55:21]
wire [10:0] _sinkC_io_set; // @[Scheduler.scala:55:21]
wire _sinkC_io_bs_adr_valid; // @[Scheduler.scala:55:21]
wire _sinkC_io_bs_adr_bits_noop; // @[Scheduler.scala:55:21]
wire [3:0] _sinkC_io_bs_adr_bits_way; // @[Scheduler.scala:55:21]
wire [10:0] _sinkC_io_bs_adr_bits_set; // @[Scheduler.scala:55:21]
wire [1:0] _sinkC_io_bs_adr_bits_beat; // @[Scheduler.scala:55:21]
wire [1:0] _sinkC_io_bs_adr_bits_mask; // @[Scheduler.scala:55:21]
wire [127:0] _sinkC_io_bs_dat_data; // @[Scheduler.scala:55:21]
wire _sinkC_io_rel_pop_ready; // @[Scheduler.scala:55:21]
wire [127:0] _sinkC_io_rel_beat_data; // @[Scheduler.scala:55:21]
wire _sinkC_io_rel_beat_corrupt; // @[Scheduler.scala:55:21]
wire _sinkA_io_req_valid; // @[Scheduler.scala:54:21]
wire [2:0] _sinkA_io_req_bits_opcode; // @[Scheduler.scala:54:21]
wire [2:0] _sinkA_io_req_bits_param; // @[Scheduler.scala:54:21]
wire [2:0] _sinkA_io_req_bits_size; // @[Scheduler.scala:54:21]
wire [5:0] _sinkA_io_req_bits_source; // @[Scheduler.scala:54:21]
wire [8:0] _sinkA_io_req_bits_tag; // @[Scheduler.scala:54:21]
wire [5:0] _sinkA_io_req_bits_offset; // @[Scheduler.scala:54:21]
wire [5:0] _sinkA_io_req_bits_put; // @[Scheduler.scala:54:21]
wire [10:0] _sinkA_io_req_bits_set; // @[Scheduler.scala:54:21]
wire _sinkA_io_pb_pop_ready; // @[Scheduler.scala:54:21]
wire [127:0] _sinkA_io_pb_beat_data; // @[Scheduler.scala:54:21]
wire [15:0] _sinkA_io_pb_beat_mask; // @[Scheduler.scala:54:21]
wire _sinkA_io_pb_beat_corrupt; // @[Scheduler.scala:54:21]
wire _sourceX_io_req_ready; // @[Scheduler.scala:45:23]
wire _sourceE_io_req_ready; // @[Scheduler.scala:44:23]
wire _sourceD_io_req_ready; // @[Scheduler.scala:43:23]
wire _sourceD_io_pb_pop_valid; // @[Scheduler.scala:43:23]
wire [5:0] _sourceD_io_pb_pop_bits_index; // @[Scheduler.scala:43:23]
wire _sourceD_io_pb_pop_bits_last; // @[Scheduler.scala:43:23]
wire _sourceD_io_rel_pop_valid; // @[Scheduler.scala:43:23]
wire [5:0] _sourceD_io_rel_pop_bits_index; // @[Scheduler.scala:43:23]
wire _sourceD_io_rel_pop_bits_last; // @[Scheduler.scala:43:23]
wire _sourceD_io_bs_radr_valid; // @[Scheduler.scala:43:23]
wire [3:0] _sourceD_io_bs_radr_bits_way; // @[Scheduler.scala:43:23]
wire [10:0] _sourceD_io_bs_radr_bits_set; // @[Scheduler.scala:43:23]
wire [1:0] _sourceD_io_bs_radr_bits_beat; // @[Scheduler.scala:43:23]
wire [1:0] _sourceD_io_bs_radr_bits_mask; // @[Scheduler.scala:43:23]
wire _sourceD_io_bs_wadr_valid; // @[Scheduler.scala:43:23]
wire [3:0] _sourceD_io_bs_wadr_bits_way; // @[Scheduler.scala:43:23]
wire [10:0] _sourceD_io_bs_wadr_bits_set; // @[Scheduler.scala:43:23]
wire [1:0] _sourceD_io_bs_wadr_bits_beat; // @[Scheduler.scala:43:23]
wire [1:0] _sourceD_io_bs_wadr_bits_mask; // @[Scheduler.scala:43:23]
wire [127:0] _sourceD_io_bs_wdat_data; // @[Scheduler.scala:43:23]
wire _sourceD_io_evict_safe; // @[Scheduler.scala:43:23]
wire _sourceD_io_grant_safe; // @[Scheduler.scala:43:23]
wire _sourceC_io_req_ready; // @[Scheduler.scala:42:23]
wire _sourceC_io_bs_adr_valid; // @[Scheduler.scala:42:23]
wire [3:0] _sourceC_io_bs_adr_bits_way; // @[Scheduler.scala:42:23]
wire [10:0] _sourceC_io_bs_adr_bits_set; // @[Scheduler.scala:42:23]
wire [2:0] _sourceC_io_bs_adr_bits_beat; // @[Scheduler.scala:42:23]
wire [10:0] _sourceC_io_evict_req_set; // @[Scheduler.scala:42:23]
wire [3:0] _sourceC_io_evict_req_way; // @[Scheduler.scala:42:23]
wire _sourceB_io_req_ready; // @[Scheduler.scala:41:23]
wire _sourceA_io_req_ready; // @[Scheduler.scala:40:23]
wire io_in_a_valid_0 = io_in_a_valid; // @[Scheduler.scala:27:7]
wire [2:0] io_in_a_bits_opcode_0 = io_in_a_bits_opcode; // @[Scheduler.scala:27:7]
wire [2:0] io_in_a_bits_param_0 = io_in_a_bits_param; // @[Scheduler.scala:27:7]
wire [2:0] io_in_a_bits_size_0 = io_in_a_bits_size; // @[Scheduler.scala:27:7]
wire [5:0] io_in_a_bits_source_0 = io_in_a_bits_source; // @[Scheduler.scala:27:7]
wire [31:0] io_in_a_bits_address_0 = io_in_a_bits_address; // @[Scheduler.scala:27:7]
wire [15:0] io_in_a_bits_mask_0 = io_in_a_bits_mask; // @[Scheduler.scala:27:7]
wire [127:0] io_in_a_bits_data_0 = io_in_a_bits_data; // @[Scheduler.scala:27:7]
wire io_in_a_bits_corrupt_0 = io_in_a_bits_corrupt; // @[Scheduler.scala:27:7]
wire io_in_b_ready_0 = io_in_b_ready; // @[Scheduler.scala:27:7]
wire io_in_c_valid_0 = io_in_c_valid; // @[Scheduler.scala:27:7]
wire [2:0] io_in_c_bits_opcode_0 = io_in_c_bits_opcode; // @[Scheduler.scala:27:7]
wire [2:0] io_in_c_bits_param_0 = io_in_c_bits_param; // @[Scheduler.scala:27:7]
wire [2:0] io_in_c_bits_size_0 = io_in_c_bits_size; // @[Scheduler.scala:27:7]
wire [5:0] io_in_c_bits_source_0 = io_in_c_bits_source; // @[Scheduler.scala:27:7]
wire [31:0] io_in_c_bits_address_0 = io_in_c_bits_address; // @[Scheduler.scala:27:7]
wire [127:0] io_in_c_bits_data_0 = io_in_c_bits_data; // @[Scheduler.scala:27:7]
wire io_in_c_bits_corrupt_0 = io_in_c_bits_corrupt; // @[Scheduler.scala:27:7]
wire io_in_d_ready_0 = io_in_d_ready; // @[Scheduler.scala:27:7]
wire io_in_e_valid_0 = io_in_e_valid; // @[Scheduler.scala:27:7]
wire [3:0] io_in_e_bits_sink_0 = io_in_e_bits_sink; // @[Scheduler.scala:27:7]
wire io_out_a_ready_0 = io_out_a_ready; // @[Scheduler.scala:27:7]
wire io_out_c_ready_0 = io_out_c_ready; // @[Scheduler.scala:27:7]
wire io_out_d_valid_0 = io_out_d_valid; // @[Scheduler.scala:27:7]
wire [2:0] io_out_d_bits_opcode_0 = io_out_d_bits_opcode; // @[Scheduler.scala:27:7]
wire [1:0] io_out_d_bits_param_0 = io_out_d_bits_param; // @[Scheduler.scala:27:7]
wire [2:0] io_out_d_bits_size_0 = io_out_d_bits_size; // @[Scheduler.scala:27:7]
wire [3:0] io_out_d_bits_source_0 = io_out_d_bits_source; // @[Scheduler.scala:27:7]
wire [2:0] io_out_d_bits_sink_0 = io_out_d_bits_sink; // @[Scheduler.scala:27:7]
wire io_out_d_bits_denied_0 = io_out_d_bits_denied; // @[Scheduler.scala:27:7]
wire [63:0] io_out_d_bits_data_0 = io_out_d_bits_data; // @[Scheduler.scala:27:7]
wire io_out_d_bits_corrupt_0 = io_out_d_bits_corrupt; // @[Scheduler.scala:27:7]
wire io_req_valid_0 = io_req_valid; // @[Scheduler.scala:27:7]
wire [31:0] io_req_bits_address_0 = io_req_bits_address; // @[Scheduler.scala:27:7]
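  // Constants folded in at elaboration: the unused inner/outer B-channel fields, the never-asserted
  // X-response fail bit, and the always-false prio_1 bits are tied off here rather than computed.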
wire io_in_b_bits_corrupt = 1'h0; // @[Scheduler.scala:27:7]
wire io_out_b_valid = 1'h0; // @[Scheduler.scala:27:7]
wire io_out_b_bits_corrupt = 1'h0; // @[Scheduler.scala:27:7]
wire io_resp_bits_fail = 1'h0; // @[Scheduler.scala:27:7]
wire schedule_x_bits_fail = 1'h0; // @[Mux.scala:30:73]
wire _schedule_WIRE_11_bits_fail = 1'h0; // @[Mux.scala:30:73]
wire _schedule_WIRE_12_fail = 1'h0; // @[Mux.scala:30:73]
wire _schedule_T_196 = 1'h0; // @[Mux.scala:30:73]
wire _schedule_T_197 = 1'h0; // @[Mux.scala:30:73]
wire _schedule_T_198 = 1'h0; // @[Mux.scala:30:73]
wire _schedule_T_199 = 1'h0; // @[Mux.scala:30:73]
wire _schedule_T_200 = 1'h0; // @[Mux.scala:30:73]
wire _schedule_T_201 = 1'h0; // @[Mux.scala:30:73]
wire _schedule_T_202 = 1'h0; // @[Mux.scala:30:73]
wire _schedule_T_203 = 1'h0; // @[Mux.scala:30:73]
wire _schedule_T_204 = 1'h0; // @[Mux.scala:30:73]
wire _schedule_T_205 = 1'h0; // @[Mux.scala:30:73]
wire _schedule_T_206 = 1'h0; // @[Mux.scala:30:73]
wire _schedule_T_207 = 1'h0; // @[Mux.scala:30:73]
wire _schedule_T_208 = 1'h0; // @[Mux.scala:30:73]
wire _schedule_T_209 = 1'h0; // @[Mux.scala:30:73]
wire _schedule_T_210 = 1'h0; // @[Mux.scala:30:73]
wire _schedule_T_211 = 1'h0; // @[Mux.scala:30:73]
wire _schedule_T_212 = 1'h0; // @[Mux.scala:30:73]
wire _schedule_T_213 = 1'h0; // @[Mux.scala:30:73]
wire _schedule_T_214 = 1'h0; // @[Mux.scala:30:73]
wire _schedule_T_215 = 1'h0; // @[Mux.scala:30:73]
wire _schedule_T_216 = 1'h0; // @[Mux.scala:30:73]
wire _schedule_T_217 = 1'h0; // @[Mux.scala:30:73]
wire _schedule_T_218 = 1'h0; // @[Mux.scala:30:73]
wire _schedule_WIRE_13 = 1'h0; // @[Mux.scala:30:73]
wire _schedule_T_574 = 1'h0; // @[Mux.scala:30:73]
wire _schedule_T_575 = 1'h0; // @[Mux.scala:30:73]
wire _schedule_T_598 = 1'h0; // @[Mux.scala:30:73]
wire request_bits_prio_1 = 1'h0; // @[Scheduler.scala:163:21]
wire _request_bits_T_prio_1 = 1'h0; // @[Scheduler.scala:166:22]
wire _request_bits_T_prio_2 = 1'h0; // @[Scheduler.scala:166:22]
wire _request_bits_T_1_prio_1 = 1'h0; // @[Scheduler.scala:165:22]
wire blockB = 1'h0; // @[Scheduler.scala:175:70]
wire nestB = 1'h0; // @[Scheduler.scala:179:70]
wire _view__WIRE_prio_1 = 1'h0; // @[Scheduler.scala:233:95]
wire _view__WIRE_1_prio_1 = 1'h0; // @[Scheduler.scala:233:95]
wire _view__WIRE_2_prio_1 = 1'h0; // @[Scheduler.scala:233:95]
wire _view__WIRE_3_prio_1 = 1'h0; // @[Scheduler.scala:233:95]
wire _view__WIRE_4_prio_1 = 1'h0; // @[Scheduler.scala:233:95]
wire _view__WIRE_5_prio_1 = 1'h0; // @[Scheduler.scala:233:95]
wire _view__WIRE_6_prio_1 = 1'h0; // @[Scheduler.scala:233:95]
wire _view__WIRE_7_prio_1 = 1'h0; // @[Scheduler.scala:233:95]
wire _view__WIRE_8_prio_1 = 1'h0; // @[Scheduler.scala:233:95]
wire _view__WIRE_9_prio_1 = 1'h0; // @[Scheduler.scala:233:95]
wire _view__WIRE_10_prio_1 = 1'h0; // @[Scheduler.scala:233:95]
wire _view__WIRE_11_prio_1 = 1'h0; // @[Scheduler.scala:233:95]
wire _request_alloc_cases_T_4 = 1'h0; // @[Scheduler.scala:259:13]
wire _request_alloc_cases_T_6 = 1'h0; // @[Scheduler.scala:259:56]
wire _request_alloc_cases_T_8 = 1'h0; // @[Scheduler.scala:259:84]
wire [2:0] io_in_b_bits_opcode = 3'h6; // @[Scheduler.scala:27:7]
wire [2:0] io_in_b_bits_size = 3'h6; // @[Scheduler.scala:27:7]
wire [5:0] io_in_b_bits_source = 6'h28; // @[Scheduler.scala:27:7]
wire [15:0] io_in_b_bits_mask = 16'hFFFF; // @[Scheduler.scala:27:7]
wire [127:0] io_in_b_bits_data = 128'h0; // @[Scheduler.scala:27:7]
wire io_in_e_ready = 1'h1; // @[Scheduler.scala:27:7]
wire io_out_b_ready = 1'h1; // @[Scheduler.scala:27:7]
wire io_out_e_ready = 1'h1; // @[Scheduler.scala:27:7]
wire io_resp_ready = 1'h1; // @[Scheduler.scala:27:7]
wire _mshr_request_T_253 = 1'h1; // @[Scheduler.scala:107:28]
wire _request_bits_T_prio_0 = 1'h1; // @[Scheduler.scala:166:22]
wire _queue_T_1 = 1'h1; // @[Scheduler.scala:185:35]
wire _queue_T_5 = 1'h1; // @[Scheduler.scala:185:55]
wire [2:0] io_out_b_bits_opcode = 3'h0; // @[Scheduler.scala:27:7]
wire [2:0] io_out_b_bits_size = 3'h0; // @[Scheduler.scala:27:7]
wire [1:0] io_out_b_bits_param = 2'h0; // @[Scheduler.scala:27:7]
wire [3:0] io_out_b_bits_source = 4'h0; // @[Scheduler.scala:27:7]
wire [3:0] _schedule_WIRE_19_bits_sink = 4'h0; // @[Mux.scala:30:73]
wire [3:0] _schedule_WIRE_20_sink = 4'h0; // @[Mux.scala:30:73]
wire [3:0] _schedule_T_334 = 4'h0; // @[Mux.scala:30:73]
wire [3:0] _schedule_T_335 = 4'h0; // @[Mux.scala:30:73]
wire [3:0] _schedule_T_336 = 4'h0; // @[Mux.scala:30:73]
wire [3:0] _schedule_T_337 = 4'h0; // @[Mux.scala:30:73]
wire [3:0] _schedule_T_338 = 4'h0; // @[Mux.scala:30:73]
wire [3:0] _schedule_T_339 = 4'h0; // @[Mux.scala:30:73]
wire [3:0] _schedule_T_340 = 4'h0; // @[Mux.scala:30:73]
wire [3:0] _schedule_T_341 = 4'h0; // @[Mux.scala:30:73]
wire [3:0] _schedule_T_342 = 4'h0; // @[Mux.scala:30:73]
wire [3:0] _schedule_T_343 = 4'h0; // @[Mux.scala:30:73]
wire [3:0] _schedule_T_344 = 4'h0; // @[Mux.scala:30:73]
wire [3:0] _schedule_T_345 = 4'h0; // @[Mux.scala:30:73]
wire [3:0] _schedule_T_346 = 4'h0; // @[Mux.scala:30:73]
wire [3:0] _schedule_T_347 = 4'h0; // @[Mux.scala:30:73]
wire [3:0] _schedule_T_348 = 4'h0; // @[Mux.scala:30:73]
wire [3:0] _schedule_T_349 = 4'h0; // @[Mux.scala:30:73]
wire [3:0] _schedule_T_350 = 4'h0; // @[Mux.scala:30:73]
wire [3:0] _schedule_T_351 = 4'h0; // @[Mux.scala:30:73]
wire [3:0] _schedule_T_352 = 4'h0; // @[Mux.scala:30:73]
wire [3:0] _schedule_T_353 = 4'h0; // @[Mux.scala:30:73]
wire [3:0] _schedule_T_354 = 4'h0; // @[Mux.scala:30:73]
wire [3:0] _schedule_T_355 = 4'h0; // @[Mux.scala:30:73]
wire [3:0] _schedule_T_356 = 4'h0; // @[Mux.scala:30:73]
wire [3:0] _schedule_WIRE_23 = 4'h0; // @[Mux.scala:30:73]
wire [3:0] _schedule_WIRE_38_bits_source = 4'h0; // @[Mux.scala:30:73]
wire [3:0] _schedule_WIRE_39_source = 4'h0; // @[Mux.scala:30:73]
wire [3:0] _schedule_T_748 = 4'h0; // @[Mux.scala:30:73]
wire [3:0] _schedule_T_749 = 4'h0; // @[Mux.scala:30:73]
wire [3:0] _schedule_T_750 = 4'h0; // @[Mux.scala:30:73]
wire [3:0] _schedule_T_751 = 4'h0; // @[Mux.scala:30:73]
wire [3:0] _schedule_T_752 = 4'h0; // @[Mux.scala:30:73]
wire [3:0] _schedule_T_753 = 4'h0; // @[Mux.scala:30:73]
wire [3:0] _schedule_T_754 = 4'h0; // @[Mux.scala:30:73]
wire [3:0] _schedule_T_755 = 4'h0; // @[Mux.scala:30:73]
wire [3:0] _schedule_T_756 = 4'h0; // @[Mux.scala:30:73]
wire [3:0] _schedule_T_757 = 4'h0; // @[Mux.scala:30:73]
wire [3:0] _schedule_T_758 = 4'h0; // @[Mux.scala:30:73]
wire [3:0] _schedule_T_759 = 4'h0; // @[Mux.scala:30:73]
wire [3:0] _schedule_T_760 = 4'h0; // @[Mux.scala:30:73]
wire [3:0] _schedule_T_761 = 4'h0; // @[Mux.scala:30:73]
wire [3:0] _schedule_T_762 = 4'h0; // @[Mux.scala:30:73]
wire [3:0] _schedule_T_763 = 4'h0; // @[Mux.scala:30:73]
wire [3:0] _schedule_T_764 = 4'h0; // @[Mux.scala:30:73]
wire [3:0] _schedule_T_765 = 4'h0; // @[Mux.scala:30:73]
wire [3:0] _schedule_T_766 = 4'h0; // @[Mux.scala:30:73]
wire [3:0] _schedule_T_767 = 4'h0; // @[Mux.scala:30:73]
wire [3:0] _schedule_T_768 = 4'h0; // @[Mux.scala:30:73]
wire [3:0] _schedule_T_769 = 4'h0; // @[Mux.scala:30:73]
wire [3:0] _schedule_T_770 = 4'h0; // @[Mux.scala:30:73]
wire [3:0] _schedule_WIRE_44 = 4'h0; // @[Mux.scala:30:73]
wire [3:0] _schedule_WIRE_55_bits_source = 4'h0; // @[Mux.scala:30:73]
wire [3:0] _schedule_WIRE_56_source = 4'h0; // @[Mux.scala:30:73]
wire [3:0] _schedule_T_978 = 4'h0; // @[Mux.scala:30:73]
wire [3:0] _schedule_T_979 = 4'h0; // @[Mux.scala:30:73]
wire [3:0] _schedule_T_980 = 4'h0; // @[Mux.scala:30:73]
wire [3:0] _schedule_T_981 = 4'h0; // @[Mux.scala:30:73]
wire [3:0] _schedule_T_982 = 4'h0; // @[Mux.scala:30:73]
wire [3:0] _schedule_T_983 = 4'h0; // @[Mux.scala:30:73]
wire [3:0] _schedule_T_984 = 4'h0; // @[Mux.scala:30:73]
wire [3:0] _schedule_T_985 = 4'h0; // @[Mux.scala:30:73]
wire [3:0] _schedule_T_986 = 4'h0; // @[Mux.scala:30:73]
wire [3:0] _schedule_T_987 = 4'h0; // @[Mux.scala:30:73]
wire [3:0] _schedule_T_988 = 4'h0; // @[Mux.scala:30:73]
wire [3:0] _schedule_T_989 = 4'h0; // @[Mux.scala:30:73]
wire [3:0] _schedule_T_990 = 4'h0; // @[Mux.scala:30:73]
wire [3:0] _schedule_T_991 = 4'h0; // @[Mux.scala:30:73]
wire [3:0] _schedule_T_992 = 4'h0; // @[Mux.scala:30:73]
wire [3:0] _schedule_T_993 = 4'h0; // @[Mux.scala:30:73]
wire [3:0] _schedule_T_994 = 4'h0; // @[Mux.scala:30:73]
wire [3:0] _schedule_T_995 = 4'h0; // @[Mux.scala:30:73]
wire [3:0] _schedule_T_996 = 4'h0; // @[Mux.scala:30:73]
wire [3:0] _schedule_T_997 = 4'h0; // @[Mux.scala:30:73]
wire [3:0] _schedule_T_998 = 4'h0; // @[Mux.scala:30:73]
wire [3:0] _schedule_T_999 = 4'h0; // @[Mux.scala:30:73]
wire [3:0] _schedule_T_1000 = 4'h0; // @[Mux.scala:30:73]
wire [3:0] _schedule_WIRE_58 = 4'h0; // @[Mux.scala:30:73]
wire [31:0] io_out_b_bits_address = 32'h0; // @[Scheduler.scala:27:7]
wire [7:0] io_out_b_bits_mask = 8'h0; // @[Scheduler.scala:27:7]
wire [63:0] io_out_b_bits_data = 64'h0; // @[Scheduler.scala:27:7]
wire [15:0] io_ways_0 = 16'h0; // @[Scheduler.scala:27:7]
wire [15:0] io_ways_1 = 16'h0; // @[Scheduler.scala:27:7]
wire [15:0] io_ways_2 = 16'h0; // @[Scheduler.scala:27:7]
wire [15:0] io_ways_3 = 16'h0; // @[Scheduler.scala:27:7]
wire [15:0] io_ways_4 = 16'h0; // @[Scheduler.scala:27:7]
wire [15:0] io_ways_5 = 16'h0; // @[Scheduler.scala:27:7]
wire [15:0] io_ways_6 = 16'h0; // @[Scheduler.scala:27:7]
wire [15:0] io_ways_7 = 16'h0; // @[Scheduler.scala:27:7]
wire [10:0] io_divs_0 = 11'h0; // @[Scheduler.scala:27:7]
wire [10:0] io_divs_1 = 11'h0; // @[Scheduler.scala:27:7]
wire [10:0] io_divs_2 = 11'h0; // @[Scheduler.scala:27:7]
wire [10:0] io_divs_3 = 11'h0; // @[Scheduler.scala:27:7]
wire [10:0] io_divs_4 = 11'h0; // @[Scheduler.scala:27:7]
wire [10:0] io_divs_5 = 11'h0; // @[Scheduler.scala:27:7]
wire [10:0] io_divs_6 = 11'h0; // @[Scheduler.scala:27:7]
wire [10:0] io_divs_7 = 11'h0; // @[Scheduler.scala:27:7]
wire [11:0] _lowerMatches1_T_1 = 12'h800; // @[Scheduler.scala:200:43]
wire [11:0] _dirTarget_T = 12'h800; // @[Scheduler.scala:306:48]
wire [4:0] _requests_io_push_bits_index_T_43 = 5'h0; // @[Mux.scala:30:73]
wire [9:0] _prioFilter_T_1 = 10'h3FF; // @[Scheduler.scala:182:69]
wire [10:0] _lowerMatches1_T_3 = 11'h400; // @[Scheduler.scala:201:43]
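  // Internal copies of the module's output ports; these wires are declared here and driven further below.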
wire io_in_a_ready_0; // @[Scheduler.scala:27:7]
wire [1:0] io_in_b_bits_param_0; // @[Scheduler.scala:27:7]
wire [31:0] io_in_b_bits_address_0; // @[Scheduler.scala:27:7]
wire io_in_b_valid_0; // @[Scheduler.scala:27:7]
wire io_in_c_ready_0; // @[Scheduler.scala:27:7]
wire [2:0] io_in_d_bits_opcode_0; // @[Scheduler.scala:27:7]
wire [1:0] io_in_d_bits_param_0; // @[Scheduler.scala:27:7]
wire [2:0] io_in_d_bits_size_0; // @[Scheduler.scala:27:7]
wire [5:0] io_in_d_bits_source_0; // @[Scheduler.scala:27:7]
wire [3:0] io_in_d_bits_sink_0; // @[Scheduler.scala:27:7]
wire io_in_d_bits_denied_0; // @[Scheduler.scala:27:7]
wire [127:0] io_in_d_bits_data_0; // @[Scheduler.scala:27:7]
wire io_in_d_bits_corrupt_0; // @[Scheduler.scala:27:7]
wire io_in_d_valid_0; // @[Scheduler.scala:27:7]
wire [2:0] io_out_a_bits_opcode_0; // @[Scheduler.scala:27:7]
wire [2:0] io_out_a_bits_param_0; // @[Scheduler.scala:27:7]
wire [2:0] io_out_a_bits_size_0; // @[Scheduler.scala:27:7]
wire [3:0] io_out_a_bits_source_0; // @[Scheduler.scala:27:7]
wire [31:0] io_out_a_bits_address_0; // @[Scheduler.scala:27:7]
wire [7:0] io_out_a_bits_mask_0; // @[Scheduler.scala:27:7]
wire [63:0] io_out_a_bits_data_0; // @[Scheduler.scala:27:7]
wire io_out_a_bits_corrupt_0; // @[Scheduler.scala:27:7]
wire io_out_a_valid_0; // @[Scheduler.scala:27:7]
wire [2:0] io_out_c_bits_opcode_0; // @[Scheduler.scala:27:7]
wire [2:0] io_out_c_bits_param_0; // @[Scheduler.scala:27:7]
wire [2:0] io_out_c_bits_size_0; // @[Scheduler.scala:27:7]
wire [3:0] io_out_c_bits_source_0; // @[Scheduler.scala:27:7]
wire [31:0] io_out_c_bits_address_0; // @[Scheduler.scala:27:7]
wire [63:0] io_out_c_bits_data_0; // @[Scheduler.scala:27:7]
wire io_out_c_bits_corrupt_0; // @[Scheduler.scala:27:7]
wire io_out_c_valid_0; // @[Scheduler.scala:27:7]
wire io_out_d_ready_0; // @[Scheduler.scala:27:7]
wire [2:0] io_out_e_bits_sink_0; // @[Scheduler.scala:27:7]
wire io_out_e_valid_0; // @[Scheduler.scala:27:7]
wire io_req_ready_0; // @[Scheduler.scala:27:7]
wire io_resp_valid_0; // @[Scheduler.scala:27:7]
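  // Nested-writeback broadcast (Scheduler.scala:75): the set/tag under nested access and the
  // b_toN / b_toB / b_clr_dirty / c_set_dirty flags fanned out to every MSHR's io_nestedwb.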
wire [10:0] _nestedwb_set_T; // @[Scheduler.scala:155:24]
wire [8:0] _nestedwb_tag_T; // @[Scheduler.scala:156:24]
wire _nestedwb_b_toN_T_2; // @[Scheduler.scala:157:75]
wire _nestedwb_b_toB_T_2; // @[Scheduler.scala:158:75]
wire _nestedwb_b_clr_dirty_T; // @[Scheduler.scala:159:37]
wire _nestedwb_c_set_dirty_T_1; // @[Scheduler.scala:160:75]
wire [10:0] nestedwb_set; // @[Scheduler.scala:75:22]
wire [8:0] nestedwb_tag; // @[Scheduler.scala:75:22]
wire nestedwb_b_toN; // @[Scheduler.scala:75:22]
wire nestedwb_b_toB; // @[Scheduler.scala:75:22]
wire nestedwb_b_clr_dirty; // @[Scheduler.scala:75:22]
wire nestedwb_c_set_dirty; // @[Scheduler.scala:75:22]
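  // Sink-response steering (Scheduler.scala:79-81): each MSHR slot accepts a SinkC response when the
  // response set matches its status set, and a SinkD/SinkE response when the response source/sink
  // equals that slot's index.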
wire _mshrs_0_io_sinkc_valid_T = _sinkC_io_resp_bits_set == _mshrs_0_io_status_bits_set; // @[Scheduler.scala:55:21, :71:46, :79:71]
wire _mshrs_0_io_sinkc_valid_T_1 = _sinkC_io_resp_valid & _mshrs_0_io_sinkc_valid_T; // @[Scheduler.scala:55:21, :79:{45,71}]
wire _mshrs_0_io_sinkd_valid_T = _sinkD_io_resp_bits_source == 4'h0; // @[Scheduler.scala:56:21, :80:74]
wire _mshrs_0_io_sinkd_valid_T_1 = _sinkD_io_resp_valid & _mshrs_0_io_sinkd_valid_T; // @[Scheduler.scala:56:21, :80:{45,74}]
wire _mshrs_0_io_sinke_valid_T = _sinkE_io_resp_bits_sink == 4'h0; // @[Scheduler.scala:57:21, :81:74]
wire _mshrs_0_io_sinke_valid_T_1 = _sinkE_io_resp_valid & _mshrs_0_io_sinke_valid_T; // @[Scheduler.scala:57:21, :81:{45,74}]
wire _mshrs_1_io_sinkc_valid_T = _sinkC_io_resp_bits_set == _mshrs_1_io_status_bits_set; // @[Scheduler.scala:55:21, :71:46, :79:71]
wire _mshrs_1_io_sinkc_valid_T_1 = _sinkC_io_resp_valid & _mshrs_1_io_sinkc_valid_T; // @[Scheduler.scala:55:21, :79:{45,71}]
wire _mshrs_1_io_sinkd_valid_T = _sinkD_io_resp_bits_source == 4'h1; // @[Scheduler.scala:56:21, :80:74]
wire _mshrs_1_io_sinkd_valid_T_1 = _sinkD_io_resp_valid & _mshrs_1_io_sinkd_valid_T; // @[Scheduler.scala:56:21, :80:{45,74}]
wire _mshrs_1_io_sinke_valid_T = _sinkE_io_resp_bits_sink == 4'h1; // @[Scheduler.scala:57:21, :81:74]
wire _mshrs_1_io_sinke_valid_T_1 = _sinkE_io_resp_valid & _mshrs_1_io_sinke_valid_T; // @[Scheduler.scala:57:21, :81:{45,74}]
wire _mshrs_2_io_sinkc_valid_T = _sinkC_io_resp_bits_set == _mshrs_2_io_status_bits_set; // @[Scheduler.scala:55:21, :71:46, :79:71]
wire _mshrs_2_io_sinkc_valid_T_1 = _sinkC_io_resp_valid & _mshrs_2_io_sinkc_valid_T; // @[Scheduler.scala:55:21, :79:{45,71}]
wire _mshrs_2_io_sinkd_valid_T = _sinkD_io_resp_bits_source == 4'h2; // @[Scheduler.scala:56:21, :80:74]
wire _mshrs_2_io_sinkd_valid_T_1 = _sinkD_io_resp_valid & _mshrs_2_io_sinkd_valid_T; // @[Scheduler.scala:56:21, :80:{45,74}]
wire _mshrs_2_io_sinke_valid_T = _sinkE_io_resp_bits_sink == 4'h2; // @[Scheduler.scala:57:21, :81:74]
wire _mshrs_2_io_sinke_valid_T_1 = _sinkE_io_resp_valid & _mshrs_2_io_sinke_valid_T; // @[Scheduler.scala:57:21, :81:{45,74}]
wire _mshrs_3_io_sinkc_valid_T = _sinkC_io_resp_bits_set == _mshrs_3_io_status_bits_set; // @[Scheduler.scala:55:21, :71:46, :79:71]
wire _mshrs_3_io_sinkc_valid_T_1 = _sinkC_io_resp_valid & _mshrs_3_io_sinkc_valid_T; // @[Scheduler.scala:55:21, :79:{45,71}]
wire _mshrs_3_io_sinkd_valid_T = _sinkD_io_resp_bits_source == 4'h3; // @[Scheduler.scala:56:21, :80:74]
wire _mshrs_3_io_sinkd_valid_T_1 = _sinkD_io_resp_valid & _mshrs_3_io_sinkd_valid_T; // @[Scheduler.scala:56:21, :80:{45,74}]
wire _mshrs_3_io_sinke_valid_T = _sinkE_io_resp_bits_sink == 4'h3; // @[Scheduler.scala:57:21, :81:74]
wire _mshrs_3_io_sinke_valid_T_1 = _sinkE_io_resp_valid & _mshrs_3_io_sinke_valid_T; // @[Scheduler.scala:57:21, :81:{45,74}]
wire _mshrs_4_io_sinkc_valid_T = _sinkC_io_resp_bits_set == _mshrs_4_io_status_bits_set; // @[Scheduler.scala:55:21, :71:46, :79:71]
wire _mshrs_4_io_sinkc_valid_T_1 = _sinkC_io_resp_valid & _mshrs_4_io_sinkc_valid_T; // @[Scheduler.scala:55:21, :79:{45,71}]
wire _mshrs_4_io_sinkd_valid_T = _sinkD_io_resp_bits_source == 4'h4; // @[Scheduler.scala:56:21, :80:74]
wire _mshrs_4_io_sinkd_valid_T_1 = _sinkD_io_resp_valid & _mshrs_4_io_sinkd_valid_T; // @[Scheduler.scala:56:21, :80:{45,74}]
wire _mshrs_4_io_sinke_valid_T = _sinkE_io_resp_bits_sink == 4'h4; // @[Scheduler.scala:57:21, :81:74]
wire _mshrs_4_io_sinke_valid_T_1 = _sinkE_io_resp_valid & _mshrs_4_io_sinke_valid_T; // @[Scheduler.scala:57:21, :81:{45,74}]
wire _mshrs_5_io_sinkc_valid_T = _sinkC_io_resp_bits_set == _mshrs_5_io_status_bits_set; // @[Scheduler.scala:55:21, :71:46, :79:71]
wire _mshrs_5_io_sinkc_valid_T_1 = _sinkC_io_resp_valid & _mshrs_5_io_sinkc_valid_T; // @[Scheduler.scala:55:21, :79:{45,71}]
wire _mshrs_5_io_sinkd_valid_T = _sinkD_io_resp_bits_source == 4'h5; // @[Scheduler.scala:56:21, :80:74]
wire _mshrs_5_io_sinkd_valid_T_1 = _sinkD_io_resp_valid & _mshrs_5_io_sinkd_valid_T; // @[Scheduler.scala:56:21, :80:{45,74}]
wire _mshrs_5_io_sinke_valid_T = _sinkE_io_resp_bits_sink == 4'h5; // @[Scheduler.scala:57:21, :81:74]
wire _mshrs_5_io_sinke_valid_T_1 = _sinkE_io_resp_valid & _mshrs_5_io_sinke_valid_T; // @[Scheduler.scala:57:21, :81:{45,74}]
wire _mshrs_6_io_sinkc_valid_T = _sinkC_io_resp_bits_set == _mshrs_6_io_status_bits_set; // @[Scheduler.scala:55:21, :71:46, :79:71]
wire _mshrs_6_io_sinkc_valid_T_1 = _sinkC_io_resp_valid & _mshrs_6_io_sinkc_valid_T; // @[Scheduler.scala:55:21, :79:{45,71}]
wire _mshrs_6_io_sinkd_valid_T = _sinkD_io_resp_bits_source == 4'h6; // @[Scheduler.scala:56:21, :80:74]
wire _mshrs_6_io_sinkd_valid_T_1 = _sinkD_io_resp_valid & _mshrs_6_io_sinkd_valid_T; // @[Scheduler.scala:56:21, :80:{45,74}]
wire _mshrs_6_io_sinke_valid_T = _sinkE_io_resp_bits_sink == 4'h6; // @[Scheduler.scala:57:21, :81:74]
wire _mshrs_6_io_sinke_valid_T_1 = _sinkE_io_resp_valid & _mshrs_6_io_sinke_valid_T; // @[Scheduler.scala:57:21, :81:{45,74}]
wire _mshrs_7_io_sinkc_valid_T = _sinkC_io_resp_bits_set == _mshrs_7_io_status_bits_set; // @[Scheduler.scala:55:21, :71:46, :79:71]
wire _mshrs_7_io_sinkc_valid_T_1 = _sinkC_io_resp_valid & _mshrs_7_io_sinkc_valid_T; // @[Scheduler.scala:55:21, :79:{45,71}]
wire _mshrs_7_io_sinkd_valid_T = _sinkD_io_resp_bits_source == 4'h7; // @[Scheduler.scala:56:21, :80:74]
wire _mshrs_7_io_sinkd_valid_T_1 = _sinkD_io_resp_valid & _mshrs_7_io_sinkd_valid_T; // @[Scheduler.scala:56:21, :80:{45,74}]
wire _mshrs_7_io_sinke_valid_T = _sinkE_io_resp_bits_sink == 4'h7; // @[Scheduler.scala:57:21, :81:74]
wire _mshrs_7_io_sinke_valid_T_1 = _sinkE_io_resp_valid & _mshrs_7_io_sinke_valid_T; // @[Scheduler.scala:57:21, :81:{45,74}]
wire _mshrs_8_io_sinkc_valid_T = _sinkC_io_resp_bits_set == _mshrs_8_io_status_bits_set; // @[Scheduler.scala:55:21, :71:46, :79:71]
wire _mshrs_8_io_sinkc_valid_T_1 = _sinkC_io_resp_valid & _mshrs_8_io_sinkc_valid_T; // @[Scheduler.scala:55:21, :79:{45,71}]
wire _mshrs_8_io_sinkd_valid_T = _sinkD_io_resp_bits_source == 4'h8; // @[Scheduler.scala:56:21, :80:74]
wire _mshrs_8_io_sinkd_valid_T_1 = _sinkD_io_resp_valid & _mshrs_8_io_sinkd_valid_T; // @[Scheduler.scala:56:21, :80:{45,74}]
wire _mshrs_8_io_sinke_valid_T = _sinkE_io_resp_bits_sink == 4'h8; // @[Scheduler.scala:57:21, :81:74]
wire _mshrs_8_io_sinke_valid_T_1 = _sinkE_io_resp_valid & _mshrs_8_io_sinke_valid_T; // @[Scheduler.scala:57:21, :81:{45,74}]
wire _mshrs_9_io_sinkc_valid_T = _sinkC_io_resp_bits_set == _mshrs_9_io_status_bits_set; // @[Scheduler.scala:55:21, :71:46, :79:71]
wire _mshrs_9_io_sinkc_valid_T_1 = _sinkC_io_resp_valid & _mshrs_9_io_sinkc_valid_T; // @[Scheduler.scala:55:21, :79:{45,71}]
wire _mshrs_9_io_sinkd_valid_T = _sinkD_io_resp_bits_source == 4'h9; // @[Scheduler.scala:56:21, :80:74]
wire _mshrs_9_io_sinkd_valid_T_1 = _sinkD_io_resp_valid & _mshrs_9_io_sinkd_valid_T; // @[Scheduler.scala:56:21, :80:{45,74}]
wire _mshrs_9_io_sinke_valid_T = _sinkE_io_resp_bits_sink == 4'h9; // @[Scheduler.scala:57:21, :81:74]
wire _mshrs_9_io_sinke_valid_T_1 = _sinkE_io_resp_valid & _mshrs_9_io_sinke_valid_T; // @[Scheduler.scala:57:21, :81:{45,74}]
wire _mshrs_10_io_sinkc_valid_T = _sinkC_io_resp_bits_set == _mshrs_10_io_status_bits_set; // @[Scheduler.scala:55:21, :71:46, :79:71]
wire _mshrs_10_io_sinkc_valid_T_1 = _sinkC_io_resp_valid & _mshrs_10_io_sinkc_valid_T; // @[Scheduler.scala:55:21, :79:{45,71}]
wire _mshrs_10_io_sinkd_valid_T = _sinkD_io_resp_bits_source == 4'hA; // @[Scheduler.scala:56:21, :80:74]
wire _mshrs_10_io_sinkd_valid_T_1 = _sinkD_io_resp_valid & _mshrs_10_io_sinkd_valid_T; // @[Scheduler.scala:56:21, :80:{45,74}]
wire _mshrs_10_io_sinke_valid_T = _sinkE_io_resp_bits_sink == 4'hA; // @[Scheduler.scala:57:21, :81:74]
wire _mshrs_10_io_sinke_valid_T_1 = _sinkE_io_resp_valid & _mshrs_10_io_sinke_valid_T; // @[Scheduler.scala:57:21, :81:{45,74}]
wire _mshrs_11_io_sinkc_valid_T = _sinkC_io_resp_bits_set == _mshrs_11_io_status_bits_set; // @[Scheduler.scala:55:21, :71:46, :79:71]
wire _mshrs_11_io_sinkc_valid_T_1 = _sinkC_io_resp_valid & _mshrs_11_io_sinkc_valid_T; // @[Scheduler.scala:55:21, :79:{45,71}]
wire _mshrs_11_io_sinkd_valid_T = _sinkD_io_resp_bits_source == 4'hB; // @[Scheduler.scala:56:21, :80:74]
wire _mshrs_11_io_sinkd_valid_T_1 = _sinkD_io_resp_valid & _mshrs_11_io_sinkd_valid_T; // @[Scheduler.scala:56:21, :80:{45,74}]
wire _mshrs_11_io_sinke_valid_T = _sinkE_io_resp_bits_sink == 4'hB; // @[Scheduler.scala:57:21, :81:74]
wire _mshrs_11_io_sinke_valid_T_1 = _sinkE_io_resp_valid & _mshrs_11_io_sinke_valid_T; // @[Scheduler.scala:57:21, :81:{45,74}]
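  // Set-conflict stalls (Scheduler.scala:90-94): an abc MSHR (slots 0-9) is stalled while slot 10 (bc)
  // or slot 11 (c) holds a valid status for the same set; the bc slot itself stalls only on the c slot.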
wire _mshr_stall_abc_T = _mshrs_0_io_status_bits_set == _mshrs_10_io_status_bits_set; // @[Scheduler.scala:71:46, :90:54]
wire _mshr_stall_abc_T_1 = _mshrs_10_io_status_valid & _mshr_stall_abc_T; // @[Scheduler.scala:71:46, :90:{30,54}]
wire _mshr_stall_abc_T_2 = _mshrs_0_io_status_bits_set == _mshrs_11_io_status_bits_set; // @[Scheduler.scala:71:46, :91:54]
wire _mshr_stall_abc_T_3 = _mshrs_11_io_status_valid & _mshr_stall_abc_T_2; // @[Scheduler.scala:71:46, :91:{30,54}]
wire mshr_stall_abc_0 = _mshr_stall_abc_T_1 | _mshr_stall_abc_T_3; // @[Scheduler.scala:90:{30,86}, :91:30]
wire _mshr_stall_abc_T_4 = _mshrs_1_io_status_bits_set == _mshrs_10_io_status_bits_set; // @[Scheduler.scala:71:46, :90:54]
wire _mshr_stall_abc_T_5 = _mshrs_10_io_status_valid & _mshr_stall_abc_T_4; // @[Scheduler.scala:71:46, :90:{30,54}]
wire _mshr_stall_abc_T_6 = _mshrs_1_io_status_bits_set == _mshrs_11_io_status_bits_set; // @[Scheduler.scala:71:46, :91:54]
wire _mshr_stall_abc_T_7 = _mshrs_11_io_status_valid & _mshr_stall_abc_T_6; // @[Scheduler.scala:71:46, :91:{30,54}]
wire mshr_stall_abc_1 = _mshr_stall_abc_T_5 | _mshr_stall_abc_T_7; // @[Scheduler.scala:90:{30,86}, :91:30]
wire _mshr_stall_abc_T_8 = _mshrs_2_io_status_bits_set == _mshrs_10_io_status_bits_set; // @[Scheduler.scala:71:46, :90:54]
wire _mshr_stall_abc_T_9 = _mshrs_10_io_status_valid & _mshr_stall_abc_T_8; // @[Scheduler.scala:71:46, :90:{30,54}]
wire _mshr_stall_abc_T_10 = _mshrs_2_io_status_bits_set == _mshrs_11_io_status_bits_set; // @[Scheduler.scala:71:46, :91:54]
wire _mshr_stall_abc_T_11 = _mshrs_11_io_status_valid & _mshr_stall_abc_T_10; // @[Scheduler.scala:71:46, :91:{30,54}]
wire mshr_stall_abc_2 = _mshr_stall_abc_T_9 | _mshr_stall_abc_T_11; // @[Scheduler.scala:90:{30,86}, :91:30]
wire _mshr_stall_abc_T_12 = _mshrs_3_io_status_bits_set == _mshrs_10_io_status_bits_set; // @[Scheduler.scala:71:46, :90:54]
wire _mshr_stall_abc_T_13 = _mshrs_10_io_status_valid & _mshr_stall_abc_T_12; // @[Scheduler.scala:71:46, :90:{30,54}]
wire _mshr_stall_abc_T_14 = _mshrs_3_io_status_bits_set == _mshrs_11_io_status_bits_set; // @[Scheduler.scala:71:46, :91:54]
wire _mshr_stall_abc_T_15 = _mshrs_11_io_status_valid & _mshr_stall_abc_T_14; // @[Scheduler.scala:71:46, :91:{30,54}]
wire mshr_stall_abc_3 = _mshr_stall_abc_T_13 | _mshr_stall_abc_T_15; // @[Scheduler.scala:90:{30,86}, :91:30]
wire _mshr_stall_abc_T_16 = _mshrs_4_io_status_bits_set == _mshrs_10_io_status_bits_set; // @[Scheduler.scala:71:46, :90:54]
wire _mshr_stall_abc_T_17 = _mshrs_10_io_status_valid & _mshr_stall_abc_T_16; // @[Scheduler.scala:71:46, :90:{30,54}]
wire _mshr_stall_abc_T_18 = _mshrs_4_io_status_bits_set == _mshrs_11_io_status_bits_set; // @[Scheduler.scala:71:46, :91:54]
wire _mshr_stall_abc_T_19 = _mshrs_11_io_status_valid & _mshr_stall_abc_T_18; // @[Scheduler.scala:71:46, :91:{30,54}]
wire mshr_stall_abc_4 = _mshr_stall_abc_T_17 | _mshr_stall_abc_T_19; // @[Scheduler.scala:90:{30,86}, :91:30]
wire _mshr_stall_abc_T_20 = _mshrs_5_io_status_bits_set == _mshrs_10_io_status_bits_set; // @[Scheduler.scala:71:46, :90:54]
wire _mshr_stall_abc_T_21 = _mshrs_10_io_status_valid & _mshr_stall_abc_T_20; // @[Scheduler.scala:71:46, :90:{30,54}]
wire _mshr_stall_abc_T_22 = _mshrs_5_io_status_bits_set == _mshrs_11_io_status_bits_set; // @[Scheduler.scala:71:46, :91:54]
wire _mshr_stall_abc_T_23 = _mshrs_11_io_status_valid & _mshr_stall_abc_T_22; // @[Scheduler.scala:71:46, :91:{30,54}]
wire mshr_stall_abc_5 = _mshr_stall_abc_T_21 | _mshr_stall_abc_T_23; // @[Scheduler.scala:90:{30,86}, :91:30]
wire _mshr_stall_abc_T_24 = _mshrs_6_io_status_bits_set == _mshrs_10_io_status_bits_set; // @[Scheduler.scala:71:46, :90:54]
wire _mshr_stall_abc_T_25 = _mshrs_10_io_status_valid & _mshr_stall_abc_T_24; // @[Scheduler.scala:71:46, :90:{30,54}]
wire _mshr_stall_abc_T_26 = _mshrs_6_io_status_bits_set == _mshrs_11_io_status_bits_set; // @[Scheduler.scala:71:46, :91:54]
wire _mshr_stall_abc_T_27 = _mshrs_11_io_status_valid & _mshr_stall_abc_T_26; // @[Scheduler.scala:71:46, :91:{30,54}]
wire mshr_stall_abc_6 = _mshr_stall_abc_T_25 | _mshr_stall_abc_T_27; // @[Scheduler.scala:90:{30,86}, :91:30]
wire _mshr_stall_abc_T_28 = _mshrs_7_io_status_bits_set == _mshrs_10_io_status_bits_set; // @[Scheduler.scala:71:46, :90:54]
wire _mshr_stall_abc_T_29 = _mshrs_10_io_status_valid & _mshr_stall_abc_T_28; // @[Scheduler.scala:71:46, :90:{30,54}]
wire _mshr_stall_abc_T_30 = _mshrs_7_io_status_bits_set == _mshrs_11_io_status_bits_set; // @[Scheduler.scala:71:46, :91:54]
wire _mshr_stall_abc_T_31 = _mshrs_11_io_status_valid & _mshr_stall_abc_T_30; // @[Scheduler.scala:71:46, :91:{30,54}]
wire mshr_stall_abc_7 = _mshr_stall_abc_T_29 | _mshr_stall_abc_T_31; // @[Scheduler.scala:90:{30,86}, :91:30]
wire _mshr_stall_abc_T_32 = _mshrs_8_io_status_bits_set == _mshrs_10_io_status_bits_set; // @[Scheduler.scala:71:46, :90:54]
wire _mshr_stall_abc_T_33 = _mshrs_10_io_status_valid & _mshr_stall_abc_T_32; // @[Scheduler.scala:71:46, :90:{30,54}]
wire _mshr_stall_abc_T_34 = _mshrs_8_io_status_bits_set == _mshrs_11_io_status_bits_set; // @[Scheduler.scala:71:46, :91:54]
wire _mshr_stall_abc_T_35 = _mshrs_11_io_status_valid & _mshr_stall_abc_T_34; // @[Scheduler.scala:71:46, :91:{30,54}]
wire mshr_stall_abc_8 = _mshr_stall_abc_T_33 | _mshr_stall_abc_T_35; // @[Scheduler.scala:90:{30,86}, :91:30]
wire _mshr_stall_abc_T_36 = _mshrs_9_io_status_bits_set == _mshrs_10_io_status_bits_set; // @[Scheduler.scala:71:46, :90:54]
wire _mshr_stall_abc_T_37 = _mshrs_10_io_status_valid & _mshr_stall_abc_T_36; // @[Scheduler.scala:71:46, :90:{30,54}]
wire _mshr_stall_abc_T_38 = _mshrs_9_io_status_bits_set == _mshrs_11_io_status_bits_set; // @[Scheduler.scala:71:46, :91:54]
wire _mshr_stall_abc_T_39 = _mshrs_11_io_status_valid & _mshr_stall_abc_T_38; // @[Scheduler.scala:71:46, :91:{30,54}]
wire mshr_stall_abc_9 = _mshr_stall_abc_T_37 | _mshr_stall_abc_T_39; // @[Scheduler.scala:90:{30,86}, :91:30]
wire _mshr_stall_bc_T = _mshrs_10_io_status_bits_set == _mshrs_11_io_status_bits_set; // @[Scheduler.scala:71:46, :94:58]
wire mshr_stall_bc = _mshrs_11_io_status_valid & _mshr_stall_bc_T; // @[Scheduler.scala:71:46, :94:{28,58}]
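  // stall_abc_* qualify each stall with that MSHR's own status.valid (Scheduler.scala:99).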
wire stall_abc_0 = mshr_stall_abc_0 & _mshrs_0_io_status_valid; // @[Scheduler.scala:71:46, :90:86, :99:73]
wire stall_abc_1 = mshr_stall_abc_1 & _mshrs_1_io_status_valid; // @[Scheduler.scala:71:46, :90:86, :99:73]
wire stall_abc_2 = mshr_stall_abc_2 & _mshrs_2_io_status_valid; // @[Scheduler.scala:71:46, :90:86, :99:73]
wire stall_abc_3 = mshr_stall_abc_3 & _mshrs_3_io_status_valid; // @[Scheduler.scala:71:46, :90:86, :99:73]
wire stall_abc_4 = mshr_stall_abc_4 & _mshrs_4_io_status_valid; // @[Scheduler.scala:71:46, :90:86, :99:73]
wire stall_abc_5 = mshr_stall_abc_5 & _mshrs_5_io_status_valid; // @[Scheduler.scala:71:46, :90:86, :99:73]
wire stall_abc_6 = mshr_stall_abc_6 & _mshrs_6_io_status_valid; // @[Scheduler.scala:71:46, :90:86, :99:73]
wire stall_abc_7 = mshr_stall_abc_7 & _mshrs_7_io_status_valid; // @[Scheduler.scala:71:46, :90:86, :99:73]
wire stall_abc_8 = mshr_stall_abc_8 & _mshrs_8_io_status_valid; // @[Scheduler.scala:71:46, :90:86, :99:73]
wire stall_abc_9 = mshr_stall_abc_9 & _mshrs_9_io_status_valid; // @[Scheduler.scala:71:46, :90:86, :99:73]
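  // Per-MSHR request eligibility (Scheduler.scala:107-114): a slot's schedule may be selected only when
  // it is not stalled and every channel it wants to use (SourceA/B/C/D/E/X and the directory write port)
  // is either ready or not requested by that schedule.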
wire _mshr_request_T = ~mshr_stall_abc_0; // @[Scheduler.scala:90:86, :107:28]
wire _mshr_request_T_1 = _mshrs_0_io_schedule_valid & _mshr_request_T; // @[Scheduler.scala:71:46, :107:{25,28}]
wire _mshr_request_T_2 = ~_mshrs_0_io_schedule_bits_a_valid; // @[Scheduler.scala:71:46, :108:32]
wire _mshr_request_T_3 = _sourceA_io_req_ready | _mshr_request_T_2; // @[Scheduler.scala:40:23, :108:{29,32}]
wire _mshr_request_T_4 = _mshr_request_T_1 & _mshr_request_T_3; // @[Scheduler.scala:107:{25,31}, :108:29]
wire _mshr_request_T_5 = ~_mshrs_0_io_schedule_bits_b_valid; // @[Scheduler.scala:71:46, :109:32]
wire _mshr_request_T_6 = _sourceB_io_req_ready | _mshr_request_T_5; // @[Scheduler.scala:41:23, :109:{29,32}]
wire _mshr_request_T_7 = _mshr_request_T_4 & _mshr_request_T_6; // @[Scheduler.scala:107:31, :108:61, :109:29]
wire _mshr_request_T_8 = ~_mshrs_0_io_schedule_bits_c_valid; // @[Scheduler.scala:71:46, :110:32]
wire _mshr_request_T_9 = _sourceC_io_req_ready | _mshr_request_T_8; // @[Scheduler.scala:42:23, :110:{29,32}]
wire _mshr_request_T_10 = _mshr_request_T_7 & _mshr_request_T_9; // @[Scheduler.scala:108:61, :109:61, :110:29]
wire _mshr_request_T_11 = ~_mshrs_0_io_schedule_bits_d_valid; // @[Scheduler.scala:71:46, :111:32]
wire _mshr_request_T_12 = _sourceD_io_req_ready | _mshr_request_T_11; // @[Scheduler.scala:43:23, :111:{29,32}]
wire _mshr_request_T_13 = _mshr_request_T_10 & _mshr_request_T_12; // @[Scheduler.scala:109:61, :110:61, :111:29]
wire _mshr_request_T_14 = ~_mshrs_0_io_schedule_bits_e_valid; // @[Scheduler.scala:71:46, :112:32]
wire _mshr_request_T_15 = _sourceE_io_req_ready | _mshr_request_T_14; // @[Scheduler.scala:44:23, :112:{29,32}]
wire _mshr_request_T_16 = _mshr_request_T_13 & _mshr_request_T_15; // @[Scheduler.scala:110:61, :111:61, :112:29]
wire _mshr_request_T_17 = ~_mshrs_0_io_schedule_bits_x_valid; // @[Scheduler.scala:71:46, :113:32]
wire _mshr_request_T_18 = _sourceX_io_req_ready | _mshr_request_T_17; // @[Scheduler.scala:45:23, :113:{29,32}]
wire _mshr_request_T_19 = _mshr_request_T_16 & _mshr_request_T_18; // @[Scheduler.scala:111:61, :112:61, :113:29]
wire _mshr_request_T_20 = ~_mshrs_0_io_schedule_bits_dir_valid; // @[Scheduler.scala:71:46, :114:36]
wire _mshr_request_T_21 = _directory_io_write_ready | _mshr_request_T_20; // @[Scheduler.scala:68:25, :114:{33,36}]
wire _mshr_request_T_22 = _mshr_request_T_19 & _mshr_request_T_21; // @[Scheduler.scala:112:61, :113:61, :114:33]
wire _mshr_request_T_23 = ~mshr_stall_abc_1; // @[Scheduler.scala:90:86, :107:28]
wire _mshr_request_T_24 = _mshrs_1_io_schedule_valid & _mshr_request_T_23; // @[Scheduler.scala:71:46, :107:{25,28}]
wire _mshr_request_T_25 = ~_mshrs_1_io_schedule_bits_a_valid; // @[Scheduler.scala:71:46, :108:32]
wire _mshr_request_T_26 = _sourceA_io_req_ready | _mshr_request_T_25; // @[Scheduler.scala:40:23, :108:{29,32}]
wire _mshr_request_T_27 = _mshr_request_T_24 & _mshr_request_T_26; // @[Scheduler.scala:107:{25,31}, :108:29]
wire _mshr_request_T_28 = ~_mshrs_1_io_schedule_bits_b_valid; // @[Scheduler.scala:71:46, :109:32]
wire _mshr_request_T_29 = _sourceB_io_req_ready | _mshr_request_T_28; // @[Scheduler.scala:41:23, :109:{29,32}]
wire _mshr_request_T_30 = _mshr_request_T_27 & _mshr_request_T_29; // @[Scheduler.scala:107:31, :108:61, :109:29]
wire _mshr_request_T_31 = ~_mshrs_1_io_schedule_bits_c_valid; // @[Scheduler.scala:71:46, :110:32]
wire _mshr_request_T_32 = _sourceC_io_req_ready | _mshr_request_T_31; // @[Scheduler.scala:42:23, :110:{29,32}]
wire _mshr_request_T_33 = _mshr_request_T_30 & _mshr_request_T_32; // @[Scheduler.scala:108:61, :109:61, :110:29]
wire _mshr_request_T_34 = ~_mshrs_1_io_schedule_bits_d_valid; // @[Scheduler.scala:71:46, :111:32]
wire _mshr_request_T_35 = _sourceD_io_req_ready | _mshr_request_T_34; // @[Scheduler.scala:43:23, :111:{29,32}]
wire _mshr_request_T_36 = _mshr_request_T_33 & _mshr_request_T_35; // @[Scheduler.scala:109:61, :110:61, :111:29]
wire _mshr_request_T_37 = ~_mshrs_1_io_schedule_bits_e_valid; // @[Scheduler.scala:71:46, :112:32]
wire _mshr_request_T_38 = _sourceE_io_req_ready | _mshr_request_T_37; // @[Scheduler.scala:44:23, :112:{29,32}]
wire _mshr_request_T_39 = _mshr_request_T_36 & _mshr_request_T_38; // @[Scheduler.scala:110:61, :111:61, :112:29]
wire _mshr_request_T_40 = ~_mshrs_1_io_schedule_bits_x_valid; // @[Scheduler.scala:71:46, :113:32]
wire _mshr_request_T_41 = _sourceX_io_req_ready | _mshr_request_T_40; // @[Scheduler.scala:45:23, :113:{29,32}]
wire _mshr_request_T_42 = _mshr_request_T_39 & _mshr_request_T_41; // @[Scheduler.scala:111:61, :112:61, :113:29]
wire _mshr_request_T_43 = ~_mshrs_1_io_schedule_bits_dir_valid; // @[Scheduler.scala:71:46, :114:36]
wire _mshr_request_T_44 = _directory_io_write_ready | _mshr_request_T_43; // @[Scheduler.scala:68:25, :114:{33,36}]
wire _mshr_request_T_45 = _mshr_request_T_42 & _mshr_request_T_44; // @[Scheduler.scala:112:61, :113:61, :114:33]
wire _mshr_request_T_46 = ~mshr_stall_abc_2; // @[Scheduler.scala:90:86, :107:28]
wire _mshr_request_T_47 = _mshrs_2_io_schedule_valid & _mshr_request_T_46; // @[Scheduler.scala:71:46, :107:{25,28}]
wire _mshr_request_T_48 = ~_mshrs_2_io_schedule_bits_a_valid; // @[Scheduler.scala:71:46, :108:32]
wire _mshr_request_T_49 = _sourceA_io_req_ready | _mshr_request_T_48; // @[Scheduler.scala:40:23, :108:{29,32}]
wire _mshr_request_T_50 = _mshr_request_T_47 & _mshr_request_T_49; // @[Scheduler.scala:107:{25,31}, :108:29]
wire _mshr_request_T_51 = ~_mshrs_2_io_schedule_bits_b_valid; // @[Scheduler.scala:71:46, :109:32]
wire _mshr_request_T_52 = _sourceB_io_req_ready | _mshr_request_T_51; // @[Scheduler.scala:41:23, :109:{29,32}]
wire _mshr_request_T_53 = _mshr_request_T_50 & _mshr_request_T_52; // @[Scheduler.scala:107:31, :108:61, :109:29]
wire _mshr_request_T_54 = ~_mshrs_2_io_schedule_bits_c_valid; // @[Scheduler.scala:71:46, :110:32]
wire _mshr_request_T_55 = _sourceC_io_req_ready | _mshr_request_T_54; // @[Scheduler.scala:42:23, :110:{29,32}]
wire _mshr_request_T_56 = _mshr_request_T_53 & _mshr_request_T_55; // @[Scheduler.scala:108:61, :109:61, :110:29]
wire _mshr_request_T_57 = ~_mshrs_2_io_schedule_bits_d_valid; // @[Scheduler.scala:71:46, :111:32]
wire _mshr_request_T_58 = _sourceD_io_req_ready | _mshr_request_T_57; // @[Scheduler.scala:43:23, :111:{29,32}]
wire _mshr_request_T_59 = _mshr_request_T_56 & _mshr_request_T_58; // @[Scheduler.scala:109:61, :110:61, :111:29]
wire _mshr_request_T_60 = ~_mshrs_2_io_schedule_bits_e_valid; // @[Scheduler.scala:71:46, :112:32]
wire _mshr_request_T_61 = _sourceE_io_req_ready | _mshr_request_T_60; // @[Scheduler.scala:44:23, :112:{29,32}]
wire _mshr_request_T_62 = _mshr_request_T_59 & _mshr_request_T_61; // @[Scheduler.scala:110:61, :111:61, :112:29]
wire _mshr_request_T_63 = ~_mshrs_2_io_schedule_bits_x_valid; // @[Scheduler.scala:71:46, :113:32]
wire _mshr_request_T_64 = _sourceX_io_req_ready | _mshr_request_T_63; // @[Scheduler.scala:45:23, :113:{29,32}]
wire _mshr_request_T_65 = _mshr_request_T_62 & _mshr_request_T_64; // @[Scheduler.scala:111:61, :112:61, :113:29]
wire _mshr_request_T_66 = ~_mshrs_2_io_schedule_bits_dir_valid; // @[Scheduler.scala:71:46, :114:36]
wire _mshr_request_T_67 = _directory_io_write_ready | _mshr_request_T_66; // @[Scheduler.scala:68:25, :114:{33,36}]
wire _mshr_request_T_68 = _mshr_request_T_65 & _mshr_request_T_67; // @[Scheduler.scala:112:61, :113:61, :114:33]
wire _mshr_request_T_69 = ~mshr_stall_abc_3; // @[Scheduler.scala:90:86, :107:28]
wire _mshr_request_T_70 = _mshrs_3_io_schedule_valid & _mshr_request_T_69; // @[Scheduler.scala:71:46, :107:{25,28}]
wire _mshr_request_T_71 = ~_mshrs_3_io_schedule_bits_a_valid; // @[Scheduler.scala:71:46, :108:32]
wire _mshr_request_T_72 = _sourceA_io_req_ready | _mshr_request_T_71; // @[Scheduler.scala:40:23, :108:{29,32}]
wire _mshr_request_T_73 = _mshr_request_T_70 & _mshr_request_T_72; // @[Scheduler.scala:107:{25,31}, :108:29]
wire _mshr_request_T_74 = ~_mshrs_3_io_schedule_bits_b_valid; // @[Scheduler.scala:71:46, :109:32]
wire _mshr_request_T_75 = _sourceB_io_req_ready | _mshr_request_T_74; // @[Scheduler.scala:41:23, :109:{29,32}]
wire _mshr_request_T_76 = _mshr_request_T_73 & _mshr_request_T_75; // @[Scheduler.scala:107:31, :108:61, :109:29]
wire _mshr_request_T_77 = ~_mshrs_3_io_schedule_bits_c_valid; // @[Scheduler.scala:71:46, :110:32]
wire _mshr_request_T_78 = _sourceC_io_req_ready | _mshr_request_T_77; // @[Scheduler.scala:42:23, :110:{29,32}]
wire _mshr_request_T_79 = _mshr_request_T_76 & _mshr_request_T_78; // @[Scheduler.scala:108:61, :109:61, :110:29]
wire _mshr_request_T_80 = ~_mshrs_3_io_schedule_bits_d_valid; // @[Scheduler.scala:71:46, :111:32]
wire _mshr_request_T_81 = _sourceD_io_req_ready | _mshr_request_T_80; // @[Scheduler.scala:43:23, :111:{29,32}]
wire _mshr_request_T_82 = _mshr_request_T_79 & _mshr_request_T_81; // @[Scheduler.scala:109:61, :110:61, :111:29]
wire _mshr_request_T_83 = ~_mshrs_3_io_schedule_bits_e_valid; // @[Scheduler.scala:71:46, :112:32]
wire _mshr_request_T_84 = _sourceE_io_req_ready | _mshr_request_T_83; // @[Scheduler.scala:44:23, :112:{29,32}]
wire _mshr_request_T_85 = _mshr_request_T_82 & _mshr_request_T_84; // @[Scheduler.scala:110:61, :111:61, :112:29]
wire _mshr_request_T_86 = ~_mshrs_3_io_schedule_bits_x_valid; // @[Scheduler.scala:71:46, :113:32]
wire _mshr_request_T_87 = _sourceX_io_req_ready | _mshr_request_T_86; // @[Scheduler.scala:45:23, :113:{29,32}]
wire _mshr_request_T_88 = _mshr_request_T_85 & _mshr_request_T_87; // @[Scheduler.scala:111:61, :112:61, :113:29]
wire _mshr_request_T_89 = ~_mshrs_3_io_schedule_bits_dir_valid; // @[Scheduler.scala:71:46, :114:36]
wire _mshr_request_T_90 = _directory_io_write_ready | _mshr_request_T_89; // @[Scheduler.scala:68:25, :114:{33,36}]
wire _mshr_request_T_91 = _mshr_request_T_88 & _mshr_request_T_90; // @[Scheduler.scala:112:61, :113:61, :114:33]
wire _mshr_request_T_92 = ~mshr_stall_abc_4; // @[Scheduler.scala:90:86, :107:28]
wire _mshr_request_T_93 = _mshrs_4_io_schedule_valid & _mshr_request_T_92; // @[Scheduler.scala:71:46, :107:{25,28}]
wire _mshr_request_T_94 = ~_mshrs_4_io_schedule_bits_a_valid; // @[Scheduler.scala:71:46, :108:32]
wire _mshr_request_T_95 = _sourceA_io_req_ready | _mshr_request_T_94; // @[Scheduler.scala:40:23, :108:{29,32}]
wire _mshr_request_T_96 = _mshr_request_T_93 & _mshr_request_T_95; // @[Scheduler.scala:107:{25,31}, :108:29]
wire _mshr_request_T_97 = ~_mshrs_4_io_schedule_bits_b_valid; // @[Scheduler.scala:71:46, :109:32]
wire _mshr_request_T_98 = _sourceB_io_req_ready | _mshr_request_T_97; // @[Scheduler.scala:41:23, :109:{29,32}]
wire _mshr_request_T_99 = _mshr_request_T_96 & _mshr_request_T_98; // @[Scheduler.scala:107:31, :108:61, :109:29]
wire _mshr_request_T_100 = ~_mshrs_4_io_schedule_bits_c_valid; // @[Scheduler.scala:71:46, :110:32]
wire _mshr_request_T_101 = _sourceC_io_req_ready | _mshr_request_T_100; // @[Scheduler.scala:42:23, :110:{29,32}]
wire _mshr_request_T_102 = _mshr_request_T_99 & _mshr_request_T_101; // @[Scheduler.scala:108:61, :109:61, :110:29]
wire _mshr_request_T_103 = ~_mshrs_4_io_schedule_bits_d_valid; // @[Scheduler.scala:71:46, :111:32]
wire _mshr_request_T_104 = _sourceD_io_req_ready | _mshr_request_T_103; // @[Scheduler.scala:43:23, :111:{29,32}]
wire _mshr_request_T_105 = _mshr_request_T_102 & _mshr_request_T_104; // @[Scheduler.scala:109:61, :110:61, :111:29]
wire _mshr_request_T_106 = ~_mshrs_4_io_schedule_bits_e_valid; // @[Scheduler.scala:71:46, :112:32]
wire _mshr_request_T_107 = _sourceE_io_req_ready | _mshr_request_T_106; // @[Scheduler.scala:44:23, :112:{29,32}]
wire _mshr_request_T_108 = _mshr_request_T_105 & _mshr_request_T_107; // @[Scheduler.scala:110:61, :111:61, :112:29]
wire _mshr_request_T_109 = ~_mshrs_4_io_schedule_bits_x_valid; // @[Scheduler.scala:71:46, :113:32]
wire _mshr_request_T_110 = _sourceX_io_req_ready | _mshr_request_T_109; // @[Scheduler.scala:45:23, :113:{29,32}]
wire _mshr_request_T_111 = _mshr_request_T_108 & _mshr_request_T_110; // @[Scheduler.scala:111:61, :112:61, :113:29]
wire _mshr_request_T_112 = ~_mshrs_4_io_schedule_bits_dir_valid; // @[Scheduler.scala:71:46, :114:36]
wire _mshr_request_T_113 = _directory_io_write_ready | _mshr_request_T_112; // @[Scheduler.scala:68:25, :114:{33,36}]
wire _mshr_request_T_114 = _mshr_request_T_111 & _mshr_request_T_113; // @[Scheduler.scala:112:61, :113:61, :114:33]
wire _mshr_request_T_115 = ~mshr_stall_abc_5; // @[Scheduler.scala:90:86, :107:28]
wire _mshr_request_T_116 = _mshrs_5_io_schedule_valid & _mshr_request_T_115; // @[Scheduler.scala:71:46, :107:{25,28}]
wire _mshr_request_T_117 = ~_mshrs_5_io_schedule_bits_a_valid; // @[Scheduler.scala:71:46, :108:32]
wire _mshr_request_T_118 = _sourceA_io_req_ready | _mshr_request_T_117; // @[Scheduler.scala:40:23, :108:{29,32}]
wire _mshr_request_T_119 = _mshr_request_T_116 & _mshr_request_T_118; // @[Scheduler.scala:107:{25,31}, :108:29]
wire _mshr_request_T_120 = ~_mshrs_5_io_schedule_bits_b_valid; // @[Scheduler.scala:71:46, :109:32]
wire _mshr_request_T_121 = _sourceB_io_req_ready | _mshr_request_T_120; // @[Scheduler.scala:41:23, :109:{29,32}]
wire _mshr_request_T_122 = _mshr_request_T_119 & _mshr_request_T_121; // @[Scheduler.scala:107:31, :108:61, :109:29]
wire _mshr_request_T_123 = ~_mshrs_5_io_schedule_bits_c_valid; // @[Scheduler.scala:71:46, :110:32]
wire _mshr_request_T_124 = _sourceC_io_req_ready | _mshr_request_T_123; // @[Scheduler.scala:42:23, :110:{29,32}]
wire _mshr_request_T_125 = _mshr_request_T_122 & _mshr_request_T_124; // @[Scheduler.scala:108:61, :109:61, :110:29]
wire _mshr_request_T_126 = ~_mshrs_5_io_schedule_bits_d_valid; // @[Scheduler.scala:71:46, :111:32]
wire _mshr_request_T_127 = _sourceD_io_req_ready | _mshr_request_T_126; // @[Scheduler.scala:43:23, :111:{29,32}]
wire _mshr_request_T_128 = _mshr_request_T_125 & _mshr_request_T_127; // @[Scheduler.scala:109:61, :110:61, :111:29]
wire _mshr_request_T_129 = ~_mshrs_5_io_schedule_bits_e_valid; // @[Scheduler.scala:71:46, :112:32]
wire _mshr_request_T_130 = _sourceE_io_req_ready | _mshr_request_T_129; // @[Scheduler.scala:44:23, :112:{29,32}]
wire _mshr_request_T_131 = _mshr_request_T_128 & _mshr_request_T_130; // @[Scheduler.scala:110:61, :111:61, :112:29]
wire _mshr_request_T_132 = ~_mshrs_5_io_schedule_bits_x_valid; // @[Scheduler.scala:71:46, :113:32]
wire _mshr_request_T_133 = _sourceX_io_req_ready | _mshr_request_T_132; // @[Scheduler.scala:45:23, :113:{29,32}]
wire _mshr_request_T_134 = _mshr_request_T_131 & _mshr_request_T_133; // @[Scheduler.scala:111:61, :112:61, :113:29]
wire _mshr_request_T_135 = ~_mshrs_5_io_schedule_bits_dir_valid; // @[Scheduler.scala:71:46, :114:36]
wire _mshr_request_T_136 = _directory_io_write_ready | _mshr_request_T_135; // @[Scheduler.scala:68:25, :114:{33,36}]
wire _mshr_request_T_137 = _mshr_request_T_134 & _mshr_request_T_136; // @[Scheduler.scala:112:61, :113:61, :114:33]
wire _mshr_request_T_138 = ~mshr_stall_abc_6; // @[Scheduler.scala:90:86, :107:28]
wire _mshr_request_T_139 = _mshrs_6_io_schedule_valid & _mshr_request_T_138; // @[Scheduler.scala:71:46, :107:{25,28}]
wire _mshr_request_T_140 = ~_mshrs_6_io_schedule_bits_a_valid; // @[Scheduler.scala:71:46, :108:32]
wire _mshr_request_T_141 = _sourceA_io_req_ready | _mshr_request_T_140; // @[Scheduler.scala:40:23, :108:{29,32}]
wire _mshr_request_T_142 = _mshr_request_T_139 & _mshr_request_T_141; // @[Scheduler.scala:107:{25,31}, :108:29]
wire _mshr_request_T_143 = ~_mshrs_6_io_schedule_bits_b_valid; // @[Scheduler.scala:71:46, :109:32]
wire _mshr_request_T_144 = _sourceB_io_req_ready | _mshr_request_T_143; // @[Scheduler.scala:41:23, :109:{29,32}]
wire _mshr_request_T_145 = _mshr_request_T_142 & _mshr_request_T_144; // @[Scheduler.scala:107:31, :108:61, :109:29]
wire _mshr_request_T_146 = ~_mshrs_6_io_schedule_bits_c_valid; // @[Scheduler.scala:71:46, :110:32]
wire _mshr_request_T_147 = _sourceC_io_req_ready | _mshr_request_T_146; // @[Scheduler.scala:42:23, :110:{29,32}]
wire _mshr_request_T_148 = _mshr_request_T_145 & _mshr_request_T_147; // @[Scheduler.scala:108:61, :109:61, :110:29]
wire _mshr_request_T_149 = ~_mshrs_6_io_schedule_bits_d_valid; // @[Scheduler.scala:71:46, :111:32]
wire _mshr_request_T_150 = _sourceD_io_req_ready | _mshr_request_T_149; // @[Scheduler.scala:43:23, :111:{29,32}]
wire _mshr_request_T_151 = _mshr_request_T_148 & _mshr_request_T_150; // @[Scheduler.scala:109:61, :110:61, :111:29]
wire _mshr_request_T_152 = ~_mshrs_6_io_schedule_bits_e_valid; // @[Scheduler.scala:71:46, :112:32]
wire _mshr_request_T_153 = _sourceE_io_req_ready | _mshr_request_T_152; // @[Scheduler.scala:44:23, :112:{29,32}]
wire _mshr_request_T_154 = _mshr_request_T_151 & _mshr_request_T_153; // @[Scheduler.scala:110:61, :111:61, :112:29]
wire _mshr_request_T_155 = ~_mshrs_6_io_schedule_bits_x_valid; // @[Scheduler.scala:71:46, :113:32]
wire _mshr_request_T_156 = _sourceX_io_req_ready | _mshr_request_T_155; // @[Scheduler.scala:45:23, :113:{29,32}]
wire _mshr_request_T_157 = _mshr_request_T_154 & _mshr_request_T_156; // @[Scheduler.scala:111:61, :112:61, :113:29]
wire _mshr_request_T_158 = ~_mshrs_6_io_schedule_bits_dir_valid; // @[Scheduler.scala:71:46, :114:36]
wire _mshr_request_T_159 = _directory_io_write_ready | _mshr_request_T_158; // @[Scheduler.scala:68:25, :114:{33,36}]
wire _mshr_request_T_160 = _mshr_request_T_157 & _mshr_request_T_159; // @[Scheduler.scala:112:61, :113:61, :114:33]
wire _mshr_request_T_161 = ~mshr_stall_abc_7; // @[Scheduler.scala:90:86, :107:28]
wire _mshr_request_T_162 = _mshrs_7_io_schedule_valid & _mshr_request_T_161; // @[Scheduler.scala:71:46, :107:{25,28}]
wire _mshr_request_T_163 = ~_mshrs_7_io_schedule_bits_a_valid; // @[Scheduler.scala:71:46, :108:32]
wire _mshr_request_T_164 = _sourceA_io_req_ready | _mshr_request_T_163; // @[Scheduler.scala:40:23, :108:{29,32}]
wire _mshr_request_T_165 = _mshr_request_T_162 & _mshr_request_T_164; // @[Scheduler.scala:107:{25,31}, :108:29]
wire _mshr_request_T_166 = ~_mshrs_7_io_schedule_bits_b_valid; // @[Scheduler.scala:71:46, :109:32]
wire _mshr_request_T_167 = _sourceB_io_req_ready | _mshr_request_T_166; // @[Scheduler.scala:41:23, :109:{29,32}]
wire _mshr_request_T_168 = _mshr_request_T_165 & _mshr_request_T_167; // @[Scheduler.scala:107:31, :108:61, :109:29]
wire _mshr_request_T_169 = ~_mshrs_7_io_schedule_bits_c_valid; // @[Scheduler.scala:71:46, :110:32]
wire _mshr_request_T_170 = _sourceC_io_req_ready | _mshr_request_T_169; // @[Scheduler.scala:42:23, :110:{29,32}]
wire _mshr_request_T_171 = _mshr_request_T_168 & _mshr_request_T_170; // @[Scheduler.scala:108:61, :109:61, :110:29]
wire _mshr_request_T_172 = ~_mshrs_7_io_schedule_bits_d_valid; // @[Scheduler.scala:71:46, :111:32]
wire _mshr_request_T_173 = _sourceD_io_req_ready | _mshr_request_T_172; // @[Scheduler.scala:43:23, :111:{29,32}]
wire _mshr_request_T_174 = _mshr_request_T_171 & _mshr_request_T_173; // @[Scheduler.scala:109:61, :110:61, :111:29]
wire _mshr_request_T_175 = ~_mshrs_7_io_schedule_bits_e_valid; // @[Scheduler.scala:71:46, :112:32]
wire _mshr_request_T_176 = _sourceE_io_req_ready | _mshr_request_T_175; // @[Scheduler.scala:44:23, :112:{29,32}]
wire _mshr_request_T_177 = _mshr_request_T_174 & _mshr_request_T_176; // @[Scheduler.scala:110:61, :111:61, :112:29]
wire _mshr_request_T_178 = ~_mshrs_7_io_schedule_bits_x_valid; // @[Scheduler.scala:71:46, :113:32]
wire _mshr_request_T_179 = _sourceX_io_req_ready | _mshr_request_T_178; // @[Scheduler.scala:45:23, :113:{29,32}]
wire _mshr_request_T_180 = _mshr_request_T_177 & _mshr_request_T_179; // @[Scheduler.scala:111:61, :112:61, :113:29]
wire _mshr_request_T_181 = ~_mshrs_7_io_schedule_bits_dir_valid; // @[Scheduler.scala:71:46, :114:36]
wire _mshr_request_T_182 = _directory_io_write_ready | _mshr_request_T_181; // @[Scheduler.scala:68:25, :114:{33,36}]
wire _mshr_request_T_183 = _mshr_request_T_180 & _mshr_request_T_182; // @[Scheduler.scala:112:61, :113:61, :114:33]
wire _mshr_request_T_184 = ~mshr_stall_abc_8; // @[Scheduler.scala:90:86, :107:28]
wire _mshr_request_T_185 = _mshrs_8_io_schedule_valid & _mshr_request_T_184; // @[Scheduler.scala:71:46, :107:{25,28}]
wire _mshr_request_T_186 = ~_mshrs_8_io_schedule_bits_a_valid; // @[Scheduler.scala:71:46, :108:32]
wire _mshr_request_T_187 = _sourceA_io_req_ready | _mshr_request_T_186; // @[Scheduler.scala:40:23, :108:{29,32}]
wire _mshr_request_T_188 = _mshr_request_T_185 & _mshr_request_T_187; // @[Scheduler.scala:107:{25,31}, :108:29]
wire _mshr_request_T_189 = ~_mshrs_8_io_schedule_bits_b_valid; // @[Scheduler.scala:71:46, :109:32]
wire _mshr_request_T_190 = _sourceB_io_req_ready | _mshr_request_T_189; // @[Scheduler.scala:41:23, :109:{29,32}]
wire _mshr_request_T_191 = _mshr_request_T_188 & _mshr_request_T_190; // @[Scheduler.scala:107:31, :108:61, :109:29]
wire _mshr_request_T_192 = ~_mshrs_8_io_schedule_bits_c_valid; // @[Scheduler.scala:71:46, :110:32]
wire _mshr_request_T_193 = _sourceC_io_req_ready | _mshr_request_T_192; // @[Scheduler.scala:42:23, :110:{29,32}]
wire _mshr_request_T_194 = _mshr_request_T_191 & _mshr_request_T_193; // @[Scheduler.scala:108:61, :109:61, :110:29]
wire _mshr_request_T_195 = ~_mshrs_8_io_schedule_bits_d_valid; // @[Scheduler.scala:71:46, :111:32]
wire _mshr_request_T_196 = _sourceD_io_req_ready | _mshr_request_T_195; // @[Scheduler.scala:43:23, :111:{29,32}]
wire _mshr_request_T_197 = _mshr_request_T_194 & _mshr_request_T_196; // @[Scheduler.scala:109:61, :110:61, :111:29]
wire _mshr_request_T_198 = ~_mshrs_8_io_schedule_bits_e_valid; // @[Scheduler.scala:71:46, :112:32]
wire _mshr_request_T_199 = _sourceE_io_req_ready | _mshr_request_T_198; // @[Scheduler.scala:44:23, :112:{29,32}]
wire _mshr_request_T_200 = _mshr_request_T_197 & _mshr_request_T_199; // @[Scheduler.scala:110:61, :111:61, :112:29]
wire _mshr_request_T_201 = ~_mshrs_8_io_schedule_bits_x_valid; // @[Scheduler.scala:71:46, :113:32]
wire _mshr_request_T_202 = _sourceX_io_req_ready | _mshr_request_T_201; // @[Scheduler.scala:45:23, :113:{29,32}]
wire _mshr_request_T_203 = _mshr_request_T_200 & _mshr_request_T_202; // @[Scheduler.scala:111:61, :112:61, :113:29]
wire _mshr_request_T_204 = ~_mshrs_8_io_schedule_bits_dir_valid; // @[Scheduler.scala:71:46, :114:36]
wire _mshr_request_T_205 = _directory_io_write_ready | _mshr_request_T_204; // @[Scheduler.scala:68:25, :114:{33,36}]
wire _mshr_request_T_206 = _mshr_request_T_203 & _mshr_request_T_205; // @[Scheduler.scala:112:61, :113:61, :114:33]
wire _mshr_request_T_207 = ~mshr_stall_abc_9; // @[Scheduler.scala:90:86, :107:28]
wire _mshr_request_T_208 = _mshrs_9_io_schedule_valid & _mshr_request_T_207; // @[Scheduler.scala:71:46, :107:{25,28}]
wire _mshr_request_T_209 = ~_mshrs_9_io_schedule_bits_a_valid; // @[Scheduler.scala:71:46, :108:32]
wire _mshr_request_T_210 = _sourceA_io_req_ready | _mshr_request_T_209; // @[Scheduler.scala:40:23, :108:{29,32}]
wire _mshr_request_T_211 = _mshr_request_T_208 & _mshr_request_T_210; // @[Scheduler.scala:107:{25,31}, :108:29]
wire _mshr_request_T_212 = ~_mshrs_9_io_schedule_bits_b_valid; // @[Scheduler.scala:71:46, :109:32]
wire _mshr_request_T_213 = _sourceB_io_req_ready | _mshr_request_T_212; // @[Scheduler.scala:41:23, :109:{29,32}]
wire _mshr_request_T_214 = _mshr_request_T_211 & _mshr_request_T_213; // @[Scheduler.scala:107:31, :108:61, :109:29]
wire _mshr_request_T_215 = ~_mshrs_9_io_schedule_bits_c_valid; // @[Scheduler.scala:71:46, :110:32]
wire _mshr_request_T_216 = _sourceC_io_req_ready | _mshr_request_T_215; // @[Scheduler.scala:42:23, :110:{29,32}]
wire _mshr_request_T_217 = _mshr_request_T_214 & _mshr_request_T_216; // @[Scheduler.scala:108:61, :109:61, :110:29]
wire _mshr_request_T_218 = ~_mshrs_9_io_schedule_bits_d_valid; // @[Scheduler.scala:71:46, :111:32]
wire _mshr_request_T_219 = _sourceD_io_req_ready | _mshr_request_T_218; // @[Scheduler.scala:43:23, :111:{29,32}]
wire _mshr_request_T_220 = _mshr_request_T_217 & _mshr_request_T_219; // @[Scheduler.scala:109:61, :110:61, :111:29]
wire _mshr_request_T_221 = ~_mshrs_9_io_schedule_bits_e_valid; // @[Scheduler.scala:71:46, :112:32]
wire _mshr_request_T_222 = _sourceE_io_req_ready | _mshr_request_T_221; // @[Scheduler.scala:44:23, :112:{29,32}]
wire _mshr_request_T_223 = _mshr_request_T_220 & _mshr_request_T_222; // @[Scheduler.scala:110:61, :111:61, :112:29]
wire _mshr_request_T_224 = ~_mshrs_9_io_schedule_bits_x_valid; // @[Scheduler.scala:71:46, :113:32]
wire _mshr_request_T_225 = _sourceX_io_req_ready | _mshr_request_T_224; // @[Scheduler.scala:45:23, :113:{29,32}]
wire _mshr_request_T_226 = _mshr_request_T_223 & _mshr_request_T_225; // @[Scheduler.scala:111:61, :112:61, :113:29]
wire _mshr_request_T_227 = ~_mshrs_9_io_schedule_bits_dir_valid; // @[Scheduler.scala:71:46, :114:36]
wire _mshr_request_T_228 = _directory_io_write_ready | _mshr_request_T_227; // @[Scheduler.scala:68:25, :114:{33,36}]
wire _mshr_request_T_229 = _mshr_request_T_226 & _mshr_request_T_228; // @[Scheduler.scala:112:61, :113:61, :114:33]
wire _mshr_request_T_230 = ~mshr_stall_bc; // @[Scheduler.scala:94:28, :107:28]
wire _mshr_request_T_231 = _mshrs_10_io_schedule_valid & _mshr_request_T_230; // @[Scheduler.scala:71:46, :107:{25,28}]
wire _mshr_request_T_232 = ~_mshrs_10_io_schedule_bits_a_valid; // @[Scheduler.scala:71:46, :108:32]
wire _mshr_request_T_233 = _sourceA_io_req_ready | _mshr_request_T_232; // @[Scheduler.scala:40:23, :108:{29,32}]
wire _mshr_request_T_234 = _mshr_request_T_231 & _mshr_request_T_233; // @[Scheduler.scala:107:{25,31}, :108:29]
wire _mshr_request_T_235 = ~_mshrs_10_io_schedule_bits_b_valid; // @[Scheduler.scala:71:46, :109:32]
wire _mshr_request_T_236 = _sourceB_io_req_ready | _mshr_request_T_235; // @[Scheduler.scala:41:23, :109:{29,32}]
wire _mshr_request_T_237 = _mshr_request_T_234 & _mshr_request_T_236; // @[Scheduler.scala:107:31, :108:61, :109:29]
wire _mshr_request_T_238 = ~_mshrs_10_io_schedule_bits_c_valid; // @[Scheduler.scala:71:46, :110:32]
wire _mshr_request_T_239 = _sourceC_io_req_ready | _mshr_request_T_238; // @[Scheduler.scala:42:23, :110:{29,32}]
wire _mshr_request_T_240 = _mshr_request_T_237 & _mshr_request_T_239; // @[Scheduler.scala:108:61, :109:61, :110:29]
wire _mshr_request_T_241 = ~_mshrs_10_io_schedule_bits_d_valid; // @[Scheduler.scala:71:46, :111:32]
wire _mshr_request_T_242 = _sourceD_io_req_ready | _mshr_request_T_241; // @[Scheduler.scala:43:23, :111:{29,32}]
wire _mshr_request_T_243 = _mshr_request_T_240 & _mshr_request_T_242; // @[Scheduler.scala:109:61, :110:61, :111:29]
wire _mshr_request_T_244 = ~_mshrs_10_io_schedule_bits_e_valid; // @[Scheduler.scala:71:46, :112:32]
wire _mshr_request_T_245 = _sourceE_io_req_ready | _mshr_request_T_244; // @[Scheduler.scala:44:23, :112:{29,32}]
wire _mshr_request_T_246 = _mshr_request_T_243 & _mshr_request_T_245; // @[Scheduler.scala:110:61, :111:61, :112:29]
wire _mshr_request_T_247 = ~_mshrs_10_io_schedule_bits_x_valid; // @[Scheduler.scala:71:46, :113:32]
wire _mshr_request_T_248 = _sourceX_io_req_ready | _mshr_request_T_247; // @[Scheduler.scala:45:23, :113:{29,32}]
wire _mshr_request_T_249 = _mshr_request_T_246 & _mshr_request_T_248; // @[Scheduler.scala:111:61, :112:61, :113:29]
wire _mshr_request_T_250 = ~_mshrs_10_io_schedule_bits_dir_valid; // @[Scheduler.scala:71:46, :114:36]
wire _mshr_request_T_251 = _directory_io_write_ready | _mshr_request_T_250; // @[Scheduler.scala:68:25, :114:{33,36}]
wire _mshr_request_T_252 = _mshr_request_T_249 & _mshr_request_T_251; // @[Scheduler.scala:112:61, :113:61, :114:33]
  // _mshr_request_T_254 is only declared here; it is driven later by an assign (same
  // forward-declaration pattern as the _schedule_WIRE_* nets below).
  wire _mshr_request_T_254; // @[Scheduler.scala:107:25]
  wire _mshr_request_T_255 = ~_mshrs_11_io_schedule_bits_a_valid; // @[Scheduler.scala:71:46, :108:32]
  wire _mshr_request_T_256 = _sourceA_io_req_ready | _mshr_request_T_255; // @[Scheduler.scala:40:23, :108:{29,32}]
  wire _mshr_request_T_257 = _mshr_request_T_254 & _mshr_request_T_256; // @[Scheduler.scala:107:{25,31}, :108:29]
wire _mshr_request_T_258 = ~_mshrs_11_io_schedule_bits_b_valid; // @[Scheduler.scala:71:46, :109:32]
wire _mshr_request_T_259 = _sourceB_io_req_ready | _mshr_request_T_258; // @[Scheduler.scala:41:23, :109:{29,32}]
wire _mshr_request_T_260 = _mshr_request_T_257 & _mshr_request_T_259; // @[Scheduler.scala:107:31, :108:61, :109:29]
wire _mshr_request_T_261 = ~_mshrs_11_io_schedule_bits_c_valid; // @[Scheduler.scala:71:46, :110:32]
wire _mshr_request_T_262 = _sourceC_io_req_ready | _mshr_request_T_261; // @[Scheduler.scala:42:23, :110:{29,32}]
wire _mshr_request_T_263 = _mshr_request_T_260 & _mshr_request_T_262; // @[Scheduler.scala:108:61, :109:61, :110:29]
wire _mshr_request_T_264 = ~_mshrs_11_io_schedule_bits_d_valid; // @[Scheduler.scala:71:46, :111:32]
wire _mshr_request_T_265 = _sourceD_io_req_ready | _mshr_request_T_264; // @[Scheduler.scala:43:23, :111:{29,32}]
wire _mshr_request_T_266 = _mshr_request_T_263 & _mshr_request_T_265; // @[Scheduler.scala:109:61, :110:61, :111:29]
wire _mshr_request_T_267 = ~_mshrs_11_io_schedule_bits_e_valid; // @[Scheduler.scala:71:46, :112:32]
wire _mshr_request_T_268 = _sourceE_io_req_ready | _mshr_request_T_267; // @[Scheduler.scala:44:23, :112:{29,32}]
wire _mshr_request_T_269 = _mshr_request_T_266 & _mshr_request_T_268; // @[Scheduler.scala:110:61, :111:61, :112:29]
wire _mshr_request_T_270 = ~_mshrs_11_io_schedule_bits_x_valid; // @[Scheduler.scala:71:46, :113:32]
wire _mshr_request_T_271 = _sourceX_io_req_ready | _mshr_request_T_270; // @[Scheduler.scala:45:23, :113:{29,32}]
wire _mshr_request_T_272 = _mshr_request_T_269 & _mshr_request_T_271; // @[Scheduler.scala:111:61, :112:61, :113:29]
wire _mshr_request_T_273 = ~_mshrs_11_io_schedule_bits_dir_valid; // @[Scheduler.scala:71:46, :114:36]
wire _mshr_request_T_274 = _directory_io_write_ready | _mshr_request_T_273; // @[Scheduler.scala:68:25, :114:{33,36}]
wire _mshr_request_T_275 = _mshr_request_T_272 & _mshr_request_T_274; // @[Scheduler.scala:112:61, :113:61, :114:33]
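  // Per-MSHR grant eligibility, packed below into mshr_request: the MSHR's schedule
  // output must be valid, it must not be stalled (mshr_stall_*), and every sub-request
  // it asserts (A, B, C, D, E, X, directory write) must have a ready sink.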
wire [1:0] mshr_request_lo_lo_hi = {_mshr_request_T_68, _mshr_request_T_45}; // @[Scheduler.scala:106:25, :113:61]
wire [2:0] mshr_request_lo_lo = {mshr_request_lo_lo_hi, _mshr_request_T_22}; // @[Scheduler.scala:106:25, :113:61]
wire [1:0] mshr_request_lo_hi_hi = {_mshr_request_T_137, _mshr_request_T_114}; // @[Scheduler.scala:106:25, :113:61]
wire [2:0] mshr_request_lo_hi = {mshr_request_lo_hi_hi, _mshr_request_T_91}; // @[Scheduler.scala:106:25, :113:61]
wire [5:0] mshr_request_lo = {mshr_request_lo_hi, mshr_request_lo_lo}; // @[Scheduler.scala:106:25]
wire [1:0] mshr_request_hi_lo_hi = {_mshr_request_T_206, _mshr_request_T_183}; // @[Scheduler.scala:106:25, :113:61]
wire [2:0] mshr_request_hi_lo = {mshr_request_hi_lo_hi, _mshr_request_T_160}; // @[Scheduler.scala:106:25, :113:61]
wire [1:0] mshr_request_hi_hi_hi = {_mshr_request_T_275, _mshr_request_T_252}; // @[Scheduler.scala:106:25, :113:61]
wire [2:0] mshr_request_hi_hi = {mshr_request_hi_hi_hi, _mshr_request_T_229}; // @[Scheduler.scala:106:25, :113:61]
wire [5:0] mshr_request_hi = {mshr_request_hi_hi, mshr_request_hi_lo}; // @[Scheduler.scala:106:25]
wire [11:0] mshr_request = {mshr_request_hi, mshr_request_lo}; // @[Scheduler.scala:106:25]
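  // Round-robin arbitration over the 12 request bits. robin_filter (updated elsewhere
  // in this module from the previous grant) masks the requests; doubling the vector
  // below lets the filtered half take priority, so the grant rotates across MSHRs.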
reg [11:0] robin_filter; // @[Scheduler.scala:118:29]
wire [11:0] _robin_request_T = mshr_request & robin_filter; // @[Scheduler.scala:106:25, :118:29, :119:54]
wire [23:0] robin_request = {mshr_request, _robin_request_T}; // @[Scheduler.scala:106:25, :119:{26,54}]
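  // Prefix-OR chain (leftOR): each stage ORs in a left-shifted copy, propagating every
  // set bit toward the MSB, i.e. leftOR(x) = x | (x<<1) | (x<<2) | ... . Below,
  // ~(leftOR(robin_request) << 1) & robin_request isolates the least-significant set
  // bit of robin_request, the winning request.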
wire [24:0] _mshr_selectOH2_T = {robin_request, 1'h0}; // @[package.scala:253:48]
wire [23:0] _mshr_selectOH2_T_1 = _mshr_selectOH2_T[23:0]; // @[package.scala:253:{48,53}]
wire [23:0] _mshr_selectOH2_T_2 = robin_request | _mshr_selectOH2_T_1; // @[package.scala:253:{43,53}]
wire [25:0] _mshr_selectOH2_T_3 = {_mshr_selectOH2_T_2, 2'h0}; // @[package.scala:253:{43,48}]
wire [23:0] _mshr_selectOH2_T_4 = _mshr_selectOH2_T_3[23:0]; // @[package.scala:253:{48,53}]
wire [23:0] _mshr_selectOH2_T_5 = _mshr_selectOH2_T_2 | _mshr_selectOH2_T_4; // @[package.scala:253:{43,53}]
wire [27:0] _mshr_selectOH2_T_6 = {_mshr_selectOH2_T_5, 4'h0}; // @[package.scala:253:{43,48}]
wire [23:0] _mshr_selectOH2_T_7 = _mshr_selectOH2_T_6[23:0]; // @[package.scala:253:{48,53}]
wire [23:0] _mshr_selectOH2_T_8 = _mshr_selectOH2_T_5 | _mshr_selectOH2_T_7; // @[package.scala:253:{43,53}]
wire [31:0] _mshr_selectOH2_T_9 = {_mshr_selectOH2_T_8, 8'h0}; // @[package.scala:253:{43,48}]
wire [23:0] _mshr_selectOH2_T_10 = _mshr_selectOH2_T_9[23:0]; // @[package.scala:253:{48,53}]
wire [23:0] _mshr_selectOH2_T_11 = _mshr_selectOH2_T_8 | _mshr_selectOH2_T_10; // @[package.scala:253:{43,53}]
wire [39:0] _mshr_selectOH2_T_12 = {_mshr_selectOH2_T_11, 16'h0}; // @[package.scala:253:{43,48}]
wire [23:0] _mshr_selectOH2_T_13 = _mshr_selectOH2_T_12[23:0]; // @[package.scala:253:{48,53}]
wire [23:0] _mshr_selectOH2_T_14 = _mshr_selectOH2_T_11 | _mshr_selectOH2_T_13; // @[package.scala:253:{43,53}]
wire [23:0] _mshr_selectOH2_T_15 = _mshr_selectOH2_T_14; // @[package.scala:253:43, :254:17]
wire [24:0] _mshr_selectOH2_T_16 = {_mshr_selectOH2_T_15, 1'h0}; // @[package.scala:254:17]
wire [24:0] _mshr_selectOH2_T_17 = ~_mshr_selectOH2_T_16; // @[Scheduler.scala:120:{24,48}]
wire [24:0] mshr_selectOH2 = {1'h0, _mshr_selectOH2_T_17[23:0] & robin_request}; // @[Scheduler.scala:119:26, :120:{24,54}]
wire [11:0] _mshr_selectOH_T = mshr_selectOH2[23:12]; // @[Scheduler.scala:120:54, :121:37]
wire [11:0] _mshr_selectOH_T_1 = mshr_selectOH2[11:0]; // @[Scheduler.scala:120:54, :121:86]
wire [11:0] mshr_selectOH = _mshr_selectOH_T | _mshr_selectOH_T_1; // @[Scheduler.scala:121:{37,70,86}]
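  // Fold the 24-bit pick back onto 12 MSHRs: a win in either the filtered (low) half or
  // the raw (high) half selects the same MSHR, yielding a 12-bit one-hot grant.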
wire [3:0] mshr_select_hi = mshr_selectOH[11:8]; // @[OneHot.scala:30:18]
wire [7:0] mshr_select_lo = mshr_selectOH[7:0]; // @[OneHot.scala:31:18]
wire _mshr_select_T = |mshr_select_hi; // @[OneHot.scala:30:18, :32:14]
wire [7:0] _mshr_select_T_1 = {4'h0, mshr_select_hi} | mshr_select_lo; // @[OneHot.scala:30:18, :31:18, :32:28]
wire [3:0] mshr_select_hi_1 = _mshr_select_T_1[7:4]; // @[OneHot.scala:30:18, :32:28]
wire [3:0] mshr_select_lo_1 = _mshr_select_T_1[3:0]; // @[OneHot.scala:31:18, :32:28]
wire _mshr_select_T_2 = |mshr_select_hi_1; // @[OneHot.scala:30:18, :32:14]
wire [3:0] _mshr_select_T_3 = mshr_select_hi_1 | mshr_select_lo_1; // @[OneHot.scala:30:18, :31:18, :32:28]
wire [1:0] mshr_select_hi_2 = _mshr_select_T_3[3:2]; // @[OneHot.scala:30:18, :32:28]
wire [1:0] mshr_select_lo_2 = _mshr_select_T_3[1:0]; // @[OneHot.scala:31:18, :32:28]
wire _mshr_select_T_4 = |mshr_select_hi_2; // @[OneHot.scala:30:18, :32:14]
wire [1:0] _mshr_select_T_5 = mshr_select_hi_2 | mshr_select_lo_2; // @[OneHot.scala:30:18, :31:18, :32:28]
wire _mshr_select_T_6 = _mshr_select_T_5[1]; // @[OneHot.scala:32:28]
wire [1:0] _mshr_select_T_7 = {_mshr_select_T_4, _mshr_select_T_6}; // @[OneHot.scala:32:{10,14}]
wire [2:0] _mshr_select_T_8 = {_mshr_select_T_2, _mshr_select_T_7}; // @[OneHot.scala:32:{10,14}]
wire [3:0] mshr_select = {_mshr_select_T, _mshr_select_T_8}; // @[OneHot.scala:32:{10,14}]
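  // Binary encoding (OHToUInt) of the one-hot grant; the index is reused below as the
  // A-channel source and the D-channel sink.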
wire [3:0] schedule_a_bits_source = mshr_select; // @[OneHot.scala:32:10]
wire [3:0] schedule_d_bits_sink = mshr_select; // @[OneHot.scala:32:10]
wire _schedule_T = mshr_selectOH[0]; // @[Mux.scala:32:36]
wire _scheduleTag_T = mshr_selectOH[0]; // @[Mux.scala:32:36]
wire _scheduleSet_T = mshr_selectOH[0]; // @[Mux.scala:32:36]
wire sel = mshr_selectOH[0]; // @[Mux.scala:32:36]
wire _schedule_T_1 = mshr_selectOH[1]; // @[Mux.scala:32:36]
wire _scheduleTag_T_1 = mshr_selectOH[1]; // @[Mux.scala:32:36]
wire _scheduleSet_T_1 = mshr_selectOH[1]; // @[Mux.scala:32:36]
wire sel_1 = mshr_selectOH[1]; // @[Mux.scala:32:36]
wire _schedule_T_2 = mshr_selectOH[2]; // @[Mux.scala:32:36]
wire _scheduleTag_T_2 = mshr_selectOH[2]; // @[Mux.scala:32:36]
wire _scheduleSet_T_2 = mshr_selectOH[2]; // @[Mux.scala:32:36]
wire sel_2 = mshr_selectOH[2]; // @[Mux.scala:32:36]
wire _schedule_T_3 = mshr_selectOH[3]; // @[Mux.scala:32:36]
wire _scheduleTag_T_3 = mshr_selectOH[3]; // @[Mux.scala:32:36]
wire _scheduleSet_T_3 = mshr_selectOH[3]; // @[Mux.scala:32:36]
wire sel_3 = mshr_selectOH[3]; // @[Mux.scala:32:36]
wire _schedule_T_4 = mshr_selectOH[4]; // @[Mux.scala:32:36]
wire _scheduleTag_T_4 = mshr_selectOH[4]; // @[Mux.scala:32:36]
wire _scheduleSet_T_4 = mshr_selectOH[4]; // @[Mux.scala:32:36]
wire sel_4 = mshr_selectOH[4]; // @[Mux.scala:32:36]
wire _schedule_T_5 = mshr_selectOH[5]; // @[Mux.scala:32:36]
wire _scheduleTag_T_5 = mshr_selectOH[5]; // @[Mux.scala:32:36]
wire _scheduleSet_T_5 = mshr_selectOH[5]; // @[Mux.scala:32:36]
wire sel_5 = mshr_selectOH[5]; // @[Mux.scala:32:36]
wire _schedule_T_6 = mshr_selectOH[6]; // @[Mux.scala:32:36]
wire _scheduleTag_T_6 = mshr_selectOH[6]; // @[Mux.scala:32:36]
wire _scheduleSet_T_6 = mshr_selectOH[6]; // @[Mux.scala:32:36]
wire sel_6 = mshr_selectOH[6]; // @[Mux.scala:32:36]
wire _schedule_T_7 = mshr_selectOH[7]; // @[Mux.scala:32:36]
wire _scheduleTag_T_7 = mshr_selectOH[7]; // @[Mux.scala:32:36]
wire _scheduleSet_T_7 = mshr_selectOH[7]; // @[Mux.scala:32:36]
wire sel_7 = mshr_selectOH[7]; // @[Mux.scala:32:36]
wire _schedule_T_8 = mshr_selectOH[8]; // @[Mux.scala:32:36]
wire _scheduleTag_T_8 = mshr_selectOH[8]; // @[Mux.scala:32:36]
wire _scheduleSet_T_8 = mshr_selectOH[8]; // @[Mux.scala:32:36]
wire sel_8 = mshr_selectOH[8]; // @[Mux.scala:32:36]
wire _schedule_T_9 = mshr_selectOH[9]; // @[Mux.scala:32:36]
wire _scheduleTag_T_9 = mshr_selectOH[9]; // @[Mux.scala:32:36]
wire _scheduleSet_T_9 = mshr_selectOH[9]; // @[Mux.scala:32:36]
wire sel_9 = mshr_selectOH[9]; // @[Mux.scala:32:36]
wire _schedule_T_10 = mshr_selectOH[10]; // @[Mux.scala:32:36]
wire _scheduleTag_T_10 = mshr_selectOH[10]; // @[Mux.scala:32:36]
wire _scheduleSet_T_10 = mshr_selectOH[10]; // @[Mux.scala:32:36]
wire select_bc = mshr_selectOH[10]; // @[Mux.scala:32:36]
wire sel_10 = mshr_selectOH[10]; // @[Mux.scala:32:36]
wire _schedule_T_11 = mshr_selectOH[11]; // @[Mux.scala:32:36]
wire _scheduleTag_T_11 = mshr_selectOH[11]; // @[Mux.scala:32:36]
wire _scheduleSet_T_11 = mshr_selectOH[11]; // @[Mux.scala:32:36]
wire select_c = mshr_selectOH[11]; // @[Mux.scala:32:36]
wire sel_11 = mshr_selectOH[11]; // @[Mux.scala:32:36]
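  // The forward-declared _schedule_WIRE_* nets and the AND/OR trees that follow
  // implement a one-hot mux of every field of the granted MSHR's schedule bundle
  // (A, B, C, D, E, X and directory-write requests); select_bc / select_c alias grant
  // bits 10 and 11, the MSHRs reserved for nested requests. A rough Chisel-level sketch
  // of the pattern (illustrative names, not the Scheduler's verbatim source):
  //   val grantOH  = mshr_selectOH                                   // one bit per MSHR
  //   val schedule = Mux1H(grantOH, mshrs.map(_.io.schedule.bits))   // per-field AND/OR reduce
  //   val index    = OHToUInt(grantOH)                               // mshr_select above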
wire _schedule_WIRE_55_valid; // @[Mux.scala:30:73]
wire [8:0] _schedule_WIRE_55_bits_tag; // @[Mux.scala:30:73]
wire [10:0] _schedule_WIRE_55_bits_set; // @[Mux.scala:30:73]
wire [2:0] _schedule_WIRE_55_bits_param; // @[Mux.scala:30:73]
wire _schedule_WIRE_55_bits_block; // @[Mux.scala:30:73]
wire _schedule_WIRE_48_valid; // @[Mux.scala:30:73]
wire [2:0] _schedule_WIRE_48_bits_param; // @[Mux.scala:30:73]
wire [8:0] _schedule_WIRE_48_bits_tag; // @[Mux.scala:30:73]
wire [10:0] _schedule_WIRE_48_bits_set; // @[Mux.scala:30:73]
wire _schedule_WIRE_48_bits_clients; // @[Mux.scala:30:73]
wire _schedule_WIRE_38_valid; // @[Mux.scala:30:73]
wire [2:0] _schedule_WIRE_38_bits_opcode; // @[Mux.scala:30:73]
wire [2:0] _schedule_WIRE_38_bits_param; // @[Mux.scala:30:73]
wire [3:0] _schedule_c_bits_source_T_1; // @[Scheduler.scala:132:32]
wire [8:0] _schedule_WIRE_38_bits_tag; // @[Mux.scala:30:73]
wire [10:0] _schedule_WIRE_38_bits_set; // @[Mux.scala:30:73]
wire [3:0] _schedule_WIRE_38_bits_way; // @[Mux.scala:30:73]
wire _schedule_WIRE_38_bits_dirty; // @[Mux.scala:30:73]
wire _schedule_WIRE_19_valid; // @[Mux.scala:30:73]
wire _schedule_WIRE_19_bits_prio_0; // @[Mux.scala:30:73]
wire _schedule_WIRE_19_bits_prio_1; // @[Mux.scala:30:73]
wire _schedule_WIRE_19_bits_prio_2; // @[Mux.scala:30:73]
wire _schedule_WIRE_19_bits_control; // @[Mux.scala:30:73]
wire [2:0] _schedule_WIRE_19_bits_opcode; // @[Mux.scala:30:73]
wire [2:0] _schedule_WIRE_19_bits_param; // @[Mux.scala:30:73]
wire [2:0] _schedule_WIRE_19_bits_size; // @[Mux.scala:30:73]
wire [5:0] _schedule_WIRE_19_bits_source; // @[Mux.scala:30:73]
wire [8:0] _schedule_WIRE_19_bits_tag; // @[Mux.scala:30:73]
wire [5:0] _schedule_WIRE_19_bits_offset; // @[Mux.scala:30:73]
wire [5:0] _schedule_WIRE_19_bits_put; // @[Mux.scala:30:73]
wire [10:0] _schedule_WIRE_19_bits_set; // @[Mux.scala:30:73]
wire [3:0] _schedule_WIRE_19_bits_way; // @[Mux.scala:30:73]
wire _schedule_WIRE_19_bits_bad; // @[Mux.scala:30:73]
wire _schedule_WIRE_15_valid; // @[Mux.scala:30:73]
wire [2:0] _schedule_WIRE_15_bits_sink; // @[Mux.scala:30:73]
wire _schedule_WIRE_11_valid; // @[Mux.scala:30:73]
wire _schedule_WIRE_1_valid; // @[Mux.scala:30:73]
wire [10:0] _schedule_WIRE_1_bits_set; // @[Mux.scala:30:73]
wire [3:0] _schedule_WIRE_1_bits_way; // @[Mux.scala:30:73]
wire _schedule_WIRE_1_bits_data_dirty; // @[Mux.scala:30:73]
wire [1:0] _schedule_WIRE_1_bits_data_state; // @[Mux.scala:30:73]
wire _schedule_WIRE_1_bits_data_clients; // @[Mux.scala:30:73]
wire [8:0] _schedule_WIRE_1_bits_data_tag; // @[Mux.scala:30:73]
wire _schedule_WIRE; // @[Mux.scala:30:73]
wire [8:0] schedule_a_bits_tag; // @[Mux.scala:30:73]
wire [10:0] schedule_a_bits_set; // @[Mux.scala:30:73]
wire [2:0] schedule_a_bits_param; // @[Mux.scala:30:73]
wire schedule_a_bits_block; // @[Mux.scala:30:73]
wire schedule_a_valid; // @[Mux.scala:30:73]
wire [2:0] schedule_b_bits_param; // @[Mux.scala:30:73]
wire [8:0] schedule_b_bits_tag; // @[Mux.scala:30:73]
wire [10:0] schedule_b_bits_set; // @[Mux.scala:30:73]
wire schedule_b_bits_clients; // @[Mux.scala:30:73]
wire schedule_b_valid; // @[Mux.scala:30:73]
wire [2:0] schedule_c_bits_opcode; // @[Mux.scala:30:73]
wire [2:0] schedule_c_bits_param; // @[Mux.scala:30:73]
wire [3:0] schedule_c_bits_source; // @[Mux.scala:30:73]
wire [8:0] schedule_c_bits_tag; // @[Mux.scala:30:73]
wire [10:0] schedule_c_bits_set; // @[Mux.scala:30:73]
wire [3:0] schedule_c_bits_way; // @[Mux.scala:30:73]
wire schedule_c_bits_dirty; // @[Mux.scala:30:73]
wire schedule_c_valid; // @[Mux.scala:30:73]
wire schedule_d_bits_prio_0; // @[Mux.scala:30:73]
wire schedule_d_bits_prio_1; // @[Mux.scala:30:73]
wire schedule_d_bits_prio_2; // @[Mux.scala:30:73]
wire schedule_d_bits_control; // @[Mux.scala:30:73]
wire [2:0] schedule_d_bits_opcode; // @[Mux.scala:30:73]
wire [2:0] schedule_d_bits_param; // @[Mux.scala:30:73]
wire [2:0] schedule_d_bits_size; // @[Mux.scala:30:73]
wire [5:0] schedule_d_bits_source; // @[Mux.scala:30:73]
wire [8:0] schedule_d_bits_tag; // @[Mux.scala:30:73]
wire [5:0] schedule_d_bits_offset; // @[Mux.scala:30:73]
wire [5:0] schedule_d_bits_put; // @[Mux.scala:30:73]
wire [10:0] schedule_d_bits_set; // @[Mux.scala:30:73]
wire [3:0] schedule_d_bits_way; // @[Mux.scala:30:73]
wire schedule_d_bits_bad; // @[Mux.scala:30:73]
wire schedule_d_valid; // @[Mux.scala:30:73]
wire [2:0] schedule_e_bits_sink; // @[Mux.scala:30:73]
wire schedule_e_valid; // @[Mux.scala:30:73]
wire schedule_x_valid; // @[Mux.scala:30:73]
wire schedule_dir_bits_data_dirty; // @[Mux.scala:30:73]
wire [1:0] schedule_dir_bits_data_state; // @[Mux.scala:30:73]
wire schedule_dir_bits_data_clients; // @[Mux.scala:30:73]
wire [8:0] schedule_dir_bits_data_tag; // @[Mux.scala:30:73]
wire [10:0] schedule_dir_bits_set; // @[Mux.scala:30:73]
wire [3:0] schedule_dir_bits_way; // @[Mux.scala:30:73]
wire schedule_dir_valid; // @[Mux.scala:30:73]
wire schedule_reload; // @[Mux.scala:30:73]
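  // schedule.reload: OR-reduction of (grant bit & that MSHR's reload bit), i.e. the
  // reload flag of whichever MSHR was granted.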
wire _schedule_T_12 = _schedule_T & _mshrs_0_io_schedule_bits_reload; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_13 = _schedule_T_1 & _mshrs_1_io_schedule_bits_reload; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_14 = _schedule_T_2 & _mshrs_2_io_schedule_bits_reload; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_15 = _schedule_T_3 & _mshrs_3_io_schedule_bits_reload; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_16 = _schedule_T_4 & _mshrs_4_io_schedule_bits_reload; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_17 = _schedule_T_5 & _mshrs_5_io_schedule_bits_reload; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_18 = _schedule_T_6 & _mshrs_6_io_schedule_bits_reload; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_19 = _schedule_T_7 & _mshrs_7_io_schedule_bits_reload; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_20 = _schedule_T_8 & _mshrs_8_io_schedule_bits_reload; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_21 = _schedule_T_9 & _mshrs_9_io_schedule_bits_reload; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_22 = _schedule_T_10 & _mshrs_10_io_schedule_bits_reload; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_23 = _schedule_T_11 & _mshrs_11_io_schedule_bits_reload; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_24 = _schedule_T_12 | _schedule_T_13; // @[Mux.scala:30:73]
wire _schedule_T_25 = _schedule_T_24 | _schedule_T_14; // @[Mux.scala:30:73]
wire _schedule_T_26 = _schedule_T_25 | _schedule_T_15; // @[Mux.scala:30:73]
wire _schedule_T_27 = _schedule_T_26 | _schedule_T_16; // @[Mux.scala:30:73]
wire _schedule_T_28 = _schedule_T_27 | _schedule_T_17; // @[Mux.scala:30:73]
wire _schedule_T_29 = _schedule_T_28 | _schedule_T_18; // @[Mux.scala:30:73]
wire _schedule_T_30 = _schedule_T_29 | _schedule_T_19; // @[Mux.scala:30:73]
wire _schedule_T_31 = _schedule_T_30 | _schedule_T_20; // @[Mux.scala:30:73]
wire _schedule_T_32 = _schedule_T_31 | _schedule_T_21; // @[Mux.scala:30:73]
wire _schedule_T_33 = _schedule_T_32 | _schedule_T_22; // @[Mux.scala:30:73]
wire _schedule_T_34 = _schedule_T_33 | _schedule_T_23; // @[Mux.scala:30:73]
assign _schedule_WIRE = _schedule_T_34; // @[Mux.scala:30:73]
assign schedule_reload = _schedule_WIRE; // @[Mux.scala:30:73]
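  // Directory-write request of the granted MSHR: valid, set, way and the data fields
  // (tag, clients, state, dirty) are each selected by the same grant-gated AND/OR trees.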
wire _schedule_WIRE_10; // @[Mux.scala:30:73]
assign schedule_dir_valid = _schedule_WIRE_1_valid; // @[Mux.scala:30:73]
wire [10:0] _schedule_WIRE_2_set; // @[Mux.scala:30:73]
assign schedule_dir_bits_set = _schedule_WIRE_1_bits_set; // @[Mux.scala:30:73]
wire [3:0] _schedule_WIRE_2_way; // @[Mux.scala:30:73]
assign schedule_dir_bits_way = _schedule_WIRE_1_bits_way; // @[Mux.scala:30:73]
wire _schedule_WIRE_2_data_dirty; // @[Mux.scala:30:73]
assign schedule_dir_bits_data_dirty = _schedule_WIRE_1_bits_data_dirty; // @[Mux.scala:30:73]
wire [1:0] _schedule_WIRE_2_data_state; // @[Mux.scala:30:73]
assign schedule_dir_bits_data_state = _schedule_WIRE_1_bits_data_state; // @[Mux.scala:30:73]
wire _schedule_WIRE_2_data_clients; // @[Mux.scala:30:73]
assign schedule_dir_bits_data_clients = _schedule_WIRE_1_bits_data_clients; // @[Mux.scala:30:73]
wire [8:0] _schedule_WIRE_2_data_tag; // @[Mux.scala:30:73]
assign schedule_dir_bits_data_tag = _schedule_WIRE_1_bits_data_tag; // @[Mux.scala:30:73]
wire [10:0] _schedule_WIRE_9; // @[Mux.scala:30:73]
assign _schedule_WIRE_1_bits_set = _schedule_WIRE_2_set; // @[Mux.scala:30:73]
wire [3:0] _schedule_WIRE_8; // @[Mux.scala:30:73]
assign _schedule_WIRE_1_bits_way = _schedule_WIRE_2_way; // @[Mux.scala:30:73]
wire _schedule_WIRE_3_dirty; // @[Mux.scala:30:73]
assign _schedule_WIRE_1_bits_data_dirty = _schedule_WIRE_2_data_dirty; // @[Mux.scala:30:73]
wire [1:0] _schedule_WIRE_3_state; // @[Mux.scala:30:73]
assign _schedule_WIRE_1_bits_data_state = _schedule_WIRE_2_data_state; // @[Mux.scala:30:73]
wire _schedule_WIRE_3_clients; // @[Mux.scala:30:73]
assign _schedule_WIRE_1_bits_data_clients = _schedule_WIRE_2_data_clients; // @[Mux.scala:30:73]
wire [8:0] _schedule_WIRE_3_tag; // @[Mux.scala:30:73]
assign _schedule_WIRE_1_bits_data_tag = _schedule_WIRE_2_data_tag; // @[Mux.scala:30:73]
wire _schedule_WIRE_7; // @[Mux.scala:30:73]
assign _schedule_WIRE_2_data_dirty = _schedule_WIRE_3_dirty; // @[Mux.scala:30:73]
wire [1:0] _schedule_WIRE_6; // @[Mux.scala:30:73]
assign _schedule_WIRE_2_data_state = _schedule_WIRE_3_state; // @[Mux.scala:30:73]
wire _schedule_WIRE_5; // @[Mux.scala:30:73]
assign _schedule_WIRE_2_data_clients = _schedule_WIRE_3_clients; // @[Mux.scala:30:73]
wire [8:0] _schedule_WIRE_4; // @[Mux.scala:30:73]
assign _schedule_WIRE_2_data_tag = _schedule_WIRE_3_tag; // @[Mux.scala:30:73]
wire [8:0] _schedule_T_35 = _schedule_T ? _mshrs_0_io_schedule_bits_dir_bits_data_tag : 9'h0; // @[Mux.scala:30:73, :32:36]
wire [8:0] _schedule_T_36 = _schedule_T_1 ? _mshrs_1_io_schedule_bits_dir_bits_data_tag : 9'h0; // @[Mux.scala:30:73, :32:36]
wire [8:0] _schedule_T_37 = _schedule_T_2 ? _mshrs_2_io_schedule_bits_dir_bits_data_tag : 9'h0; // @[Mux.scala:30:73, :32:36]
wire [8:0] _schedule_T_38 = _schedule_T_3 ? _mshrs_3_io_schedule_bits_dir_bits_data_tag : 9'h0; // @[Mux.scala:30:73, :32:36]
wire [8:0] _schedule_T_39 = _schedule_T_4 ? _mshrs_4_io_schedule_bits_dir_bits_data_tag : 9'h0; // @[Mux.scala:30:73, :32:36]
wire [8:0] _schedule_T_40 = _schedule_T_5 ? _mshrs_5_io_schedule_bits_dir_bits_data_tag : 9'h0; // @[Mux.scala:30:73, :32:36]
wire [8:0] _schedule_T_41 = _schedule_T_6 ? _mshrs_6_io_schedule_bits_dir_bits_data_tag : 9'h0; // @[Mux.scala:30:73, :32:36]
wire [8:0] _schedule_T_42 = _schedule_T_7 ? _mshrs_7_io_schedule_bits_dir_bits_data_tag : 9'h0; // @[Mux.scala:30:73, :32:36]
wire [8:0] _schedule_T_43 = _schedule_T_8 ? _mshrs_8_io_schedule_bits_dir_bits_data_tag : 9'h0; // @[Mux.scala:30:73, :32:36]
wire [8:0] _schedule_T_44 = _schedule_T_9 ? _mshrs_9_io_schedule_bits_dir_bits_data_tag : 9'h0; // @[Mux.scala:30:73, :32:36]
wire [8:0] _schedule_T_45 = _schedule_T_10 ? _mshrs_10_io_schedule_bits_dir_bits_data_tag : 9'h0; // @[Mux.scala:30:73, :32:36]
wire [8:0] _schedule_T_46 = _schedule_T_11 ? _mshrs_11_io_schedule_bits_dir_bits_data_tag : 9'h0; // @[Mux.scala:30:73, :32:36]
wire [8:0] _schedule_T_47 = _schedule_T_35 | _schedule_T_36; // @[Mux.scala:30:73]
wire [8:0] _schedule_T_48 = _schedule_T_47 | _schedule_T_37; // @[Mux.scala:30:73]
wire [8:0] _schedule_T_49 = _schedule_T_48 | _schedule_T_38; // @[Mux.scala:30:73]
wire [8:0] _schedule_T_50 = _schedule_T_49 | _schedule_T_39; // @[Mux.scala:30:73]
wire [8:0] _schedule_T_51 = _schedule_T_50 | _schedule_T_40; // @[Mux.scala:30:73]
wire [8:0] _schedule_T_52 = _schedule_T_51 | _schedule_T_41; // @[Mux.scala:30:73]
wire [8:0] _schedule_T_53 = _schedule_T_52 | _schedule_T_42; // @[Mux.scala:30:73]
wire [8:0] _schedule_T_54 = _schedule_T_53 | _schedule_T_43; // @[Mux.scala:30:73]
wire [8:0] _schedule_T_55 = _schedule_T_54 | _schedule_T_44; // @[Mux.scala:30:73]
wire [8:0] _schedule_T_56 = _schedule_T_55 | _schedule_T_45; // @[Mux.scala:30:73]
wire [8:0] _schedule_T_57 = _schedule_T_56 | _schedule_T_46; // @[Mux.scala:30:73]
assign _schedule_WIRE_4 = _schedule_T_57; // @[Mux.scala:30:73]
assign _schedule_WIRE_3_tag = _schedule_WIRE_4; // @[Mux.scala:30:73]
wire _schedule_T_58 = _schedule_T & _mshrs_0_io_schedule_bits_dir_bits_data_clients; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_59 = _schedule_T_1 & _mshrs_1_io_schedule_bits_dir_bits_data_clients; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_60 = _schedule_T_2 & _mshrs_2_io_schedule_bits_dir_bits_data_clients; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_61 = _schedule_T_3 & _mshrs_3_io_schedule_bits_dir_bits_data_clients; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_62 = _schedule_T_4 & _mshrs_4_io_schedule_bits_dir_bits_data_clients; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_63 = _schedule_T_5 & _mshrs_5_io_schedule_bits_dir_bits_data_clients; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_64 = _schedule_T_6 & _mshrs_6_io_schedule_bits_dir_bits_data_clients; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_65 = _schedule_T_7 & _mshrs_7_io_schedule_bits_dir_bits_data_clients; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_66 = _schedule_T_8 & _mshrs_8_io_schedule_bits_dir_bits_data_clients; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_67 = _schedule_T_9 & _mshrs_9_io_schedule_bits_dir_bits_data_clients; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_68 = _schedule_T_10 & _mshrs_10_io_schedule_bits_dir_bits_data_clients; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_69 = _schedule_T_11 & _mshrs_11_io_schedule_bits_dir_bits_data_clients; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_70 = _schedule_T_58 | _schedule_T_59; // @[Mux.scala:30:73]
wire _schedule_T_71 = _schedule_T_70 | _schedule_T_60; // @[Mux.scala:30:73]
wire _schedule_T_72 = _schedule_T_71 | _schedule_T_61; // @[Mux.scala:30:73]
wire _schedule_T_73 = _schedule_T_72 | _schedule_T_62; // @[Mux.scala:30:73]
wire _schedule_T_74 = _schedule_T_73 | _schedule_T_63; // @[Mux.scala:30:73]
wire _schedule_T_75 = _schedule_T_74 | _schedule_T_64; // @[Mux.scala:30:73]
wire _schedule_T_76 = _schedule_T_75 | _schedule_T_65; // @[Mux.scala:30:73]
wire _schedule_T_77 = _schedule_T_76 | _schedule_T_66; // @[Mux.scala:30:73]
wire _schedule_T_78 = _schedule_T_77 | _schedule_T_67; // @[Mux.scala:30:73]
wire _schedule_T_79 = _schedule_T_78 | _schedule_T_68; // @[Mux.scala:30:73]
wire _schedule_T_80 = _schedule_T_79 | _schedule_T_69; // @[Mux.scala:30:73]
assign _schedule_WIRE_5 = _schedule_T_80; // @[Mux.scala:30:73]
assign _schedule_WIRE_3_clients = _schedule_WIRE_5; // @[Mux.scala:30:73]
wire [1:0] _schedule_T_81 = _schedule_T ? _mshrs_0_io_schedule_bits_dir_bits_data_state : 2'h0; // @[Mux.scala:30:73, :32:36]
wire [1:0] _schedule_T_82 = _schedule_T_1 ? _mshrs_1_io_schedule_bits_dir_bits_data_state : 2'h0; // @[Mux.scala:30:73, :32:36]
wire [1:0] _schedule_T_83 = _schedule_T_2 ? _mshrs_2_io_schedule_bits_dir_bits_data_state : 2'h0; // @[Mux.scala:30:73, :32:36]
wire [1:0] _schedule_T_84 = _schedule_T_3 ? _mshrs_3_io_schedule_bits_dir_bits_data_state : 2'h0; // @[Mux.scala:30:73, :32:36]
wire [1:0] _schedule_T_85 = _schedule_T_4 ? _mshrs_4_io_schedule_bits_dir_bits_data_state : 2'h0; // @[Mux.scala:30:73, :32:36]
wire [1:0] _schedule_T_86 = _schedule_T_5 ? _mshrs_5_io_schedule_bits_dir_bits_data_state : 2'h0; // @[Mux.scala:30:73, :32:36]
wire [1:0] _schedule_T_87 = _schedule_T_6 ? _mshrs_6_io_schedule_bits_dir_bits_data_state : 2'h0; // @[Mux.scala:30:73, :32:36]
wire [1:0] _schedule_T_88 = _schedule_T_7 ? _mshrs_7_io_schedule_bits_dir_bits_data_state : 2'h0; // @[Mux.scala:30:73, :32:36]
wire [1:0] _schedule_T_89 = _schedule_T_8 ? _mshrs_8_io_schedule_bits_dir_bits_data_state : 2'h0; // @[Mux.scala:30:73, :32:36]
wire [1:0] _schedule_T_90 = _schedule_T_9 ? _mshrs_9_io_schedule_bits_dir_bits_data_state : 2'h0; // @[Mux.scala:30:73, :32:36]
wire [1:0] _schedule_T_91 = _schedule_T_10 ? _mshrs_10_io_schedule_bits_dir_bits_data_state : 2'h0; // @[Mux.scala:30:73, :32:36]
wire [1:0] _schedule_T_92 = _schedule_T_11 ? _mshrs_11_io_schedule_bits_dir_bits_data_state : 2'h0; // @[Mux.scala:30:73, :32:36]
wire [1:0] _schedule_T_93 = _schedule_T_81 | _schedule_T_82; // @[Mux.scala:30:73]
wire [1:0] _schedule_T_94 = _schedule_T_93 | _schedule_T_83; // @[Mux.scala:30:73]
wire [1:0] _schedule_T_95 = _schedule_T_94 | _schedule_T_84; // @[Mux.scala:30:73]
wire [1:0] _schedule_T_96 = _schedule_T_95 | _schedule_T_85; // @[Mux.scala:30:73]
wire [1:0] _schedule_T_97 = _schedule_T_96 | _schedule_T_86; // @[Mux.scala:30:73]
wire [1:0] _schedule_T_98 = _schedule_T_97 | _schedule_T_87; // @[Mux.scala:30:73]
wire [1:0] _schedule_T_99 = _schedule_T_98 | _schedule_T_88; // @[Mux.scala:30:73]
wire [1:0] _schedule_T_100 = _schedule_T_99 | _schedule_T_89; // @[Mux.scala:30:73]
wire [1:0] _schedule_T_101 = _schedule_T_100 | _schedule_T_90; // @[Mux.scala:30:73]
wire [1:0] _schedule_T_102 = _schedule_T_101 | _schedule_T_91; // @[Mux.scala:30:73]
wire [1:0] _schedule_T_103 = _schedule_T_102 | _schedule_T_92; // @[Mux.scala:30:73]
assign _schedule_WIRE_6 = _schedule_T_103; // @[Mux.scala:30:73]
assign _schedule_WIRE_3_state = _schedule_WIRE_6; // @[Mux.scala:30:73]
wire _schedule_T_104 = _schedule_T & _mshrs_0_io_schedule_bits_dir_bits_data_dirty; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_105 = _schedule_T_1 & _mshrs_1_io_schedule_bits_dir_bits_data_dirty; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_106 = _schedule_T_2 & _mshrs_2_io_schedule_bits_dir_bits_data_dirty; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_107 = _schedule_T_3 & _mshrs_3_io_schedule_bits_dir_bits_data_dirty; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_108 = _schedule_T_4 & _mshrs_4_io_schedule_bits_dir_bits_data_dirty; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_109 = _schedule_T_5 & _mshrs_5_io_schedule_bits_dir_bits_data_dirty; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_110 = _schedule_T_6 & _mshrs_6_io_schedule_bits_dir_bits_data_dirty; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_111 = _schedule_T_7 & _mshrs_7_io_schedule_bits_dir_bits_data_dirty; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_112 = _schedule_T_8 & _mshrs_8_io_schedule_bits_dir_bits_data_dirty; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_113 = _schedule_T_9 & _mshrs_9_io_schedule_bits_dir_bits_data_dirty; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_114 = _schedule_T_10 & _mshrs_10_io_schedule_bits_dir_bits_data_dirty; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_115 = _schedule_T_11 & _mshrs_11_io_schedule_bits_dir_bits_data_dirty; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_116 = _schedule_T_104 | _schedule_T_105; // @[Mux.scala:30:73]
wire _schedule_T_117 = _schedule_T_116 | _schedule_T_106; // @[Mux.scala:30:73]
wire _schedule_T_118 = _schedule_T_117 | _schedule_T_107; // @[Mux.scala:30:73]
wire _schedule_T_119 = _schedule_T_118 | _schedule_T_108; // @[Mux.scala:30:73]
wire _schedule_T_120 = _schedule_T_119 | _schedule_T_109; // @[Mux.scala:30:73]
wire _schedule_T_121 = _schedule_T_120 | _schedule_T_110; // @[Mux.scala:30:73]
wire _schedule_T_122 = _schedule_T_121 | _schedule_T_111; // @[Mux.scala:30:73]
wire _schedule_T_123 = _schedule_T_122 | _schedule_T_112; // @[Mux.scala:30:73]
wire _schedule_T_124 = _schedule_T_123 | _schedule_T_113; // @[Mux.scala:30:73]
wire _schedule_T_125 = _schedule_T_124 | _schedule_T_114; // @[Mux.scala:30:73]
wire _schedule_T_126 = _schedule_T_125 | _schedule_T_115; // @[Mux.scala:30:73]
assign _schedule_WIRE_7 = _schedule_T_126; // @[Mux.scala:30:73]
assign _schedule_WIRE_3_dirty = _schedule_WIRE_7; // @[Mux.scala:30:73]
wire [3:0] _schedule_T_127 = _schedule_T ? _mshrs_0_io_schedule_bits_dir_bits_way : 4'h0; // @[Mux.scala:30:73, :32:36]
wire [3:0] _schedule_T_128 = _schedule_T_1 ? _mshrs_1_io_schedule_bits_dir_bits_way : 4'h0; // @[Mux.scala:30:73, :32:36]
wire [3:0] _schedule_T_129 = _schedule_T_2 ? _mshrs_2_io_schedule_bits_dir_bits_way : 4'h0; // @[Mux.scala:30:73, :32:36]
wire [3:0] _schedule_T_130 = _schedule_T_3 ? _mshrs_3_io_schedule_bits_dir_bits_way : 4'h0; // @[Mux.scala:30:73, :32:36]
wire [3:0] _schedule_T_131 = _schedule_T_4 ? _mshrs_4_io_schedule_bits_dir_bits_way : 4'h0; // @[Mux.scala:30:73, :32:36]
wire [3:0] _schedule_T_132 = _schedule_T_5 ? _mshrs_5_io_schedule_bits_dir_bits_way : 4'h0; // @[Mux.scala:30:73, :32:36]
wire [3:0] _schedule_T_133 = _schedule_T_6 ? _mshrs_6_io_schedule_bits_dir_bits_way : 4'h0; // @[Mux.scala:30:73, :32:36]
wire [3:0] _schedule_T_134 = _schedule_T_7 ? _mshrs_7_io_schedule_bits_dir_bits_way : 4'h0; // @[Mux.scala:30:73, :32:36]
wire [3:0] _schedule_T_135 = _schedule_T_8 ? _mshrs_8_io_schedule_bits_dir_bits_way : 4'h0; // @[Mux.scala:30:73, :32:36]
wire [3:0] _schedule_T_136 = _schedule_T_9 ? _mshrs_9_io_schedule_bits_dir_bits_way : 4'h0; // @[Mux.scala:30:73, :32:36]
wire [3:0] _schedule_T_137 = _schedule_T_10 ? _mshrs_10_io_schedule_bits_dir_bits_way : 4'h0; // @[Mux.scala:30:73, :32:36]
wire [3:0] _schedule_T_138 = _schedule_T_11 ? _mshrs_11_io_schedule_bits_dir_bits_way : 4'h0; // @[Mux.scala:30:73, :32:36]
wire [3:0] _schedule_T_139 = _schedule_T_127 | _schedule_T_128; // @[Mux.scala:30:73]
wire [3:0] _schedule_T_140 = _schedule_T_139 | _schedule_T_129; // @[Mux.scala:30:73]
wire [3:0] _schedule_T_141 = _schedule_T_140 | _schedule_T_130; // @[Mux.scala:30:73]
wire [3:0] _schedule_T_142 = _schedule_T_141 | _schedule_T_131; // @[Mux.scala:30:73]
wire [3:0] _schedule_T_143 = _schedule_T_142 | _schedule_T_132; // @[Mux.scala:30:73]
wire [3:0] _schedule_T_144 = _schedule_T_143 | _schedule_T_133; // @[Mux.scala:30:73]
wire [3:0] _schedule_T_145 = _schedule_T_144 | _schedule_T_134; // @[Mux.scala:30:73]
wire [3:0] _schedule_T_146 = _schedule_T_145 | _schedule_T_135; // @[Mux.scala:30:73]
wire [3:0] _schedule_T_147 = _schedule_T_146 | _schedule_T_136; // @[Mux.scala:30:73]
wire [3:0] _schedule_T_148 = _schedule_T_147 | _schedule_T_137; // @[Mux.scala:30:73]
wire [3:0] _schedule_T_149 = _schedule_T_148 | _schedule_T_138; // @[Mux.scala:30:73]
assign _schedule_WIRE_8 = _schedule_T_149; // @[Mux.scala:30:73]
assign _schedule_WIRE_2_way = _schedule_WIRE_8; // @[Mux.scala:30:73]
wire [10:0] _schedule_T_150 = _schedule_T ? _mshrs_0_io_schedule_bits_dir_bits_set : 11'h0; // @[Mux.scala:30:73, :32:36]
wire [10:0] _schedule_T_151 = _schedule_T_1 ? _mshrs_1_io_schedule_bits_dir_bits_set : 11'h0; // @[Mux.scala:30:73, :32:36]
wire [10:0] _schedule_T_152 = _schedule_T_2 ? _mshrs_2_io_schedule_bits_dir_bits_set : 11'h0; // @[Mux.scala:30:73, :32:36]
wire [10:0] _schedule_T_153 = _schedule_T_3 ? _mshrs_3_io_schedule_bits_dir_bits_set : 11'h0; // @[Mux.scala:30:73, :32:36]
wire [10:0] _schedule_T_154 = _schedule_T_4 ? _mshrs_4_io_schedule_bits_dir_bits_set : 11'h0; // @[Mux.scala:30:73, :32:36]
wire [10:0] _schedule_T_155 = _schedule_T_5 ? _mshrs_5_io_schedule_bits_dir_bits_set : 11'h0; // @[Mux.scala:30:73, :32:36]
wire [10:0] _schedule_T_156 = _schedule_T_6 ? _mshrs_6_io_schedule_bits_dir_bits_set : 11'h0; // @[Mux.scala:30:73, :32:36]
wire [10:0] _schedule_T_157 = _schedule_T_7 ? _mshrs_7_io_schedule_bits_dir_bits_set : 11'h0; // @[Mux.scala:30:73, :32:36]
wire [10:0] _schedule_T_158 = _schedule_T_8 ? _mshrs_8_io_schedule_bits_dir_bits_set : 11'h0; // @[Mux.scala:30:73, :32:36]
wire [10:0] _schedule_T_159 = _schedule_T_9 ? _mshrs_9_io_schedule_bits_dir_bits_set : 11'h0; // @[Mux.scala:30:73, :32:36]
wire [10:0] _schedule_T_160 = _schedule_T_10 ? _mshrs_10_io_schedule_bits_dir_bits_set : 11'h0; // @[Mux.scala:30:73, :32:36]
wire [10:0] _schedule_T_161 = _schedule_T_11 ? _mshrs_11_io_schedule_bits_dir_bits_set : 11'h0; // @[Mux.scala:30:73, :32:36]
wire [10:0] _schedule_T_162 = _schedule_T_150 | _schedule_T_151; // @[Mux.scala:30:73]
wire [10:0] _schedule_T_163 = _schedule_T_162 | _schedule_T_152; // @[Mux.scala:30:73]
wire [10:0] _schedule_T_164 = _schedule_T_163 | _schedule_T_153; // @[Mux.scala:30:73]
wire [10:0] _schedule_T_165 = _schedule_T_164 | _schedule_T_154; // @[Mux.scala:30:73]
wire [10:0] _schedule_T_166 = _schedule_T_165 | _schedule_T_155; // @[Mux.scala:30:73]
wire [10:0] _schedule_T_167 = _schedule_T_166 | _schedule_T_156; // @[Mux.scala:30:73]
wire [10:0] _schedule_T_168 = _schedule_T_167 | _schedule_T_157; // @[Mux.scala:30:73]
wire [10:0] _schedule_T_169 = _schedule_T_168 | _schedule_T_158; // @[Mux.scala:30:73]
wire [10:0] _schedule_T_170 = _schedule_T_169 | _schedule_T_159; // @[Mux.scala:30:73]
wire [10:0] _schedule_T_171 = _schedule_T_170 | _schedule_T_160; // @[Mux.scala:30:73]
wire [10:0] _schedule_T_172 = _schedule_T_171 | _schedule_T_161; // @[Mux.scala:30:73]
assign _schedule_WIRE_9 = _schedule_T_172; // @[Mux.scala:30:73]
assign _schedule_WIRE_2_set = _schedule_WIRE_9; // @[Mux.scala:30:73]
wire _schedule_T_173 = _schedule_T & _mshrs_0_io_schedule_bits_dir_valid; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_174 = _schedule_T_1 & _mshrs_1_io_schedule_bits_dir_valid; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_175 = _schedule_T_2 & _mshrs_2_io_schedule_bits_dir_valid; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_176 = _schedule_T_3 & _mshrs_3_io_schedule_bits_dir_valid; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_177 = _schedule_T_4 & _mshrs_4_io_schedule_bits_dir_valid; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_178 = _schedule_T_5 & _mshrs_5_io_schedule_bits_dir_valid; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_179 = _schedule_T_6 & _mshrs_6_io_schedule_bits_dir_valid; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_180 = _schedule_T_7 & _mshrs_7_io_schedule_bits_dir_valid; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_181 = _schedule_T_8 & _mshrs_8_io_schedule_bits_dir_valid; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_182 = _schedule_T_9 & _mshrs_9_io_schedule_bits_dir_valid; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_183 = _schedule_T_10 & _mshrs_10_io_schedule_bits_dir_valid; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_184 = _schedule_T_11 & _mshrs_11_io_schedule_bits_dir_valid; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_185 = _schedule_T_173 | _schedule_T_174; // @[Mux.scala:30:73]
wire _schedule_T_186 = _schedule_T_185 | _schedule_T_175; // @[Mux.scala:30:73]
wire _schedule_T_187 = _schedule_T_186 | _schedule_T_176; // @[Mux.scala:30:73]
wire _schedule_T_188 = _schedule_T_187 | _schedule_T_177; // @[Mux.scala:30:73]
wire _schedule_T_189 = _schedule_T_188 | _schedule_T_178; // @[Mux.scala:30:73]
wire _schedule_T_190 = _schedule_T_189 | _schedule_T_179; // @[Mux.scala:30:73]
wire _schedule_T_191 = _schedule_T_190 | _schedule_T_180; // @[Mux.scala:30:73]
wire _schedule_T_192 = _schedule_T_191 | _schedule_T_181; // @[Mux.scala:30:73]
wire _schedule_T_193 = _schedule_T_192 | _schedule_T_182; // @[Mux.scala:30:73]
wire _schedule_T_194 = _schedule_T_193 | _schedule_T_183; // @[Mux.scala:30:73]
wire _schedule_T_195 = _schedule_T_194 | _schedule_T_184; // @[Mux.scala:30:73]
assign _schedule_WIRE_10 = _schedule_T_195; // @[Mux.scala:30:73]
assign _schedule_WIRE_1_valid = _schedule_WIRE_10; // @[Mux.scala:30:73]
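  // Source X request of the granted MSHR (valid only), selected the same way.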
wire _schedule_WIRE_14; // @[Mux.scala:30:73]
assign schedule_x_valid = _schedule_WIRE_11_valid; // @[Mux.scala:30:73]
wire _schedule_T_219 = _schedule_T & _mshrs_0_io_schedule_bits_x_valid; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_220 = _schedule_T_1 & _mshrs_1_io_schedule_bits_x_valid; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_221 = _schedule_T_2 & _mshrs_2_io_schedule_bits_x_valid; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_222 = _schedule_T_3 & _mshrs_3_io_schedule_bits_x_valid; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_223 = _schedule_T_4 & _mshrs_4_io_schedule_bits_x_valid; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_224 = _schedule_T_5 & _mshrs_5_io_schedule_bits_x_valid; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_225 = _schedule_T_6 & _mshrs_6_io_schedule_bits_x_valid; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_226 = _schedule_T_7 & _mshrs_7_io_schedule_bits_x_valid; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_227 = _schedule_T_8 & _mshrs_8_io_schedule_bits_x_valid; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_228 = _schedule_T_9 & _mshrs_9_io_schedule_bits_x_valid; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_229 = _schedule_T_10 & _mshrs_10_io_schedule_bits_x_valid; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_230 = _schedule_T_11 & _mshrs_11_io_schedule_bits_x_valid; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_231 = _schedule_T_219 | _schedule_T_220; // @[Mux.scala:30:73]
wire _schedule_T_232 = _schedule_T_231 | _schedule_T_221; // @[Mux.scala:30:73]
wire _schedule_T_233 = _schedule_T_232 | _schedule_T_222; // @[Mux.scala:30:73]
wire _schedule_T_234 = _schedule_T_233 | _schedule_T_223; // @[Mux.scala:30:73]
wire _schedule_T_235 = _schedule_T_234 | _schedule_T_224; // @[Mux.scala:30:73]
wire _schedule_T_236 = _schedule_T_235 | _schedule_T_225; // @[Mux.scala:30:73]
wire _schedule_T_237 = _schedule_T_236 | _schedule_T_226; // @[Mux.scala:30:73]
wire _schedule_T_238 = _schedule_T_237 | _schedule_T_227; // @[Mux.scala:30:73]
wire _schedule_T_239 = _schedule_T_238 | _schedule_T_228; // @[Mux.scala:30:73]
wire _schedule_T_240 = _schedule_T_239 | _schedule_T_229; // @[Mux.scala:30:73]
wire _schedule_T_241 = _schedule_T_240 | _schedule_T_230; // @[Mux.scala:30:73]
assign _schedule_WIRE_14 = _schedule_T_241; // @[Mux.scala:30:73]
assign _schedule_WIRE_11_valid = _schedule_WIRE_14; // @[Mux.scala:30:73]
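  // Source E request of the granted MSHR: sink field and valid, selected the same way.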
wire _schedule_WIRE_18; // @[Mux.scala:30:73]
assign schedule_e_valid = _schedule_WIRE_15_valid; // @[Mux.scala:30:73]
wire [2:0] _schedule_WIRE_16_sink; // @[Mux.scala:30:73]
assign schedule_e_bits_sink = _schedule_WIRE_15_bits_sink; // @[Mux.scala:30:73]
wire [2:0] _schedule_WIRE_17; // @[Mux.scala:30:73]
assign _schedule_WIRE_15_bits_sink = _schedule_WIRE_16_sink; // @[Mux.scala:30:73]
wire [2:0] _schedule_T_242 = _schedule_T ? _mshrs_0_io_schedule_bits_e_bits_sink : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _schedule_T_243 = _schedule_T_1 ? _mshrs_1_io_schedule_bits_e_bits_sink : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _schedule_T_244 = _schedule_T_2 ? _mshrs_2_io_schedule_bits_e_bits_sink : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _schedule_T_245 = _schedule_T_3 ? _mshrs_3_io_schedule_bits_e_bits_sink : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _schedule_T_246 = _schedule_T_4 ? _mshrs_4_io_schedule_bits_e_bits_sink : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _schedule_T_247 = _schedule_T_5 ? _mshrs_5_io_schedule_bits_e_bits_sink : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _schedule_T_248 = _schedule_T_6 ? _mshrs_6_io_schedule_bits_e_bits_sink : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _schedule_T_249 = _schedule_T_7 ? _mshrs_7_io_schedule_bits_e_bits_sink : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _schedule_T_250 = _schedule_T_8 ? _mshrs_8_io_schedule_bits_e_bits_sink : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _schedule_T_251 = _schedule_T_9 ? _mshrs_9_io_schedule_bits_e_bits_sink : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _schedule_T_252 = _schedule_T_10 ? _mshrs_10_io_schedule_bits_e_bits_sink : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _schedule_T_253 = _schedule_T_11 ? _mshrs_11_io_schedule_bits_e_bits_sink : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _schedule_T_254 = _schedule_T_242 | _schedule_T_243; // @[Mux.scala:30:73]
wire [2:0] _schedule_T_255 = _schedule_T_254 | _schedule_T_244; // @[Mux.scala:30:73]
wire [2:0] _schedule_T_256 = _schedule_T_255 | _schedule_T_245; // @[Mux.scala:30:73]
wire [2:0] _schedule_T_257 = _schedule_T_256 | _schedule_T_246; // @[Mux.scala:30:73]
wire [2:0] _schedule_T_258 = _schedule_T_257 | _schedule_T_247; // @[Mux.scala:30:73]
wire [2:0] _schedule_T_259 = _schedule_T_258 | _schedule_T_248; // @[Mux.scala:30:73]
wire [2:0] _schedule_T_260 = _schedule_T_259 | _schedule_T_249; // @[Mux.scala:30:73]
wire [2:0] _schedule_T_261 = _schedule_T_260 | _schedule_T_250; // @[Mux.scala:30:73]
wire [2:0] _schedule_T_262 = _schedule_T_261 | _schedule_T_251; // @[Mux.scala:30:73]
wire [2:0] _schedule_T_263 = _schedule_T_262 | _schedule_T_252; // @[Mux.scala:30:73]
wire [2:0] _schedule_T_264 = _schedule_T_263 | _schedule_T_253; // @[Mux.scala:30:73]
assign _schedule_WIRE_17 = _schedule_T_264; // @[Mux.scala:30:73]
assign _schedule_WIRE_16_sink = _schedule_WIRE_17; // @[Mux.scala:30:73]
wire _schedule_T_265 = _schedule_T & _mshrs_0_io_schedule_bits_e_valid; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_266 = _schedule_T_1 & _mshrs_1_io_schedule_bits_e_valid; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_267 = _schedule_T_2 & _mshrs_2_io_schedule_bits_e_valid; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_268 = _schedule_T_3 & _mshrs_3_io_schedule_bits_e_valid; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_269 = _schedule_T_4 & _mshrs_4_io_schedule_bits_e_valid; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_270 = _schedule_T_5 & _mshrs_5_io_schedule_bits_e_valid; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_271 = _schedule_T_6 & _mshrs_6_io_schedule_bits_e_valid; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_272 = _schedule_T_7 & _mshrs_7_io_schedule_bits_e_valid; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_273 = _schedule_T_8 & _mshrs_8_io_schedule_bits_e_valid; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_274 = _schedule_T_9 & _mshrs_9_io_schedule_bits_e_valid; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_275 = _schedule_T_10 & _mshrs_10_io_schedule_bits_e_valid; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_276 = _schedule_T_11 & _mshrs_11_io_schedule_bits_e_valid; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_277 = _schedule_T_265 | _schedule_T_266; // @[Mux.scala:30:73]
wire _schedule_T_278 = _schedule_T_277 | _schedule_T_267; // @[Mux.scala:30:73]
wire _schedule_T_279 = _schedule_T_278 | _schedule_T_268; // @[Mux.scala:30:73]
wire _schedule_T_280 = _schedule_T_279 | _schedule_T_269; // @[Mux.scala:30:73]
wire _schedule_T_281 = _schedule_T_280 | _schedule_T_270; // @[Mux.scala:30:73]
wire _schedule_T_282 = _schedule_T_281 | _schedule_T_271; // @[Mux.scala:30:73]
wire _schedule_T_283 = _schedule_T_282 | _schedule_T_272; // @[Mux.scala:30:73]
wire _schedule_T_284 = _schedule_T_283 | _schedule_T_273; // @[Mux.scala:30:73]
wire _schedule_T_285 = _schedule_T_284 | _schedule_T_274; // @[Mux.scala:30:73]
wire _schedule_T_286 = _schedule_T_285 | _schedule_T_275; // @[Mux.scala:30:73]
wire _schedule_T_287 = _schedule_T_286 | _schedule_T_276; // @[Mux.scala:30:73]
assign _schedule_WIRE_18 = _schedule_T_287; // @[Mux.scala:30:73]
assign _schedule_WIRE_15_valid = _schedule_WIRE_18; // @[Mux.scala:30:73]
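  // schedule.d output: the winning MSHR's D-channel request (prio, control, opcode, param, size, source, tag, offset, put, set, way, bad) is forwarded through the _schedule_WIRE_19/_schedule_WIRE_20 view wires below.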
wire _schedule_WIRE_37; // @[Mux.scala:30:73]
assign schedule_d_valid = _schedule_WIRE_19_valid; // @[Mux.scala:30:73]
wire _schedule_WIRE_20_prio_0; // @[Mux.scala:30:73]
assign schedule_d_bits_prio_0 = _schedule_WIRE_19_bits_prio_0; // @[Mux.scala:30:73]
wire _schedule_WIRE_20_prio_1; // @[Mux.scala:30:73]
assign schedule_d_bits_prio_1 = _schedule_WIRE_19_bits_prio_1; // @[Mux.scala:30:73]
wire _schedule_WIRE_20_prio_2; // @[Mux.scala:30:73]
assign schedule_d_bits_prio_2 = _schedule_WIRE_19_bits_prio_2; // @[Mux.scala:30:73]
wire _schedule_WIRE_20_control; // @[Mux.scala:30:73]
assign schedule_d_bits_control = _schedule_WIRE_19_bits_control; // @[Mux.scala:30:73]
wire [2:0] _schedule_WIRE_20_opcode; // @[Mux.scala:30:73]
assign schedule_d_bits_opcode = _schedule_WIRE_19_bits_opcode; // @[Mux.scala:30:73]
wire [2:0] _schedule_WIRE_20_param; // @[Mux.scala:30:73]
assign schedule_d_bits_param = _schedule_WIRE_19_bits_param; // @[Mux.scala:30:73]
wire [2:0] _schedule_WIRE_20_size; // @[Mux.scala:30:73]
assign schedule_d_bits_size = _schedule_WIRE_19_bits_size; // @[Mux.scala:30:73]
wire [5:0] _schedule_WIRE_20_source; // @[Mux.scala:30:73]
assign schedule_d_bits_source = _schedule_WIRE_19_bits_source; // @[Mux.scala:30:73]
wire [8:0] _schedule_WIRE_20_tag; // @[Mux.scala:30:73]
assign schedule_d_bits_tag = _schedule_WIRE_19_bits_tag; // @[Mux.scala:30:73]
wire [5:0] _schedule_WIRE_20_offset; // @[Mux.scala:30:73]
assign schedule_d_bits_offset = _schedule_WIRE_19_bits_offset; // @[Mux.scala:30:73]
wire [5:0] _schedule_WIRE_20_put; // @[Mux.scala:30:73]
assign schedule_d_bits_put = _schedule_WIRE_19_bits_put; // @[Mux.scala:30:73]
wire [10:0] _schedule_WIRE_20_set; // @[Mux.scala:30:73]
assign schedule_d_bits_set = _schedule_WIRE_19_bits_set; // @[Mux.scala:30:73]
wire [3:0] _schedule_WIRE_20_way; // @[Mux.scala:30:73]
assign schedule_d_bits_way = _schedule_WIRE_19_bits_way; // @[Mux.scala:30:73]
wire _schedule_WIRE_20_bad; // @[Mux.scala:30:73]
assign schedule_d_bits_bad = _schedule_WIRE_19_bits_bad; // @[Mux.scala:30:73]
wire _schedule_WIRE_33_0; // @[Mux.scala:30:73]
assign _schedule_WIRE_19_bits_prio_0 = _schedule_WIRE_20_prio_0; // @[Mux.scala:30:73]
wire _schedule_WIRE_33_1; // @[Mux.scala:30:73]
assign _schedule_WIRE_19_bits_prio_1 = _schedule_WIRE_20_prio_1; // @[Mux.scala:30:73]
wire _schedule_WIRE_33_2; // @[Mux.scala:30:73]
assign _schedule_WIRE_19_bits_prio_2 = _schedule_WIRE_20_prio_2; // @[Mux.scala:30:73]
wire _schedule_WIRE_32; // @[Mux.scala:30:73]
assign _schedule_WIRE_19_bits_control = _schedule_WIRE_20_control; // @[Mux.scala:30:73]
wire [2:0] _schedule_WIRE_31; // @[Mux.scala:30:73]
assign _schedule_WIRE_19_bits_opcode = _schedule_WIRE_20_opcode; // @[Mux.scala:30:73]
wire [2:0] _schedule_WIRE_30; // @[Mux.scala:30:73]
assign _schedule_WIRE_19_bits_param = _schedule_WIRE_20_param; // @[Mux.scala:30:73]
wire [2:0] _schedule_WIRE_29; // @[Mux.scala:30:73]
assign _schedule_WIRE_19_bits_size = _schedule_WIRE_20_size; // @[Mux.scala:30:73]
wire [5:0] _schedule_WIRE_28; // @[Mux.scala:30:73]
assign _schedule_WIRE_19_bits_source = _schedule_WIRE_20_source; // @[Mux.scala:30:73]
wire [8:0] _schedule_WIRE_27; // @[Mux.scala:30:73]
assign _schedule_WIRE_19_bits_tag = _schedule_WIRE_20_tag; // @[Mux.scala:30:73]
wire [5:0] _schedule_WIRE_26; // @[Mux.scala:30:73]
assign _schedule_WIRE_19_bits_offset = _schedule_WIRE_20_offset; // @[Mux.scala:30:73]
wire [5:0] _schedule_WIRE_25; // @[Mux.scala:30:73]
assign _schedule_WIRE_19_bits_put = _schedule_WIRE_20_put; // @[Mux.scala:30:73]
wire [10:0] _schedule_WIRE_24; // @[Mux.scala:30:73]
assign _schedule_WIRE_19_bits_set = _schedule_WIRE_20_set; // @[Mux.scala:30:73]
wire [3:0] _schedule_WIRE_22; // @[Mux.scala:30:73]
assign _schedule_WIRE_19_bits_way = _schedule_WIRE_20_way; // @[Mux.scala:30:73]
wire _schedule_WIRE_21; // @[Mux.scala:30:73]
assign _schedule_WIRE_19_bits_bad = _schedule_WIRE_20_bad; // @[Mux.scala:30:73]
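  // Per-field one-hot mux (the Chisel Mux1H idiom from Mux.scala:30:73/:32:36): each term is either the selected MSHR's field or zero, so the OR-reduction yields the winning MSHR's value.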
wire _schedule_T_288 = _schedule_T & _mshrs_0_io_schedule_bits_d_bits_bad; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_289 = _schedule_T_1 & _mshrs_1_io_schedule_bits_d_bits_bad; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_290 = _schedule_T_2 & _mshrs_2_io_schedule_bits_d_bits_bad; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_291 = _schedule_T_3 & _mshrs_3_io_schedule_bits_d_bits_bad; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_292 = _schedule_T_4 & _mshrs_4_io_schedule_bits_d_bits_bad; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_293 = _schedule_T_5 & _mshrs_5_io_schedule_bits_d_bits_bad; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_294 = _schedule_T_6 & _mshrs_6_io_schedule_bits_d_bits_bad; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_295 = _schedule_T_7 & _mshrs_7_io_schedule_bits_d_bits_bad; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_296 = _schedule_T_8 & _mshrs_8_io_schedule_bits_d_bits_bad; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_297 = _schedule_T_9 & _mshrs_9_io_schedule_bits_d_bits_bad; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_298 = _schedule_T_10 & _mshrs_10_io_schedule_bits_d_bits_bad; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_299 = _schedule_T_11 & _mshrs_11_io_schedule_bits_d_bits_bad; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_300 = _schedule_T_288 | _schedule_T_289; // @[Mux.scala:30:73]
wire _schedule_T_301 = _schedule_T_300 | _schedule_T_290; // @[Mux.scala:30:73]
wire _schedule_T_302 = _schedule_T_301 | _schedule_T_291; // @[Mux.scala:30:73]
wire _schedule_T_303 = _schedule_T_302 | _schedule_T_292; // @[Mux.scala:30:73]
wire _schedule_T_304 = _schedule_T_303 | _schedule_T_293; // @[Mux.scala:30:73]
wire _schedule_T_305 = _schedule_T_304 | _schedule_T_294; // @[Mux.scala:30:73]
wire _schedule_T_306 = _schedule_T_305 | _schedule_T_295; // @[Mux.scala:30:73]
wire _schedule_T_307 = _schedule_T_306 | _schedule_T_296; // @[Mux.scala:30:73]
wire _schedule_T_308 = _schedule_T_307 | _schedule_T_297; // @[Mux.scala:30:73]
wire _schedule_T_309 = _schedule_T_308 | _schedule_T_298; // @[Mux.scala:30:73]
wire _schedule_T_310 = _schedule_T_309 | _schedule_T_299; // @[Mux.scala:30:73]
assign _schedule_WIRE_21 = _schedule_T_310; // @[Mux.scala:30:73]
assign _schedule_WIRE_20_bad = _schedule_WIRE_21; // @[Mux.scala:30:73]
wire [3:0] _schedule_T_311 = _schedule_T ? _mshrs_0_io_schedule_bits_d_bits_way : 4'h0; // @[Mux.scala:30:73, :32:36]
wire [3:0] _schedule_T_312 = _schedule_T_1 ? _mshrs_1_io_schedule_bits_d_bits_way : 4'h0; // @[Mux.scala:30:73, :32:36]
wire [3:0] _schedule_T_313 = _schedule_T_2 ? _mshrs_2_io_schedule_bits_d_bits_way : 4'h0; // @[Mux.scala:30:73, :32:36]
wire [3:0] _schedule_T_314 = _schedule_T_3 ? _mshrs_3_io_schedule_bits_d_bits_way : 4'h0; // @[Mux.scala:30:73, :32:36]
wire [3:0] _schedule_T_315 = _schedule_T_4 ? _mshrs_4_io_schedule_bits_d_bits_way : 4'h0; // @[Mux.scala:30:73, :32:36]
wire [3:0] _schedule_T_316 = _schedule_T_5 ? _mshrs_5_io_schedule_bits_d_bits_way : 4'h0; // @[Mux.scala:30:73, :32:36]
wire [3:0] _schedule_T_317 = _schedule_T_6 ? _mshrs_6_io_schedule_bits_d_bits_way : 4'h0; // @[Mux.scala:30:73, :32:36]
wire [3:0] _schedule_T_318 = _schedule_T_7 ? _mshrs_7_io_schedule_bits_d_bits_way : 4'h0; // @[Mux.scala:30:73, :32:36]
wire [3:0] _schedule_T_319 = _schedule_T_8 ? _mshrs_8_io_schedule_bits_d_bits_way : 4'h0; // @[Mux.scala:30:73, :32:36]
wire [3:0] _schedule_T_320 = _schedule_T_9 ? _mshrs_9_io_schedule_bits_d_bits_way : 4'h0; // @[Mux.scala:30:73, :32:36]
wire [3:0] _schedule_T_321 = _schedule_T_10 ? _mshrs_10_io_schedule_bits_d_bits_way : 4'h0; // @[Mux.scala:30:73, :32:36]
wire [3:0] _schedule_T_322 = _schedule_T_11 ? _mshrs_11_io_schedule_bits_d_bits_way : 4'h0; // @[Mux.scala:30:73, :32:36]
wire [3:0] _schedule_T_323 = _schedule_T_311 | _schedule_T_312; // @[Mux.scala:30:73]
wire [3:0] _schedule_T_324 = _schedule_T_323 | _schedule_T_313; // @[Mux.scala:30:73]
wire [3:0] _schedule_T_325 = _schedule_T_324 | _schedule_T_314; // @[Mux.scala:30:73]
wire [3:0] _schedule_T_326 = _schedule_T_325 | _schedule_T_315; // @[Mux.scala:30:73]
wire [3:0] _schedule_T_327 = _schedule_T_326 | _schedule_T_316; // @[Mux.scala:30:73]
wire [3:0] _schedule_T_328 = _schedule_T_327 | _schedule_T_317; // @[Mux.scala:30:73]
wire [3:0] _schedule_T_329 = _schedule_T_328 | _schedule_T_318; // @[Mux.scala:30:73]
wire [3:0] _schedule_T_330 = _schedule_T_329 | _schedule_T_319; // @[Mux.scala:30:73]
wire [3:0] _schedule_T_331 = _schedule_T_330 | _schedule_T_320; // @[Mux.scala:30:73]
wire [3:0] _schedule_T_332 = _schedule_T_331 | _schedule_T_321; // @[Mux.scala:30:73]
wire [3:0] _schedule_T_333 = _schedule_T_332 | _schedule_T_322; // @[Mux.scala:30:73]
assign _schedule_WIRE_22 = _schedule_T_333; // @[Mux.scala:30:73]
assign _schedule_WIRE_20_way = _schedule_WIRE_22; // @[Mux.scala:30:73]
wire [10:0] _schedule_T_357 = _schedule_T ? _mshrs_0_io_schedule_bits_d_bits_set : 11'h0; // @[Mux.scala:30:73, :32:36]
wire [10:0] _schedule_T_358 = _schedule_T_1 ? _mshrs_1_io_schedule_bits_d_bits_set : 11'h0; // @[Mux.scala:30:73, :32:36]
wire [10:0] _schedule_T_359 = _schedule_T_2 ? _mshrs_2_io_schedule_bits_d_bits_set : 11'h0; // @[Mux.scala:30:73, :32:36]
wire [10:0] _schedule_T_360 = _schedule_T_3 ? _mshrs_3_io_schedule_bits_d_bits_set : 11'h0; // @[Mux.scala:30:73, :32:36]
wire [10:0] _schedule_T_361 = _schedule_T_4 ? _mshrs_4_io_schedule_bits_d_bits_set : 11'h0; // @[Mux.scala:30:73, :32:36]
wire [10:0] _schedule_T_362 = _schedule_T_5 ? _mshrs_5_io_schedule_bits_d_bits_set : 11'h0; // @[Mux.scala:30:73, :32:36]
wire [10:0] _schedule_T_363 = _schedule_T_6 ? _mshrs_6_io_schedule_bits_d_bits_set : 11'h0; // @[Mux.scala:30:73, :32:36]
wire [10:0] _schedule_T_364 = _schedule_T_7 ? _mshrs_7_io_schedule_bits_d_bits_set : 11'h0; // @[Mux.scala:30:73, :32:36]
wire [10:0] _schedule_T_365 = _schedule_T_8 ? _mshrs_8_io_schedule_bits_d_bits_set : 11'h0; // @[Mux.scala:30:73, :32:36]
wire [10:0] _schedule_T_366 = _schedule_T_9 ? _mshrs_9_io_schedule_bits_d_bits_set : 11'h0; // @[Mux.scala:30:73, :32:36]
wire [10:0] _schedule_T_367 = _schedule_T_10 ? _mshrs_10_io_schedule_bits_d_bits_set : 11'h0; // @[Mux.scala:30:73, :32:36]
wire [10:0] _schedule_T_368 = _schedule_T_11 ? _mshrs_11_io_schedule_bits_d_bits_set : 11'h0; // @[Mux.scala:30:73, :32:36]
wire [10:0] _schedule_T_369 = _schedule_T_357 | _schedule_T_358; // @[Mux.scala:30:73]
wire [10:0] _schedule_T_370 = _schedule_T_369 | _schedule_T_359; // @[Mux.scala:30:73]
wire [10:0] _schedule_T_371 = _schedule_T_370 | _schedule_T_360; // @[Mux.scala:30:73]
wire [10:0] _schedule_T_372 = _schedule_T_371 | _schedule_T_361; // @[Mux.scala:30:73]
wire [10:0] _schedule_T_373 = _schedule_T_372 | _schedule_T_362; // @[Mux.scala:30:73]
wire [10:0] _schedule_T_374 = _schedule_T_373 | _schedule_T_363; // @[Mux.scala:30:73]
wire [10:0] _schedule_T_375 = _schedule_T_374 | _schedule_T_364; // @[Mux.scala:30:73]
wire [10:0] _schedule_T_376 = _schedule_T_375 | _schedule_T_365; // @[Mux.scala:30:73]
wire [10:0] _schedule_T_377 = _schedule_T_376 | _schedule_T_366; // @[Mux.scala:30:73]
wire [10:0] _schedule_T_378 = _schedule_T_377 | _schedule_T_367; // @[Mux.scala:30:73]
wire [10:0] _schedule_T_379 = _schedule_T_378 | _schedule_T_368; // @[Mux.scala:30:73]
assign _schedule_WIRE_24 = _schedule_T_379; // @[Mux.scala:30:73]
assign _schedule_WIRE_20_set = _schedule_WIRE_24; // @[Mux.scala:30:73]
wire [5:0] _schedule_T_380 = _schedule_T ? _mshrs_0_io_schedule_bits_d_bits_put : 6'h0; // @[Mux.scala:30:73, :32:36]
wire [5:0] _schedule_T_381 = _schedule_T_1 ? _mshrs_1_io_schedule_bits_d_bits_put : 6'h0; // @[Mux.scala:30:73, :32:36]
wire [5:0] _schedule_T_382 = _schedule_T_2 ? _mshrs_2_io_schedule_bits_d_bits_put : 6'h0; // @[Mux.scala:30:73, :32:36]
wire [5:0] _schedule_T_383 = _schedule_T_3 ? _mshrs_3_io_schedule_bits_d_bits_put : 6'h0; // @[Mux.scala:30:73, :32:36]
wire [5:0] _schedule_T_384 = _schedule_T_4 ? _mshrs_4_io_schedule_bits_d_bits_put : 6'h0; // @[Mux.scala:30:73, :32:36]
wire [5:0] _schedule_T_385 = _schedule_T_5 ? _mshrs_5_io_schedule_bits_d_bits_put : 6'h0; // @[Mux.scala:30:73, :32:36]
wire [5:0] _schedule_T_386 = _schedule_T_6 ? _mshrs_6_io_schedule_bits_d_bits_put : 6'h0; // @[Mux.scala:30:73, :32:36]
wire [5:0] _schedule_T_387 = _schedule_T_7 ? _mshrs_7_io_schedule_bits_d_bits_put : 6'h0; // @[Mux.scala:30:73, :32:36]
wire [5:0] _schedule_T_388 = _schedule_T_8 ? _mshrs_8_io_schedule_bits_d_bits_put : 6'h0; // @[Mux.scala:30:73, :32:36]
wire [5:0] _schedule_T_389 = _schedule_T_9 ? _mshrs_9_io_schedule_bits_d_bits_put : 6'h0; // @[Mux.scala:30:73, :32:36]
wire [5:0] _schedule_T_390 = _schedule_T_10 ? _mshrs_10_io_schedule_bits_d_bits_put : 6'h0; // @[Mux.scala:30:73, :32:36]
wire [5:0] _schedule_T_391 = _schedule_T_11 ? _mshrs_11_io_schedule_bits_d_bits_put : 6'h0; // @[Mux.scala:30:73, :32:36]
wire [5:0] _schedule_T_392 = _schedule_T_380 | _schedule_T_381; // @[Mux.scala:30:73]
wire [5:0] _schedule_T_393 = _schedule_T_392 | _schedule_T_382; // @[Mux.scala:30:73]
wire [5:0] _schedule_T_394 = _schedule_T_393 | _schedule_T_383; // @[Mux.scala:30:73]
wire [5:0] _schedule_T_395 = _schedule_T_394 | _schedule_T_384; // @[Mux.scala:30:73]
wire [5:0] _schedule_T_396 = _schedule_T_395 | _schedule_T_385; // @[Mux.scala:30:73]
wire [5:0] _schedule_T_397 = _schedule_T_396 | _schedule_T_386; // @[Mux.scala:30:73]
wire [5:0] _schedule_T_398 = _schedule_T_397 | _schedule_T_387; // @[Mux.scala:30:73]
wire [5:0] _schedule_T_399 = _schedule_T_398 | _schedule_T_388; // @[Mux.scala:30:73]
wire [5:0] _schedule_T_400 = _schedule_T_399 | _schedule_T_389; // @[Mux.scala:30:73]
wire [5:0] _schedule_T_401 = _schedule_T_400 | _schedule_T_390; // @[Mux.scala:30:73]
wire [5:0] _schedule_T_402 = _schedule_T_401 | _schedule_T_391; // @[Mux.scala:30:73]
assign _schedule_WIRE_25 = _schedule_T_402; // @[Mux.scala:30:73]
assign _schedule_WIRE_20_put = _schedule_WIRE_25; // @[Mux.scala:30:73]
wire [5:0] _schedule_T_403 = _schedule_T ? _mshrs_0_io_schedule_bits_d_bits_offset : 6'h0; // @[Mux.scala:30:73, :32:36]
wire [5:0] _schedule_T_404 = _schedule_T_1 ? _mshrs_1_io_schedule_bits_d_bits_offset : 6'h0; // @[Mux.scala:30:73, :32:36]
wire [5:0] _schedule_T_405 = _schedule_T_2 ? _mshrs_2_io_schedule_bits_d_bits_offset : 6'h0; // @[Mux.scala:30:73, :32:36]
wire [5:0] _schedule_T_406 = _schedule_T_3 ? _mshrs_3_io_schedule_bits_d_bits_offset : 6'h0; // @[Mux.scala:30:73, :32:36]
wire [5:0] _schedule_T_407 = _schedule_T_4 ? _mshrs_4_io_schedule_bits_d_bits_offset : 6'h0; // @[Mux.scala:30:73, :32:36]
wire [5:0] _schedule_T_408 = _schedule_T_5 ? _mshrs_5_io_schedule_bits_d_bits_offset : 6'h0; // @[Mux.scala:30:73, :32:36]
wire [5:0] _schedule_T_409 = _schedule_T_6 ? _mshrs_6_io_schedule_bits_d_bits_offset : 6'h0; // @[Mux.scala:30:73, :32:36]
wire [5:0] _schedule_T_410 = _schedule_T_7 ? _mshrs_7_io_schedule_bits_d_bits_offset : 6'h0; // @[Mux.scala:30:73, :32:36]
wire [5:0] _schedule_T_411 = _schedule_T_8 ? _mshrs_8_io_schedule_bits_d_bits_offset : 6'h0; // @[Mux.scala:30:73, :32:36]
wire [5:0] _schedule_T_412 = _schedule_T_9 ? _mshrs_9_io_schedule_bits_d_bits_offset : 6'h0; // @[Mux.scala:30:73, :32:36]
wire [5:0] _schedule_T_413 = _schedule_T_10 ? _mshrs_10_io_schedule_bits_d_bits_offset : 6'h0; // @[Mux.scala:30:73, :32:36]
wire [5:0] _schedule_T_414 = _schedule_T_11 ? _mshrs_11_io_schedule_bits_d_bits_offset : 6'h0; // @[Mux.scala:30:73, :32:36]
wire [5:0] _schedule_T_415 = _schedule_T_403 | _schedule_T_404; // @[Mux.scala:30:73]
wire [5:0] _schedule_T_416 = _schedule_T_415 | _schedule_T_405; // @[Mux.scala:30:73]
wire [5:0] _schedule_T_417 = _schedule_T_416 | _schedule_T_406; // @[Mux.scala:30:73]
wire [5:0] _schedule_T_418 = _schedule_T_417 | _schedule_T_407; // @[Mux.scala:30:73]
wire [5:0] _schedule_T_419 = _schedule_T_418 | _schedule_T_408; // @[Mux.scala:30:73]
wire [5:0] _schedule_T_420 = _schedule_T_419 | _schedule_T_409; // @[Mux.scala:30:73]
wire [5:0] _schedule_T_421 = _schedule_T_420 | _schedule_T_410; // @[Mux.scala:30:73]
wire [5:0] _schedule_T_422 = _schedule_T_421 | _schedule_T_411; // @[Mux.scala:30:73]
wire [5:0] _schedule_T_423 = _schedule_T_422 | _schedule_T_412; // @[Mux.scala:30:73]
wire [5:0] _schedule_T_424 = _schedule_T_423 | _schedule_T_413; // @[Mux.scala:30:73]
wire [5:0] _schedule_T_425 = _schedule_T_424 | _schedule_T_414; // @[Mux.scala:30:73]
assign _schedule_WIRE_26 = _schedule_T_425; // @[Mux.scala:30:73]
assign _schedule_WIRE_20_offset = _schedule_WIRE_26; // @[Mux.scala:30:73]
wire [8:0] _schedule_T_426 = _schedule_T ? _mshrs_0_io_schedule_bits_d_bits_tag : 9'h0; // @[Mux.scala:30:73, :32:36]
wire [8:0] _schedule_T_427 = _schedule_T_1 ? _mshrs_1_io_schedule_bits_d_bits_tag : 9'h0; // @[Mux.scala:30:73, :32:36]
wire [8:0] _schedule_T_428 = _schedule_T_2 ? _mshrs_2_io_schedule_bits_d_bits_tag : 9'h0; // @[Mux.scala:30:73, :32:36]
wire [8:0] _schedule_T_429 = _schedule_T_3 ? _mshrs_3_io_schedule_bits_d_bits_tag : 9'h0; // @[Mux.scala:30:73, :32:36]
wire [8:0] _schedule_T_430 = _schedule_T_4 ? _mshrs_4_io_schedule_bits_d_bits_tag : 9'h0; // @[Mux.scala:30:73, :32:36]
wire [8:0] _schedule_T_431 = _schedule_T_5 ? _mshrs_5_io_schedule_bits_d_bits_tag : 9'h0; // @[Mux.scala:30:73, :32:36]
wire [8:0] _schedule_T_432 = _schedule_T_6 ? _mshrs_6_io_schedule_bits_d_bits_tag : 9'h0; // @[Mux.scala:30:73, :32:36]
wire [8:0] _schedule_T_433 = _schedule_T_7 ? _mshrs_7_io_schedule_bits_d_bits_tag : 9'h0; // @[Mux.scala:30:73, :32:36]
wire [8:0] _schedule_T_434 = _schedule_T_8 ? _mshrs_8_io_schedule_bits_d_bits_tag : 9'h0; // @[Mux.scala:30:73, :32:36]
wire [8:0] _schedule_T_435 = _schedule_T_9 ? _mshrs_9_io_schedule_bits_d_bits_tag : 9'h0; // @[Mux.scala:30:73, :32:36]
wire [8:0] _schedule_T_436 = _schedule_T_10 ? _mshrs_10_io_schedule_bits_d_bits_tag : 9'h0; // @[Mux.scala:30:73, :32:36]
wire [8:0] _schedule_T_437 = _schedule_T_11 ? _mshrs_11_io_schedule_bits_d_bits_tag : 9'h0; // @[Mux.scala:30:73, :32:36]
wire [8:0] _schedule_T_438 = _schedule_T_426 | _schedule_T_427; // @[Mux.scala:30:73]
wire [8:0] _schedule_T_439 = _schedule_T_438 | _schedule_T_428; // @[Mux.scala:30:73]
wire [8:0] _schedule_T_440 = _schedule_T_439 | _schedule_T_429; // @[Mux.scala:30:73]
wire [8:0] _schedule_T_441 = _schedule_T_440 | _schedule_T_430; // @[Mux.scala:30:73]
wire [8:0] _schedule_T_442 = _schedule_T_441 | _schedule_T_431; // @[Mux.scala:30:73]
wire [8:0] _schedule_T_443 = _schedule_T_442 | _schedule_T_432; // @[Mux.scala:30:73]
wire [8:0] _schedule_T_444 = _schedule_T_443 | _schedule_T_433; // @[Mux.scala:30:73]
wire [8:0] _schedule_T_445 = _schedule_T_444 | _schedule_T_434; // @[Mux.scala:30:73]
wire [8:0] _schedule_T_446 = _schedule_T_445 | _schedule_T_435; // @[Mux.scala:30:73]
wire [8:0] _schedule_T_447 = _schedule_T_446 | _schedule_T_436; // @[Mux.scala:30:73]
wire [8:0] _schedule_T_448 = _schedule_T_447 | _schedule_T_437; // @[Mux.scala:30:73]
assign _schedule_WIRE_27 = _schedule_T_448; // @[Mux.scala:30:73]
assign _schedule_WIRE_20_tag = _schedule_WIRE_27; // @[Mux.scala:30:73]
wire [5:0] _schedule_T_449 = _schedule_T ? _mshrs_0_io_schedule_bits_d_bits_source : 6'h0; // @[Mux.scala:30:73, :32:36]
wire [5:0] _schedule_T_450 = _schedule_T_1 ? _mshrs_1_io_schedule_bits_d_bits_source : 6'h0; // @[Mux.scala:30:73, :32:36]
wire [5:0] _schedule_T_451 = _schedule_T_2 ? _mshrs_2_io_schedule_bits_d_bits_source : 6'h0; // @[Mux.scala:30:73, :32:36]
wire [5:0] _schedule_T_452 = _schedule_T_3 ? _mshrs_3_io_schedule_bits_d_bits_source : 6'h0; // @[Mux.scala:30:73, :32:36]
wire [5:0] _schedule_T_453 = _schedule_T_4 ? _mshrs_4_io_schedule_bits_d_bits_source : 6'h0; // @[Mux.scala:30:73, :32:36]
wire [5:0] _schedule_T_454 = _schedule_T_5 ? _mshrs_5_io_schedule_bits_d_bits_source : 6'h0; // @[Mux.scala:30:73, :32:36]
wire [5:0] _schedule_T_455 = _schedule_T_6 ? _mshrs_6_io_schedule_bits_d_bits_source : 6'h0; // @[Mux.scala:30:73, :32:36]
wire [5:0] _schedule_T_456 = _schedule_T_7 ? _mshrs_7_io_schedule_bits_d_bits_source : 6'h0; // @[Mux.scala:30:73, :32:36]
wire [5:0] _schedule_T_457 = _schedule_T_8 ? _mshrs_8_io_schedule_bits_d_bits_source : 6'h0; // @[Mux.scala:30:73, :32:36]
wire [5:0] _schedule_T_458 = _schedule_T_9 ? _mshrs_9_io_schedule_bits_d_bits_source : 6'h0; // @[Mux.scala:30:73, :32:36]
wire [5:0] _schedule_T_459 = _schedule_T_10 ? _mshrs_10_io_schedule_bits_d_bits_source : 6'h0; // @[Mux.scala:30:73, :32:36]
wire [5:0] _schedule_T_460 = _schedule_T_11 ? _mshrs_11_io_schedule_bits_d_bits_source : 6'h0; // @[Mux.scala:30:73, :32:36]
wire [5:0] _schedule_T_461 = _schedule_T_449 | _schedule_T_450; // @[Mux.scala:30:73]
wire [5:0] _schedule_T_462 = _schedule_T_461 | _schedule_T_451; // @[Mux.scala:30:73]
wire [5:0] _schedule_T_463 = _schedule_T_462 | _schedule_T_452; // @[Mux.scala:30:73]
wire [5:0] _schedule_T_464 = _schedule_T_463 | _schedule_T_453; // @[Mux.scala:30:73]
wire [5:0] _schedule_T_465 = _schedule_T_464 | _schedule_T_454; // @[Mux.scala:30:73]
wire [5:0] _schedule_T_466 = _schedule_T_465 | _schedule_T_455; // @[Mux.scala:30:73]
wire [5:0] _schedule_T_467 = _schedule_T_466 | _schedule_T_456; // @[Mux.scala:30:73]
wire [5:0] _schedule_T_468 = _schedule_T_467 | _schedule_T_457; // @[Mux.scala:30:73]
wire [5:0] _schedule_T_469 = _schedule_T_468 | _schedule_T_458; // @[Mux.scala:30:73]
wire [5:0] _schedule_T_470 = _schedule_T_469 | _schedule_T_459; // @[Mux.scala:30:73]
wire [5:0] _schedule_T_471 = _schedule_T_470 | _schedule_T_460; // @[Mux.scala:30:73]
assign _schedule_WIRE_28 = _schedule_T_471; // @[Mux.scala:30:73]
assign _schedule_WIRE_20_source = _schedule_WIRE_28; // @[Mux.scala:30:73]
wire [2:0] _schedule_T_472 = _schedule_T ? _mshrs_0_io_schedule_bits_d_bits_size : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _schedule_T_473 = _schedule_T_1 ? _mshrs_1_io_schedule_bits_d_bits_size : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _schedule_T_474 = _schedule_T_2 ? _mshrs_2_io_schedule_bits_d_bits_size : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _schedule_T_475 = _schedule_T_3 ? _mshrs_3_io_schedule_bits_d_bits_size : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _schedule_T_476 = _schedule_T_4 ? _mshrs_4_io_schedule_bits_d_bits_size : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _schedule_T_477 = _schedule_T_5 ? _mshrs_5_io_schedule_bits_d_bits_size : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _schedule_T_478 = _schedule_T_6 ? _mshrs_6_io_schedule_bits_d_bits_size : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _schedule_T_479 = _schedule_T_7 ? _mshrs_7_io_schedule_bits_d_bits_size : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _schedule_T_480 = _schedule_T_8 ? _mshrs_8_io_schedule_bits_d_bits_size : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _schedule_T_481 = _schedule_T_9 ? _mshrs_9_io_schedule_bits_d_bits_size : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _schedule_T_482 = _schedule_T_10 ? _mshrs_10_io_schedule_bits_d_bits_size : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _schedule_T_483 = _schedule_T_11 ? _mshrs_11_io_schedule_bits_d_bits_size : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _schedule_T_484 = _schedule_T_472 | _schedule_T_473; // @[Mux.scala:30:73]
wire [2:0] _schedule_T_485 = _schedule_T_484 | _schedule_T_474; // @[Mux.scala:30:73]
wire [2:0] _schedule_T_486 = _schedule_T_485 | _schedule_T_475; // @[Mux.scala:30:73]
wire [2:0] _schedule_T_487 = _schedule_T_486 | _schedule_T_476; // @[Mux.scala:30:73]
wire [2:0] _schedule_T_488 = _schedule_T_487 | _schedule_T_477; // @[Mux.scala:30:73]
wire [2:0] _schedule_T_489 = _schedule_T_488 | _schedule_T_478; // @[Mux.scala:30:73]
wire [2:0] _schedule_T_490 = _schedule_T_489 | _schedule_T_479; // @[Mux.scala:30:73]
wire [2:0] _schedule_T_491 = _schedule_T_490 | _schedule_T_480; // @[Mux.scala:30:73]
wire [2:0] _schedule_T_492 = _schedule_T_491 | _schedule_T_481; // @[Mux.scala:30:73]
wire [2:0] _schedule_T_493 = _schedule_T_492 | _schedule_T_482; // @[Mux.scala:30:73]
wire [2:0] _schedule_T_494 = _schedule_T_493 | _schedule_T_483; // @[Mux.scala:30:73]
assign _schedule_WIRE_29 = _schedule_T_494; // @[Mux.scala:30:73]
assign _schedule_WIRE_20_size = _schedule_WIRE_29; // @[Mux.scala:30:73]
wire [2:0] _schedule_T_495 = _schedule_T ? _mshrs_0_io_schedule_bits_d_bits_param : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _schedule_T_496 = _schedule_T_1 ? _mshrs_1_io_schedule_bits_d_bits_param : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _schedule_T_497 = _schedule_T_2 ? _mshrs_2_io_schedule_bits_d_bits_param : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _schedule_T_498 = _schedule_T_3 ? _mshrs_3_io_schedule_bits_d_bits_param : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _schedule_T_499 = _schedule_T_4 ? _mshrs_4_io_schedule_bits_d_bits_param : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _schedule_T_500 = _schedule_T_5 ? _mshrs_5_io_schedule_bits_d_bits_param : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _schedule_T_501 = _schedule_T_6 ? _mshrs_6_io_schedule_bits_d_bits_param : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _schedule_T_502 = _schedule_T_7 ? _mshrs_7_io_schedule_bits_d_bits_param : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _schedule_T_503 = _schedule_T_8 ? _mshrs_8_io_schedule_bits_d_bits_param : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _schedule_T_504 = _schedule_T_9 ? _mshrs_9_io_schedule_bits_d_bits_param : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _schedule_T_505 = _schedule_T_10 ? _mshrs_10_io_schedule_bits_d_bits_param : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _schedule_T_506 = _schedule_T_11 ? _mshrs_11_io_schedule_bits_d_bits_param : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _schedule_T_507 = _schedule_T_495 | _schedule_T_496; // @[Mux.scala:30:73]
wire [2:0] _schedule_T_508 = _schedule_T_507 | _schedule_T_497; // @[Mux.scala:30:73]
wire [2:0] _schedule_T_509 = _schedule_T_508 | _schedule_T_498; // @[Mux.scala:30:73]
wire [2:0] _schedule_T_510 = _schedule_T_509 | _schedule_T_499; // @[Mux.scala:30:73]
wire [2:0] _schedule_T_511 = _schedule_T_510 | _schedule_T_500; // @[Mux.scala:30:73]
wire [2:0] _schedule_T_512 = _schedule_T_511 | _schedule_T_501; // @[Mux.scala:30:73]
wire [2:0] _schedule_T_513 = _schedule_T_512 | _schedule_T_502; // @[Mux.scala:30:73]
wire [2:0] _schedule_T_514 = _schedule_T_513 | _schedule_T_503; // @[Mux.scala:30:73]
wire [2:0] _schedule_T_515 = _schedule_T_514 | _schedule_T_504; // @[Mux.scala:30:73]
wire [2:0] _schedule_T_516 = _schedule_T_515 | _schedule_T_505; // @[Mux.scala:30:73]
wire [2:0] _schedule_T_517 = _schedule_T_516 | _schedule_T_506; // @[Mux.scala:30:73]
assign _schedule_WIRE_30 = _schedule_T_517; // @[Mux.scala:30:73]
assign _schedule_WIRE_20_param = _schedule_WIRE_30; // @[Mux.scala:30:73]
wire [2:0] _schedule_T_518 = _schedule_T ? _mshrs_0_io_schedule_bits_d_bits_opcode : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _schedule_T_519 = _schedule_T_1 ? _mshrs_1_io_schedule_bits_d_bits_opcode : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _schedule_T_520 = _schedule_T_2 ? _mshrs_2_io_schedule_bits_d_bits_opcode : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _schedule_T_521 = _schedule_T_3 ? _mshrs_3_io_schedule_bits_d_bits_opcode : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _schedule_T_522 = _schedule_T_4 ? _mshrs_4_io_schedule_bits_d_bits_opcode : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _schedule_T_523 = _schedule_T_5 ? _mshrs_5_io_schedule_bits_d_bits_opcode : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _schedule_T_524 = _schedule_T_6 ? _mshrs_6_io_schedule_bits_d_bits_opcode : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _schedule_T_525 = _schedule_T_7 ? _mshrs_7_io_schedule_bits_d_bits_opcode : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _schedule_T_526 = _schedule_T_8 ? _mshrs_8_io_schedule_bits_d_bits_opcode : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _schedule_T_527 = _schedule_T_9 ? _mshrs_9_io_schedule_bits_d_bits_opcode : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _schedule_T_528 = _schedule_T_10 ? _mshrs_10_io_schedule_bits_d_bits_opcode : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _schedule_T_529 = _schedule_T_11 ? _mshrs_11_io_schedule_bits_d_bits_opcode : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _schedule_T_530 = _schedule_T_518 | _schedule_T_519; // @[Mux.scala:30:73]
wire [2:0] _schedule_T_531 = _schedule_T_530 | _schedule_T_520; // @[Mux.scala:30:73]
wire [2:0] _schedule_T_532 = _schedule_T_531 | _schedule_T_521; // @[Mux.scala:30:73]
wire [2:0] _schedule_T_533 = _schedule_T_532 | _schedule_T_522; // @[Mux.scala:30:73]
wire [2:0] _schedule_T_534 = _schedule_T_533 | _schedule_T_523; // @[Mux.scala:30:73]
wire [2:0] _schedule_T_535 = _schedule_T_534 | _schedule_T_524; // @[Mux.scala:30:73]
wire [2:0] _schedule_T_536 = _schedule_T_535 | _schedule_T_525; // @[Mux.scala:30:73]
wire [2:0] _schedule_T_537 = _schedule_T_536 | _schedule_T_526; // @[Mux.scala:30:73]
wire [2:0] _schedule_T_538 = _schedule_T_537 | _schedule_T_527; // @[Mux.scala:30:73]
wire [2:0] _schedule_T_539 = _schedule_T_538 | _schedule_T_528; // @[Mux.scala:30:73]
wire [2:0] _schedule_T_540 = _schedule_T_539 | _schedule_T_529; // @[Mux.scala:30:73]
assign _schedule_WIRE_31 = _schedule_T_540; // @[Mux.scala:30:73]
assign _schedule_WIRE_20_opcode = _schedule_WIRE_31; // @[Mux.scala:30:73]
wire _schedule_T_541 = _schedule_T & _mshrs_0_io_schedule_bits_d_bits_control; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_542 = _schedule_T_1 & _mshrs_1_io_schedule_bits_d_bits_control; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_543 = _schedule_T_2 & _mshrs_2_io_schedule_bits_d_bits_control; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_544 = _schedule_T_3 & _mshrs_3_io_schedule_bits_d_bits_control; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_545 = _schedule_T_4 & _mshrs_4_io_schedule_bits_d_bits_control; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_546 = _schedule_T_5 & _mshrs_5_io_schedule_bits_d_bits_control; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_547 = _schedule_T_6 & _mshrs_6_io_schedule_bits_d_bits_control; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_548 = _schedule_T_7 & _mshrs_7_io_schedule_bits_d_bits_control; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_549 = _schedule_T_8 & _mshrs_8_io_schedule_bits_d_bits_control; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_550 = _schedule_T_9 & _mshrs_9_io_schedule_bits_d_bits_control; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_551 = _schedule_T_10 & _mshrs_10_io_schedule_bits_d_bits_control; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_552 = _schedule_T_11 & _mshrs_11_io_schedule_bits_d_bits_control; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_553 = _schedule_T_541 | _schedule_T_542; // @[Mux.scala:30:73]
wire _schedule_T_554 = _schedule_T_553 | _schedule_T_543; // @[Mux.scala:30:73]
wire _schedule_T_555 = _schedule_T_554 | _schedule_T_544; // @[Mux.scala:30:73]
wire _schedule_T_556 = _schedule_T_555 | _schedule_T_545; // @[Mux.scala:30:73]
wire _schedule_T_557 = _schedule_T_556 | _schedule_T_546; // @[Mux.scala:30:73]
wire _schedule_T_558 = _schedule_T_557 | _schedule_T_547; // @[Mux.scala:30:73]
wire _schedule_T_559 = _schedule_T_558 | _schedule_T_548; // @[Mux.scala:30:73]
wire _schedule_T_560 = _schedule_T_559 | _schedule_T_549; // @[Mux.scala:30:73]
wire _schedule_T_561 = _schedule_T_560 | _schedule_T_550; // @[Mux.scala:30:73]
wire _schedule_T_562 = _schedule_T_561 | _schedule_T_551; // @[Mux.scala:30:73]
wire _schedule_T_563 = _schedule_T_562 | _schedule_T_552; // @[Mux.scala:30:73]
assign _schedule_WIRE_32 = _schedule_T_563; // @[Mux.scala:30:73]
assign _schedule_WIRE_20_control = _schedule_WIRE_32; // @[Mux.scala:30:73]
wire _schedule_WIRE_34; // @[Mux.scala:30:73]
assign _schedule_WIRE_20_prio_0 = _schedule_WIRE_33_0; // @[Mux.scala:30:73]
wire _schedule_WIRE_35; // @[Mux.scala:30:73]
assign _schedule_WIRE_20_prio_1 = _schedule_WIRE_33_1; // @[Mux.scala:30:73]
wire _schedule_WIRE_36; // @[Mux.scala:30:73]
assign _schedule_WIRE_20_prio_2 = _schedule_WIRE_33_2; // @[Mux.scala:30:73]
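  // d.bits.prio: the prio_0 terms for MSHRs 10/11 and the prio_1 term for MSHR 11 are absent here, presumably constant-false contributions folded away during elaboration.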
wire _schedule_T_564 = _schedule_T & _mshrs_0_io_schedule_bits_d_bits_prio_0; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_565 = _schedule_T_1 & _mshrs_1_io_schedule_bits_d_bits_prio_0; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_566 = _schedule_T_2 & _mshrs_2_io_schedule_bits_d_bits_prio_0; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_567 = _schedule_T_3 & _mshrs_3_io_schedule_bits_d_bits_prio_0; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_568 = _schedule_T_4 & _mshrs_4_io_schedule_bits_d_bits_prio_0; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_569 = _schedule_T_5 & _mshrs_5_io_schedule_bits_d_bits_prio_0; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_570 = _schedule_T_6 & _mshrs_6_io_schedule_bits_d_bits_prio_0; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_571 = _schedule_T_7 & _mshrs_7_io_schedule_bits_d_bits_prio_0; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_572 = _schedule_T_8 & _mshrs_8_io_schedule_bits_d_bits_prio_0; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_573 = _schedule_T_9 & _mshrs_9_io_schedule_bits_d_bits_prio_0; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_576 = _schedule_T_564 | _schedule_T_565; // @[Mux.scala:30:73]
wire _schedule_T_577 = _schedule_T_576 | _schedule_T_566; // @[Mux.scala:30:73]
wire _schedule_T_578 = _schedule_T_577 | _schedule_T_567; // @[Mux.scala:30:73]
wire _schedule_T_579 = _schedule_T_578 | _schedule_T_568; // @[Mux.scala:30:73]
wire _schedule_T_580 = _schedule_T_579 | _schedule_T_569; // @[Mux.scala:30:73]
wire _schedule_T_581 = _schedule_T_580 | _schedule_T_570; // @[Mux.scala:30:73]
wire _schedule_T_582 = _schedule_T_581 | _schedule_T_571; // @[Mux.scala:30:73]
wire _schedule_T_583 = _schedule_T_582 | _schedule_T_572; // @[Mux.scala:30:73]
wire _schedule_T_584 = _schedule_T_583 | _schedule_T_573; // @[Mux.scala:30:73]
wire _schedule_T_585 = _schedule_T_584; // @[Mux.scala:30:73]
wire _schedule_T_586 = _schedule_T_585; // @[Mux.scala:30:73]
assign _schedule_WIRE_34 = _schedule_T_586; // @[Mux.scala:30:73]
assign _schedule_WIRE_33_0 = _schedule_WIRE_34; // @[Mux.scala:30:73]
wire _schedule_T_587 = _schedule_T & _mshrs_0_io_schedule_bits_d_bits_prio_1; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_588 = _schedule_T_1 & _mshrs_1_io_schedule_bits_d_bits_prio_1; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_589 = _schedule_T_2 & _mshrs_2_io_schedule_bits_d_bits_prio_1; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_590 = _schedule_T_3 & _mshrs_3_io_schedule_bits_d_bits_prio_1; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_591 = _schedule_T_4 & _mshrs_4_io_schedule_bits_d_bits_prio_1; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_592 = _schedule_T_5 & _mshrs_5_io_schedule_bits_d_bits_prio_1; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_593 = _schedule_T_6 & _mshrs_6_io_schedule_bits_d_bits_prio_1; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_594 = _schedule_T_7 & _mshrs_7_io_schedule_bits_d_bits_prio_1; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_595 = _schedule_T_8 & _mshrs_8_io_schedule_bits_d_bits_prio_1; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_596 = _schedule_T_9 & _mshrs_9_io_schedule_bits_d_bits_prio_1; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_597 = _schedule_T_10 & _mshrs_10_io_schedule_bits_d_bits_prio_1; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_599 = _schedule_T_587 | _schedule_T_588; // @[Mux.scala:30:73]
wire _schedule_T_600 = _schedule_T_599 | _schedule_T_589; // @[Mux.scala:30:73]
wire _schedule_T_601 = _schedule_T_600 | _schedule_T_590; // @[Mux.scala:30:73]
wire _schedule_T_602 = _schedule_T_601 | _schedule_T_591; // @[Mux.scala:30:73]
wire _schedule_T_603 = _schedule_T_602 | _schedule_T_592; // @[Mux.scala:30:73]
wire _schedule_T_604 = _schedule_T_603 | _schedule_T_593; // @[Mux.scala:30:73]
wire _schedule_T_605 = _schedule_T_604 | _schedule_T_594; // @[Mux.scala:30:73]
wire _schedule_T_606 = _schedule_T_605 | _schedule_T_595; // @[Mux.scala:30:73]
wire _schedule_T_607 = _schedule_T_606 | _schedule_T_596; // @[Mux.scala:30:73]
wire _schedule_T_608 = _schedule_T_607 | _schedule_T_597; // @[Mux.scala:30:73]
wire _schedule_T_609 = _schedule_T_608; // @[Mux.scala:30:73]
assign _schedule_WIRE_35 = _schedule_T_609; // @[Mux.scala:30:73]
assign _schedule_WIRE_33_1 = _schedule_WIRE_35; // @[Mux.scala:30:73]
wire _schedule_T_610 = _schedule_T & _mshrs_0_io_schedule_bits_d_bits_prio_2; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_611 = _schedule_T_1 & _mshrs_1_io_schedule_bits_d_bits_prio_2; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_612 = _schedule_T_2 & _mshrs_2_io_schedule_bits_d_bits_prio_2; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_613 = _schedule_T_3 & _mshrs_3_io_schedule_bits_d_bits_prio_2; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_614 = _schedule_T_4 & _mshrs_4_io_schedule_bits_d_bits_prio_2; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_615 = _schedule_T_5 & _mshrs_5_io_schedule_bits_d_bits_prio_2; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_616 = _schedule_T_6 & _mshrs_6_io_schedule_bits_d_bits_prio_2; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_617 = _schedule_T_7 & _mshrs_7_io_schedule_bits_d_bits_prio_2; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_618 = _schedule_T_8 & _mshrs_8_io_schedule_bits_d_bits_prio_2; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_619 = _schedule_T_9 & _mshrs_9_io_schedule_bits_d_bits_prio_2; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_620 = _schedule_T_10 & _mshrs_10_io_schedule_bits_d_bits_prio_2; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_621 = _schedule_T_11 & _mshrs_11_io_schedule_bits_d_bits_prio_2; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_622 = _schedule_T_610 | _schedule_T_611; // @[Mux.scala:30:73]
wire _schedule_T_623 = _schedule_T_622 | _schedule_T_612; // @[Mux.scala:30:73]
wire _schedule_T_624 = _schedule_T_623 | _schedule_T_613; // @[Mux.scala:30:73]
wire _schedule_T_625 = _schedule_T_624 | _schedule_T_614; // @[Mux.scala:30:73]
wire _schedule_T_626 = _schedule_T_625 | _schedule_T_615; // @[Mux.scala:30:73]
wire _schedule_T_627 = _schedule_T_626 | _schedule_T_616; // @[Mux.scala:30:73]
wire _schedule_T_628 = _schedule_T_627 | _schedule_T_617; // @[Mux.scala:30:73]
wire _schedule_T_629 = _schedule_T_628 | _schedule_T_618; // @[Mux.scala:30:73]
wire _schedule_T_630 = _schedule_T_629 | _schedule_T_619; // @[Mux.scala:30:73]
wire _schedule_T_631 = _schedule_T_630 | _schedule_T_620; // @[Mux.scala:30:73]
wire _schedule_T_632 = _schedule_T_631 | _schedule_T_621; // @[Mux.scala:30:73]
assign _schedule_WIRE_36 = _schedule_T_632; // @[Mux.scala:30:73]
assign _schedule_WIRE_33_2 = _schedule_WIRE_36; // @[Mux.scala:30:73]
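  // schedule.d.valid: OR-reduction of the selected MSHR's d.valid.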
wire _schedule_T_633 = _schedule_T & _mshrs_0_io_schedule_bits_d_valid; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_634 = _schedule_T_1 & _mshrs_1_io_schedule_bits_d_valid; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_635 = _schedule_T_2 & _mshrs_2_io_schedule_bits_d_valid; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_636 = _schedule_T_3 & _mshrs_3_io_schedule_bits_d_valid; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_637 = _schedule_T_4 & _mshrs_4_io_schedule_bits_d_valid; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_638 = _schedule_T_5 & _mshrs_5_io_schedule_bits_d_valid; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_639 = _schedule_T_6 & _mshrs_6_io_schedule_bits_d_valid; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_640 = _schedule_T_7 & _mshrs_7_io_schedule_bits_d_valid; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_641 = _schedule_T_8 & _mshrs_8_io_schedule_bits_d_valid; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_642 = _schedule_T_9 & _mshrs_9_io_schedule_bits_d_valid; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_643 = _schedule_T_10 & _mshrs_10_io_schedule_bits_d_valid; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_644 = _schedule_T_11 & _mshrs_11_io_schedule_bits_d_valid; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_645 = _schedule_T_633 | _schedule_T_634; // @[Mux.scala:30:73]
wire _schedule_T_646 = _schedule_T_645 | _schedule_T_635; // @[Mux.scala:30:73]
wire _schedule_T_647 = _schedule_T_646 | _schedule_T_636; // @[Mux.scala:30:73]
wire _schedule_T_648 = _schedule_T_647 | _schedule_T_637; // @[Mux.scala:30:73]
wire _schedule_T_649 = _schedule_T_648 | _schedule_T_638; // @[Mux.scala:30:73]
wire _schedule_T_650 = _schedule_T_649 | _schedule_T_639; // @[Mux.scala:30:73]
wire _schedule_T_651 = _schedule_T_650 | _schedule_T_640; // @[Mux.scala:30:73]
wire _schedule_T_652 = _schedule_T_651 | _schedule_T_641; // @[Mux.scala:30:73]
wire _schedule_T_653 = _schedule_T_652 | _schedule_T_642; // @[Mux.scala:30:73]
wire _schedule_T_654 = _schedule_T_653 | _schedule_T_643; // @[Mux.scala:30:73]
wire _schedule_T_655 = _schedule_T_654 | _schedule_T_644; // @[Mux.scala:30:73]
assign _schedule_WIRE_37 = _schedule_T_655; // @[Mux.scala:30:73]
assign _schedule_WIRE_19_valid = _schedule_WIRE_37; // @[Mux.scala:30:73]
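  // schedule.c output: the winning MSHR's C-channel request is forwarded through the _schedule_WIRE_38/_schedule_WIRE_39 view wires below.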
wire _schedule_WIRE_47; // @[Mux.scala:30:73]
assign schedule_c_valid = _schedule_WIRE_38_valid; // @[Mux.scala:30:73]
wire [2:0] _schedule_WIRE_39_opcode; // @[Mux.scala:30:73]
assign schedule_c_bits_opcode = _schedule_WIRE_38_bits_opcode; // @[Mux.scala:30:73]
wire [2:0] _schedule_WIRE_39_param; // @[Mux.scala:30:73]
assign schedule_c_bits_param = _schedule_WIRE_38_bits_param; // @[Mux.scala:30:73]
wire [8:0] _schedule_WIRE_39_tag; // @[Mux.scala:30:73]
assign schedule_c_bits_tag = _schedule_WIRE_38_bits_tag; // @[Mux.scala:30:73]
wire [10:0] _schedule_WIRE_39_set; // @[Mux.scala:30:73]
assign schedule_c_bits_set = _schedule_WIRE_38_bits_set; // @[Mux.scala:30:73]
wire [3:0] _schedule_WIRE_39_way; // @[Mux.scala:30:73]
assign schedule_c_bits_way = _schedule_WIRE_38_bits_way; // @[Mux.scala:30:73]
wire _schedule_WIRE_39_dirty; // @[Mux.scala:30:73]
assign schedule_c_bits_dirty = _schedule_WIRE_38_bits_dirty; // @[Mux.scala:30:73]
wire [2:0] _schedule_WIRE_46; // @[Mux.scala:30:73]
assign _schedule_WIRE_38_bits_opcode = _schedule_WIRE_39_opcode; // @[Mux.scala:30:73]
wire [2:0] _schedule_WIRE_45; // @[Mux.scala:30:73]
assign _schedule_WIRE_38_bits_param = _schedule_WIRE_39_param; // @[Mux.scala:30:73]
wire [8:0] _schedule_WIRE_43; // @[Mux.scala:30:73]
assign _schedule_WIRE_38_bits_tag = _schedule_WIRE_39_tag; // @[Mux.scala:30:73]
wire [10:0] _schedule_WIRE_42; // @[Mux.scala:30:73]
assign _schedule_WIRE_38_bits_set = _schedule_WIRE_39_set; // @[Mux.scala:30:73]
wire [3:0] _schedule_WIRE_41; // @[Mux.scala:30:73]
assign _schedule_WIRE_38_bits_way = _schedule_WIRE_39_way; // @[Mux.scala:30:73]
wire _schedule_WIRE_40; // @[Mux.scala:30:73]
assign _schedule_WIRE_38_bits_dirty = _schedule_WIRE_39_dirty; // @[Mux.scala:30:73]
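  // Per-field one-hot mux over the 12 MSHRs' c.bits (dirty, way, set, tag, param, opcode), using the same select bits as above.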
wire _schedule_T_656 = _schedule_T & _mshrs_0_io_schedule_bits_c_bits_dirty; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_657 = _schedule_T_1 & _mshrs_1_io_schedule_bits_c_bits_dirty; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_658 = _schedule_T_2 & _mshrs_2_io_schedule_bits_c_bits_dirty; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_659 = _schedule_T_3 & _mshrs_3_io_schedule_bits_c_bits_dirty; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_660 = _schedule_T_4 & _mshrs_4_io_schedule_bits_c_bits_dirty; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_661 = _schedule_T_5 & _mshrs_5_io_schedule_bits_c_bits_dirty; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_662 = _schedule_T_6 & _mshrs_6_io_schedule_bits_c_bits_dirty; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_663 = _schedule_T_7 & _mshrs_7_io_schedule_bits_c_bits_dirty; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_664 = _schedule_T_8 & _mshrs_8_io_schedule_bits_c_bits_dirty; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_665 = _schedule_T_9 & _mshrs_9_io_schedule_bits_c_bits_dirty; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_666 = _schedule_T_10 & _mshrs_10_io_schedule_bits_c_bits_dirty; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_667 = _schedule_T_11 & _mshrs_11_io_schedule_bits_c_bits_dirty; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_668 = _schedule_T_656 | _schedule_T_657; // @[Mux.scala:30:73]
wire _schedule_T_669 = _schedule_T_668 | _schedule_T_658; // @[Mux.scala:30:73]
wire _schedule_T_670 = _schedule_T_669 | _schedule_T_659; // @[Mux.scala:30:73]
wire _schedule_T_671 = _schedule_T_670 | _schedule_T_660; // @[Mux.scala:30:73]
wire _schedule_T_672 = _schedule_T_671 | _schedule_T_661; // @[Mux.scala:30:73]
wire _schedule_T_673 = _schedule_T_672 | _schedule_T_662; // @[Mux.scala:30:73]
wire _schedule_T_674 = _schedule_T_673 | _schedule_T_663; // @[Mux.scala:30:73]
wire _schedule_T_675 = _schedule_T_674 | _schedule_T_664; // @[Mux.scala:30:73]
wire _schedule_T_676 = _schedule_T_675 | _schedule_T_665; // @[Mux.scala:30:73]
wire _schedule_T_677 = _schedule_T_676 | _schedule_T_666; // @[Mux.scala:30:73]
wire _schedule_T_678 = _schedule_T_677 | _schedule_T_667; // @[Mux.scala:30:73]
assign _schedule_WIRE_40 = _schedule_T_678; // @[Mux.scala:30:73]
assign _schedule_WIRE_39_dirty = _schedule_WIRE_40; // @[Mux.scala:30:73]
wire [3:0] _schedule_T_679 = _schedule_T ? _mshrs_0_io_schedule_bits_c_bits_way : 4'h0; // @[Mux.scala:30:73, :32:36]
wire [3:0] _schedule_T_680 = _schedule_T_1 ? _mshrs_1_io_schedule_bits_c_bits_way : 4'h0; // @[Mux.scala:30:73, :32:36]
wire [3:0] _schedule_T_681 = _schedule_T_2 ? _mshrs_2_io_schedule_bits_c_bits_way : 4'h0; // @[Mux.scala:30:73, :32:36]
wire [3:0] _schedule_T_682 = _schedule_T_3 ? _mshrs_3_io_schedule_bits_c_bits_way : 4'h0; // @[Mux.scala:30:73, :32:36]
wire [3:0] _schedule_T_683 = _schedule_T_4 ? _mshrs_4_io_schedule_bits_c_bits_way : 4'h0; // @[Mux.scala:30:73, :32:36]
wire [3:0] _schedule_T_684 = _schedule_T_5 ? _mshrs_5_io_schedule_bits_c_bits_way : 4'h0; // @[Mux.scala:30:73, :32:36]
wire [3:0] _schedule_T_685 = _schedule_T_6 ? _mshrs_6_io_schedule_bits_c_bits_way : 4'h0; // @[Mux.scala:30:73, :32:36]
wire [3:0] _schedule_T_686 = _schedule_T_7 ? _mshrs_7_io_schedule_bits_c_bits_way : 4'h0; // @[Mux.scala:30:73, :32:36]
wire [3:0] _schedule_T_687 = _schedule_T_8 ? _mshrs_8_io_schedule_bits_c_bits_way : 4'h0; // @[Mux.scala:30:73, :32:36]
wire [3:0] _schedule_T_688 = _schedule_T_9 ? _mshrs_9_io_schedule_bits_c_bits_way : 4'h0; // @[Mux.scala:30:73, :32:36]
wire [3:0] _schedule_T_689 = _schedule_T_10 ? _mshrs_10_io_schedule_bits_c_bits_way : 4'h0; // @[Mux.scala:30:73, :32:36]
wire [3:0] _schedule_T_690 = _schedule_T_11 ? _mshrs_11_io_schedule_bits_c_bits_way : 4'h0; // @[Mux.scala:30:73, :32:36]
wire [3:0] _schedule_T_691 = _schedule_T_679 | _schedule_T_680; // @[Mux.scala:30:73]
wire [3:0] _schedule_T_692 = _schedule_T_691 | _schedule_T_681; // @[Mux.scala:30:73]
wire [3:0] _schedule_T_693 = _schedule_T_692 | _schedule_T_682; // @[Mux.scala:30:73]
wire [3:0] _schedule_T_694 = _schedule_T_693 | _schedule_T_683; // @[Mux.scala:30:73]
wire [3:0] _schedule_T_695 = _schedule_T_694 | _schedule_T_684; // @[Mux.scala:30:73]
wire [3:0] _schedule_T_696 = _schedule_T_695 | _schedule_T_685; // @[Mux.scala:30:73]
wire [3:0] _schedule_T_697 = _schedule_T_696 | _schedule_T_686; // @[Mux.scala:30:73]
wire [3:0] _schedule_T_698 = _schedule_T_697 | _schedule_T_687; // @[Mux.scala:30:73]
wire [3:0] _schedule_T_699 = _schedule_T_698 | _schedule_T_688; // @[Mux.scala:30:73]
wire [3:0] _schedule_T_700 = _schedule_T_699 | _schedule_T_689; // @[Mux.scala:30:73]
wire [3:0] _schedule_T_701 = _schedule_T_700 | _schedule_T_690; // @[Mux.scala:30:73]
assign _schedule_WIRE_41 = _schedule_T_701; // @[Mux.scala:30:73]
assign _schedule_WIRE_39_way = _schedule_WIRE_41; // @[Mux.scala:30:73]
wire [10:0] _schedule_T_702 = _schedule_T ? _mshrs_0_io_schedule_bits_c_bits_set : 11'h0; // @[Mux.scala:30:73, :32:36]
wire [10:0] _schedule_T_703 = _schedule_T_1 ? _mshrs_1_io_schedule_bits_c_bits_set : 11'h0; // @[Mux.scala:30:73, :32:36]
wire [10:0] _schedule_T_704 = _schedule_T_2 ? _mshrs_2_io_schedule_bits_c_bits_set : 11'h0; // @[Mux.scala:30:73, :32:36]
wire [10:0] _schedule_T_705 = _schedule_T_3 ? _mshrs_3_io_schedule_bits_c_bits_set : 11'h0; // @[Mux.scala:30:73, :32:36]
wire [10:0] _schedule_T_706 = _schedule_T_4 ? _mshrs_4_io_schedule_bits_c_bits_set : 11'h0; // @[Mux.scala:30:73, :32:36]
wire [10:0] _schedule_T_707 = _schedule_T_5 ? _mshrs_5_io_schedule_bits_c_bits_set : 11'h0; // @[Mux.scala:30:73, :32:36]
wire [10:0] _schedule_T_708 = _schedule_T_6 ? _mshrs_6_io_schedule_bits_c_bits_set : 11'h0; // @[Mux.scala:30:73, :32:36]
wire [10:0] _schedule_T_709 = _schedule_T_7 ? _mshrs_7_io_schedule_bits_c_bits_set : 11'h0; // @[Mux.scala:30:73, :32:36]
wire [10:0] _schedule_T_710 = _schedule_T_8 ? _mshrs_8_io_schedule_bits_c_bits_set : 11'h0; // @[Mux.scala:30:73, :32:36]
wire [10:0] _schedule_T_711 = _schedule_T_9 ? _mshrs_9_io_schedule_bits_c_bits_set : 11'h0; // @[Mux.scala:30:73, :32:36]
wire [10:0] _schedule_T_712 = _schedule_T_10 ? _mshrs_10_io_schedule_bits_c_bits_set : 11'h0; // @[Mux.scala:30:73, :32:36]
wire [10:0] _schedule_T_713 = _schedule_T_11 ? _mshrs_11_io_schedule_bits_c_bits_set : 11'h0; // @[Mux.scala:30:73, :32:36]
wire [10:0] _schedule_T_714 = _schedule_T_702 | _schedule_T_703; // @[Mux.scala:30:73]
wire [10:0] _schedule_T_715 = _schedule_T_714 | _schedule_T_704; // @[Mux.scala:30:73]
wire [10:0] _schedule_T_716 = _schedule_T_715 | _schedule_T_705; // @[Mux.scala:30:73]
wire [10:0] _schedule_T_717 = _schedule_T_716 | _schedule_T_706; // @[Mux.scala:30:73]
wire [10:0] _schedule_T_718 = _schedule_T_717 | _schedule_T_707; // @[Mux.scala:30:73]
wire [10:0] _schedule_T_719 = _schedule_T_718 | _schedule_T_708; // @[Mux.scala:30:73]
wire [10:0] _schedule_T_720 = _schedule_T_719 | _schedule_T_709; // @[Mux.scala:30:73]
wire [10:0] _schedule_T_721 = _schedule_T_720 | _schedule_T_710; // @[Mux.scala:30:73]
wire [10:0] _schedule_T_722 = _schedule_T_721 | _schedule_T_711; // @[Mux.scala:30:73]
wire [10:0] _schedule_T_723 = _schedule_T_722 | _schedule_T_712; // @[Mux.scala:30:73]
wire [10:0] _schedule_T_724 = _schedule_T_723 | _schedule_T_713; // @[Mux.scala:30:73]
assign _schedule_WIRE_42 = _schedule_T_724; // @[Mux.scala:30:73]
assign _schedule_WIRE_39_set = _schedule_WIRE_42; // @[Mux.scala:30:73]
wire [8:0] _schedule_T_725 = _schedule_T ? _mshrs_0_io_schedule_bits_c_bits_tag : 9'h0; // @[Mux.scala:30:73, :32:36]
wire [8:0] _schedule_T_726 = _schedule_T_1 ? _mshrs_1_io_schedule_bits_c_bits_tag : 9'h0; // @[Mux.scala:30:73, :32:36]
wire [8:0] _schedule_T_727 = _schedule_T_2 ? _mshrs_2_io_schedule_bits_c_bits_tag : 9'h0; // @[Mux.scala:30:73, :32:36]
wire [8:0] _schedule_T_728 = _schedule_T_3 ? _mshrs_3_io_schedule_bits_c_bits_tag : 9'h0; // @[Mux.scala:30:73, :32:36]
wire [8:0] _schedule_T_729 = _schedule_T_4 ? _mshrs_4_io_schedule_bits_c_bits_tag : 9'h0; // @[Mux.scala:30:73, :32:36]
wire [8:0] _schedule_T_730 = _schedule_T_5 ? _mshrs_5_io_schedule_bits_c_bits_tag : 9'h0; // @[Mux.scala:30:73, :32:36]
wire [8:0] _schedule_T_731 = _schedule_T_6 ? _mshrs_6_io_schedule_bits_c_bits_tag : 9'h0; // @[Mux.scala:30:73, :32:36]
wire [8:0] _schedule_T_732 = _schedule_T_7 ? _mshrs_7_io_schedule_bits_c_bits_tag : 9'h0; // @[Mux.scala:30:73, :32:36]
wire [8:0] _schedule_T_733 = _schedule_T_8 ? _mshrs_8_io_schedule_bits_c_bits_tag : 9'h0; // @[Mux.scala:30:73, :32:36]
wire [8:0] _schedule_T_734 = _schedule_T_9 ? _mshrs_9_io_schedule_bits_c_bits_tag : 9'h0; // @[Mux.scala:30:73, :32:36]
wire [8:0] _schedule_T_735 = _schedule_T_10 ? _mshrs_10_io_schedule_bits_c_bits_tag : 9'h0; // @[Mux.scala:30:73, :32:36]
wire [8:0] _schedule_T_736 = _schedule_T_11 ? _mshrs_11_io_schedule_bits_c_bits_tag : 9'h0; // @[Mux.scala:30:73, :32:36]
wire [8:0] _schedule_T_737 = _schedule_T_725 | _schedule_T_726; // @[Mux.scala:30:73]
wire [8:0] _schedule_T_738 = _schedule_T_737 | _schedule_T_727; // @[Mux.scala:30:73]
wire [8:0] _schedule_T_739 = _schedule_T_738 | _schedule_T_728; // @[Mux.scala:30:73]
wire [8:0] _schedule_T_740 = _schedule_T_739 | _schedule_T_729; // @[Mux.scala:30:73]
wire [8:0] _schedule_T_741 = _schedule_T_740 | _schedule_T_730; // @[Mux.scala:30:73]
wire [8:0] _schedule_T_742 = _schedule_T_741 | _schedule_T_731; // @[Mux.scala:30:73]
wire [8:0] _schedule_T_743 = _schedule_T_742 | _schedule_T_732; // @[Mux.scala:30:73]
wire [8:0] _schedule_T_744 = _schedule_T_743 | _schedule_T_733; // @[Mux.scala:30:73]
wire [8:0] _schedule_T_745 = _schedule_T_744 | _schedule_T_734; // @[Mux.scala:30:73]
wire [8:0] _schedule_T_746 = _schedule_T_745 | _schedule_T_735; // @[Mux.scala:30:73]
wire [8:0] _schedule_T_747 = _schedule_T_746 | _schedule_T_736; // @[Mux.scala:30:73]
assign _schedule_WIRE_43 = _schedule_T_747; // @[Mux.scala:30:73]
assign _schedule_WIRE_39_tag = _schedule_WIRE_43; // @[Mux.scala:30:73]
wire [2:0] _schedule_T_771 = _schedule_T ? _mshrs_0_io_schedule_bits_c_bits_param : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _schedule_T_772 = _schedule_T_1 ? _mshrs_1_io_schedule_bits_c_bits_param : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _schedule_T_773 = _schedule_T_2 ? _mshrs_2_io_schedule_bits_c_bits_param : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _schedule_T_774 = _schedule_T_3 ? _mshrs_3_io_schedule_bits_c_bits_param : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _schedule_T_775 = _schedule_T_4 ? _mshrs_4_io_schedule_bits_c_bits_param : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _schedule_T_776 = _schedule_T_5 ? _mshrs_5_io_schedule_bits_c_bits_param : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _schedule_T_777 = _schedule_T_6 ? _mshrs_6_io_schedule_bits_c_bits_param : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _schedule_T_778 = _schedule_T_7 ? _mshrs_7_io_schedule_bits_c_bits_param : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _schedule_T_779 = _schedule_T_8 ? _mshrs_8_io_schedule_bits_c_bits_param : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _schedule_T_780 = _schedule_T_9 ? _mshrs_9_io_schedule_bits_c_bits_param : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _schedule_T_781 = _schedule_T_10 ? _mshrs_10_io_schedule_bits_c_bits_param : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _schedule_T_782 = _schedule_T_11 ? _mshrs_11_io_schedule_bits_c_bits_param : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _schedule_T_783 = _schedule_T_771 | _schedule_T_772; // @[Mux.scala:30:73]
wire [2:0] _schedule_T_784 = _schedule_T_783 | _schedule_T_773; // @[Mux.scala:30:73]
wire [2:0] _schedule_T_785 = _schedule_T_784 | _schedule_T_774; // @[Mux.scala:30:73]
wire [2:0] _schedule_T_786 = _schedule_T_785 | _schedule_T_775; // @[Mux.scala:30:73]
wire [2:0] _schedule_T_787 = _schedule_T_786 | _schedule_T_776; // @[Mux.scala:30:73]
wire [2:0] _schedule_T_788 = _schedule_T_787 | _schedule_T_777; // @[Mux.scala:30:73]
wire [2:0] _schedule_T_789 = _schedule_T_788 | _schedule_T_778; // @[Mux.scala:30:73]
wire [2:0] _schedule_T_790 = _schedule_T_789 | _schedule_T_779; // @[Mux.scala:30:73]
wire [2:0] _schedule_T_791 = _schedule_T_790 | _schedule_T_780; // @[Mux.scala:30:73]
wire [2:0] _schedule_T_792 = _schedule_T_791 | _schedule_T_781; // @[Mux.scala:30:73]
wire [2:0] _schedule_T_793 = _schedule_T_792 | _schedule_T_782; // @[Mux.scala:30:73]
assign _schedule_WIRE_45 = _schedule_T_793; // @[Mux.scala:30:73]
assign _schedule_WIRE_39_param = _schedule_WIRE_45; // @[Mux.scala:30:73]
wire [2:0] _schedule_T_794 = _schedule_T ? _mshrs_0_io_schedule_bits_c_bits_opcode : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _schedule_T_795 = _schedule_T_1 ? _mshrs_1_io_schedule_bits_c_bits_opcode : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _schedule_T_796 = _schedule_T_2 ? _mshrs_2_io_schedule_bits_c_bits_opcode : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _schedule_T_797 = _schedule_T_3 ? _mshrs_3_io_schedule_bits_c_bits_opcode : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _schedule_T_798 = _schedule_T_4 ? _mshrs_4_io_schedule_bits_c_bits_opcode : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _schedule_T_799 = _schedule_T_5 ? _mshrs_5_io_schedule_bits_c_bits_opcode : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _schedule_T_800 = _schedule_T_6 ? _mshrs_6_io_schedule_bits_c_bits_opcode : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _schedule_T_801 = _schedule_T_7 ? _mshrs_7_io_schedule_bits_c_bits_opcode : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _schedule_T_802 = _schedule_T_8 ? _mshrs_8_io_schedule_bits_c_bits_opcode : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _schedule_T_803 = _schedule_T_9 ? _mshrs_9_io_schedule_bits_c_bits_opcode : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _schedule_T_804 = _schedule_T_10 ? _mshrs_10_io_schedule_bits_c_bits_opcode : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _schedule_T_805 = _schedule_T_11 ? _mshrs_11_io_schedule_bits_c_bits_opcode : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _schedule_T_806 = _schedule_T_794 | _schedule_T_795; // @[Mux.scala:30:73]
wire [2:0] _schedule_T_807 = _schedule_T_806 | _schedule_T_796; // @[Mux.scala:30:73]
wire [2:0] _schedule_T_808 = _schedule_T_807 | _schedule_T_797; // @[Mux.scala:30:73]
wire [2:0] _schedule_T_809 = _schedule_T_808 | _schedule_T_798; // @[Mux.scala:30:73]
wire [2:0] _schedule_T_810 = _schedule_T_809 | _schedule_T_799; // @[Mux.scala:30:73]
wire [2:0] _schedule_T_811 = _schedule_T_810 | _schedule_T_800; // @[Mux.scala:30:73]
wire [2:0] _schedule_T_812 = _schedule_T_811 | _schedule_T_801; // @[Mux.scala:30:73]
wire [2:0] _schedule_T_813 = _schedule_T_812 | _schedule_T_802; // @[Mux.scala:30:73]
wire [2:0] _schedule_T_814 = _schedule_T_813 | _schedule_T_803; // @[Mux.scala:30:73]
wire [2:0] _schedule_T_815 = _schedule_T_814 | _schedule_T_804; // @[Mux.scala:30:73]
wire [2:0] _schedule_T_816 = _schedule_T_815 | _schedule_T_805; // @[Mux.scala:30:73]
assign _schedule_WIRE_46 = _schedule_T_816; // @[Mux.scala:30:73]
assign _schedule_WIRE_39_opcode = _schedule_WIRE_46; // @[Mux.scala:30:73]
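// Arbitrated C-channel valid: OR of each MSHR's schedule.c.valid gated by its select bit.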
wire _schedule_T_817 = _schedule_T & _mshrs_0_io_schedule_bits_c_valid; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_818 = _schedule_T_1 & _mshrs_1_io_schedule_bits_c_valid; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_819 = _schedule_T_2 & _mshrs_2_io_schedule_bits_c_valid; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_820 = _schedule_T_3 & _mshrs_3_io_schedule_bits_c_valid; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_821 = _schedule_T_4 & _mshrs_4_io_schedule_bits_c_valid; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_822 = _schedule_T_5 & _mshrs_5_io_schedule_bits_c_valid; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_823 = _schedule_T_6 & _mshrs_6_io_schedule_bits_c_valid; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_824 = _schedule_T_7 & _mshrs_7_io_schedule_bits_c_valid; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_825 = _schedule_T_8 & _mshrs_8_io_schedule_bits_c_valid; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_826 = _schedule_T_9 & _mshrs_9_io_schedule_bits_c_valid; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_827 = _schedule_T_10 & _mshrs_10_io_schedule_bits_c_valid; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_828 = _schedule_T_11 & _mshrs_11_io_schedule_bits_c_valid; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_829 = _schedule_T_817 | _schedule_T_818; // @[Mux.scala:30:73]
wire _schedule_T_830 = _schedule_T_829 | _schedule_T_819; // @[Mux.scala:30:73]
wire _schedule_T_831 = _schedule_T_830 | _schedule_T_820; // @[Mux.scala:30:73]
wire _schedule_T_832 = _schedule_T_831 | _schedule_T_821; // @[Mux.scala:30:73]
wire _schedule_T_833 = _schedule_T_832 | _schedule_T_822; // @[Mux.scala:30:73]
wire _schedule_T_834 = _schedule_T_833 | _schedule_T_823; // @[Mux.scala:30:73]
wire _schedule_T_835 = _schedule_T_834 | _schedule_T_824; // @[Mux.scala:30:73]
wire _schedule_T_836 = _schedule_T_835 | _schedule_T_825; // @[Mux.scala:30:73]
wire _schedule_T_837 = _schedule_T_836 | _schedule_T_826; // @[Mux.scala:30:73]
wire _schedule_T_838 = _schedule_T_837 | _schedule_T_827; // @[Mux.scala:30:73]
wire _schedule_T_839 = _schedule_T_838 | _schedule_T_828; // @[Mux.scala:30:73]
assign _schedule_WIRE_47 = _schedule_T_839; // @[Mux.scala:30:73]
assign _schedule_WIRE_38_valid = _schedule_WIRE_47; // @[Mux.scala:30:73]
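// Forwarding wires for the arbitrated schedule.b (probe) request: valid, param, tag, set, clients.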
wire _schedule_WIRE_54; // @[Mux.scala:30:73]
assign schedule_b_valid = _schedule_WIRE_48_valid; // @[Mux.scala:30:73]
wire [2:0] _schedule_WIRE_49_param; // @[Mux.scala:30:73]
assign schedule_b_bits_param = _schedule_WIRE_48_bits_param; // @[Mux.scala:30:73]
wire [8:0] _schedule_WIRE_49_tag; // @[Mux.scala:30:73]
assign schedule_b_bits_tag = _schedule_WIRE_48_bits_tag; // @[Mux.scala:30:73]
wire [10:0] _schedule_WIRE_49_set; // @[Mux.scala:30:73]
assign schedule_b_bits_set = _schedule_WIRE_48_bits_set; // @[Mux.scala:30:73]
wire _schedule_WIRE_49_clients; // @[Mux.scala:30:73]
assign schedule_b_bits_clients = _schedule_WIRE_48_bits_clients; // @[Mux.scala:30:73]
wire [2:0] _schedule_WIRE_53; // @[Mux.scala:30:73]
assign _schedule_WIRE_48_bits_param = _schedule_WIRE_49_param; // @[Mux.scala:30:73]
wire [8:0] _schedule_WIRE_52; // @[Mux.scala:30:73]
assign _schedule_WIRE_48_bits_tag = _schedule_WIRE_49_tag; // @[Mux.scala:30:73]
wire [10:0] _schedule_WIRE_51; // @[Mux.scala:30:73]
assign _schedule_WIRE_48_bits_set = _schedule_WIRE_49_set; // @[Mux.scala:30:73]
wire _schedule_WIRE_50; // @[Mux.scala:30:73]
assign _schedule_WIRE_48_bits_clients = _schedule_WIRE_49_clients; // @[Mux.scala:30:73]
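// One-hot mux trees below pick each schedule.b field (clients, set, tag, param, valid) from the selected MSHR.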
wire _schedule_T_840 = _schedule_T & _mshrs_0_io_schedule_bits_b_bits_clients; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_841 = _schedule_T_1 & _mshrs_1_io_schedule_bits_b_bits_clients; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_842 = _schedule_T_2 & _mshrs_2_io_schedule_bits_b_bits_clients; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_843 = _schedule_T_3 & _mshrs_3_io_schedule_bits_b_bits_clients; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_844 = _schedule_T_4 & _mshrs_4_io_schedule_bits_b_bits_clients; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_845 = _schedule_T_5 & _mshrs_5_io_schedule_bits_b_bits_clients; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_846 = _schedule_T_6 & _mshrs_6_io_schedule_bits_b_bits_clients; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_847 = _schedule_T_7 & _mshrs_7_io_schedule_bits_b_bits_clients; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_848 = _schedule_T_8 & _mshrs_8_io_schedule_bits_b_bits_clients; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_849 = _schedule_T_9 & _mshrs_9_io_schedule_bits_b_bits_clients; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_850 = _schedule_T_10 & _mshrs_10_io_schedule_bits_b_bits_clients; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_851 = _schedule_T_11 & _mshrs_11_io_schedule_bits_b_bits_clients; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_852 = _schedule_T_840 | _schedule_T_841; // @[Mux.scala:30:73]
wire _schedule_T_853 = _schedule_T_852 | _schedule_T_842; // @[Mux.scala:30:73]
wire _schedule_T_854 = _schedule_T_853 | _schedule_T_843; // @[Mux.scala:30:73]
wire _schedule_T_855 = _schedule_T_854 | _schedule_T_844; // @[Mux.scala:30:73]
wire _schedule_T_856 = _schedule_T_855 | _schedule_T_845; // @[Mux.scala:30:73]
wire _schedule_T_857 = _schedule_T_856 | _schedule_T_846; // @[Mux.scala:30:73]
wire _schedule_T_858 = _schedule_T_857 | _schedule_T_847; // @[Mux.scala:30:73]
wire _schedule_T_859 = _schedule_T_858 | _schedule_T_848; // @[Mux.scala:30:73]
wire _schedule_T_860 = _schedule_T_859 | _schedule_T_849; // @[Mux.scala:30:73]
wire _schedule_T_861 = _schedule_T_860 | _schedule_T_850; // @[Mux.scala:30:73]
wire _schedule_T_862 = _schedule_T_861 | _schedule_T_851; // @[Mux.scala:30:73]
assign _schedule_WIRE_50 = _schedule_T_862; // @[Mux.scala:30:73]
assign _schedule_WIRE_49_clients = _schedule_WIRE_50; // @[Mux.scala:30:73]
wire [10:0] _schedule_T_863 = _schedule_T ? _mshrs_0_io_schedule_bits_b_bits_set : 11'h0; // @[Mux.scala:30:73, :32:36]
wire [10:0] _schedule_T_864 = _schedule_T_1 ? _mshrs_1_io_schedule_bits_b_bits_set : 11'h0; // @[Mux.scala:30:73, :32:36]
wire [10:0] _schedule_T_865 = _schedule_T_2 ? _mshrs_2_io_schedule_bits_b_bits_set : 11'h0; // @[Mux.scala:30:73, :32:36]
wire [10:0] _schedule_T_866 = _schedule_T_3 ? _mshrs_3_io_schedule_bits_b_bits_set : 11'h0; // @[Mux.scala:30:73, :32:36]
wire [10:0] _schedule_T_867 = _schedule_T_4 ? _mshrs_4_io_schedule_bits_b_bits_set : 11'h0; // @[Mux.scala:30:73, :32:36]
wire [10:0] _schedule_T_868 = _schedule_T_5 ? _mshrs_5_io_schedule_bits_b_bits_set : 11'h0; // @[Mux.scala:30:73, :32:36]
wire [10:0] _schedule_T_869 = _schedule_T_6 ? _mshrs_6_io_schedule_bits_b_bits_set : 11'h0; // @[Mux.scala:30:73, :32:36]
wire [10:0] _schedule_T_870 = _schedule_T_7 ? _mshrs_7_io_schedule_bits_b_bits_set : 11'h0; // @[Mux.scala:30:73, :32:36]
wire [10:0] _schedule_T_871 = _schedule_T_8 ? _mshrs_8_io_schedule_bits_b_bits_set : 11'h0; // @[Mux.scala:30:73, :32:36]
wire [10:0] _schedule_T_872 = _schedule_T_9 ? _mshrs_9_io_schedule_bits_b_bits_set : 11'h0; // @[Mux.scala:30:73, :32:36]
wire [10:0] _schedule_T_873 = _schedule_T_10 ? _mshrs_10_io_schedule_bits_b_bits_set : 11'h0; // @[Mux.scala:30:73, :32:36]
wire [10:0] _schedule_T_874 = _schedule_T_11 ? _mshrs_11_io_schedule_bits_b_bits_set : 11'h0; // @[Mux.scala:30:73, :32:36]
wire [10:0] _schedule_T_875 = _schedule_T_863 | _schedule_T_864; // @[Mux.scala:30:73]
wire [10:0] _schedule_T_876 = _schedule_T_875 | _schedule_T_865; // @[Mux.scala:30:73]
wire [10:0] _schedule_T_877 = _schedule_T_876 | _schedule_T_866; // @[Mux.scala:30:73]
wire [10:0] _schedule_T_878 = _schedule_T_877 | _schedule_T_867; // @[Mux.scala:30:73]
wire [10:0] _schedule_T_879 = _schedule_T_878 | _schedule_T_868; // @[Mux.scala:30:73]
wire [10:0] _schedule_T_880 = _schedule_T_879 | _schedule_T_869; // @[Mux.scala:30:73]
wire [10:0] _schedule_T_881 = _schedule_T_880 | _schedule_T_870; // @[Mux.scala:30:73]
wire [10:0] _schedule_T_882 = _schedule_T_881 | _schedule_T_871; // @[Mux.scala:30:73]
wire [10:0] _schedule_T_883 = _schedule_T_882 | _schedule_T_872; // @[Mux.scala:30:73]
wire [10:0] _schedule_T_884 = _schedule_T_883 | _schedule_T_873; // @[Mux.scala:30:73]
wire [10:0] _schedule_T_885 = _schedule_T_884 | _schedule_T_874; // @[Mux.scala:30:73]
assign _schedule_WIRE_51 = _schedule_T_885; // @[Mux.scala:30:73]
assign _schedule_WIRE_49_set = _schedule_WIRE_51; // @[Mux.scala:30:73]
wire [8:0] _schedule_T_886 = _schedule_T ? _mshrs_0_io_schedule_bits_b_bits_tag : 9'h0; // @[Mux.scala:30:73, :32:36]
wire [8:0] _schedule_T_887 = _schedule_T_1 ? _mshrs_1_io_schedule_bits_b_bits_tag : 9'h0; // @[Mux.scala:30:73, :32:36]
wire [8:0] _schedule_T_888 = _schedule_T_2 ? _mshrs_2_io_schedule_bits_b_bits_tag : 9'h0; // @[Mux.scala:30:73, :32:36]
wire [8:0] _schedule_T_889 = _schedule_T_3 ? _mshrs_3_io_schedule_bits_b_bits_tag : 9'h0; // @[Mux.scala:30:73, :32:36]
wire [8:0] _schedule_T_890 = _schedule_T_4 ? _mshrs_4_io_schedule_bits_b_bits_tag : 9'h0; // @[Mux.scala:30:73, :32:36]
wire [8:0] _schedule_T_891 = _schedule_T_5 ? _mshrs_5_io_schedule_bits_b_bits_tag : 9'h0; // @[Mux.scala:30:73, :32:36]
wire [8:0] _schedule_T_892 = _schedule_T_6 ? _mshrs_6_io_schedule_bits_b_bits_tag : 9'h0; // @[Mux.scala:30:73, :32:36]
wire [8:0] _schedule_T_893 = _schedule_T_7 ? _mshrs_7_io_schedule_bits_b_bits_tag : 9'h0; // @[Mux.scala:30:73, :32:36]
wire [8:0] _schedule_T_894 = _schedule_T_8 ? _mshrs_8_io_schedule_bits_b_bits_tag : 9'h0; // @[Mux.scala:30:73, :32:36]
wire [8:0] _schedule_T_895 = _schedule_T_9 ? _mshrs_9_io_schedule_bits_b_bits_tag : 9'h0; // @[Mux.scala:30:73, :32:36]
wire [8:0] _schedule_T_896 = _schedule_T_10 ? _mshrs_10_io_schedule_bits_b_bits_tag : 9'h0; // @[Mux.scala:30:73, :32:36]
wire [8:0] _schedule_T_897 = _schedule_T_11 ? _mshrs_11_io_schedule_bits_b_bits_tag : 9'h0; // @[Mux.scala:30:73, :32:36]
wire [8:0] _schedule_T_898 = _schedule_T_886 | _schedule_T_887; // @[Mux.scala:30:73]
wire [8:0] _schedule_T_899 = _schedule_T_898 | _schedule_T_888; // @[Mux.scala:30:73]
wire [8:0] _schedule_T_900 = _schedule_T_899 | _schedule_T_889; // @[Mux.scala:30:73]
wire [8:0] _schedule_T_901 = _schedule_T_900 | _schedule_T_890; // @[Mux.scala:30:73]
wire [8:0] _schedule_T_902 = _schedule_T_901 | _schedule_T_891; // @[Mux.scala:30:73]
wire [8:0] _schedule_T_903 = _schedule_T_902 | _schedule_T_892; // @[Mux.scala:30:73]
wire [8:0] _schedule_T_904 = _schedule_T_903 | _schedule_T_893; // @[Mux.scala:30:73]
wire [8:0] _schedule_T_905 = _schedule_T_904 | _schedule_T_894; // @[Mux.scala:30:73]
wire [8:0] _schedule_T_906 = _schedule_T_905 | _schedule_T_895; // @[Mux.scala:30:73]
wire [8:0] _schedule_T_907 = _schedule_T_906 | _schedule_T_896; // @[Mux.scala:30:73]
wire [8:0] _schedule_T_908 = _schedule_T_907 | _schedule_T_897; // @[Mux.scala:30:73]
assign _schedule_WIRE_52 = _schedule_T_908; // @[Mux.scala:30:73]
assign _schedule_WIRE_49_tag = _schedule_WIRE_52; // @[Mux.scala:30:73]
wire [2:0] _schedule_T_909 = _schedule_T ? _mshrs_0_io_schedule_bits_b_bits_param : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _schedule_T_910 = _schedule_T_1 ? _mshrs_1_io_schedule_bits_b_bits_param : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _schedule_T_911 = _schedule_T_2 ? _mshrs_2_io_schedule_bits_b_bits_param : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _schedule_T_912 = _schedule_T_3 ? _mshrs_3_io_schedule_bits_b_bits_param : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _schedule_T_913 = _schedule_T_4 ? _mshrs_4_io_schedule_bits_b_bits_param : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _schedule_T_914 = _schedule_T_5 ? _mshrs_5_io_schedule_bits_b_bits_param : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _schedule_T_915 = _schedule_T_6 ? _mshrs_6_io_schedule_bits_b_bits_param : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _schedule_T_916 = _schedule_T_7 ? _mshrs_7_io_schedule_bits_b_bits_param : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _schedule_T_917 = _schedule_T_8 ? _mshrs_8_io_schedule_bits_b_bits_param : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _schedule_T_918 = _schedule_T_9 ? _mshrs_9_io_schedule_bits_b_bits_param : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _schedule_T_919 = _schedule_T_10 ? _mshrs_10_io_schedule_bits_b_bits_param : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _schedule_T_920 = _schedule_T_11 ? _mshrs_11_io_schedule_bits_b_bits_param : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _schedule_T_921 = _schedule_T_909 | _schedule_T_910; // @[Mux.scala:30:73]
wire [2:0] _schedule_T_922 = _schedule_T_921 | _schedule_T_911; // @[Mux.scala:30:73]
wire [2:0] _schedule_T_923 = _schedule_T_922 | _schedule_T_912; // @[Mux.scala:30:73]
wire [2:0] _schedule_T_924 = _schedule_T_923 | _schedule_T_913; // @[Mux.scala:30:73]
wire [2:0] _schedule_T_925 = _schedule_T_924 | _schedule_T_914; // @[Mux.scala:30:73]
wire [2:0] _schedule_T_926 = _schedule_T_925 | _schedule_T_915; // @[Mux.scala:30:73]
wire [2:0] _schedule_T_927 = _schedule_T_926 | _schedule_T_916; // @[Mux.scala:30:73]
wire [2:0] _schedule_T_928 = _schedule_T_927 | _schedule_T_917; // @[Mux.scala:30:73]
wire [2:0] _schedule_T_929 = _schedule_T_928 | _schedule_T_918; // @[Mux.scala:30:73]
wire [2:0] _schedule_T_930 = _schedule_T_929 | _schedule_T_919; // @[Mux.scala:30:73]
wire [2:0] _schedule_T_931 = _schedule_T_930 | _schedule_T_920; // @[Mux.scala:30:73]
assign _schedule_WIRE_53 = _schedule_T_931; // @[Mux.scala:30:73]
assign _schedule_WIRE_49_param = _schedule_WIRE_53; // @[Mux.scala:30:73]
wire _schedule_T_932 = _schedule_T & _mshrs_0_io_schedule_bits_b_valid; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_933 = _schedule_T_1 & _mshrs_1_io_schedule_bits_b_valid; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_934 = _schedule_T_2 & _mshrs_2_io_schedule_bits_b_valid; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_935 = _schedule_T_3 & _mshrs_3_io_schedule_bits_b_valid; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_936 = _schedule_T_4 & _mshrs_4_io_schedule_bits_b_valid; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_937 = _schedule_T_5 & _mshrs_5_io_schedule_bits_b_valid; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_938 = _schedule_T_6 & _mshrs_6_io_schedule_bits_b_valid; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_939 = _schedule_T_7 & _mshrs_7_io_schedule_bits_b_valid; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_940 = _schedule_T_8 & _mshrs_8_io_schedule_bits_b_valid; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_941 = _schedule_T_9 & _mshrs_9_io_schedule_bits_b_valid; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_942 = _schedule_T_10 & _mshrs_10_io_schedule_bits_b_valid; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_943 = _schedule_T_11 & _mshrs_11_io_schedule_bits_b_valid; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_944 = _schedule_T_932 | _schedule_T_933; // @[Mux.scala:30:73]
wire _schedule_T_945 = _schedule_T_944 | _schedule_T_934; // @[Mux.scala:30:73]
wire _schedule_T_946 = _schedule_T_945 | _schedule_T_935; // @[Mux.scala:30:73]
wire _schedule_T_947 = _schedule_T_946 | _schedule_T_936; // @[Mux.scala:30:73]
wire _schedule_T_948 = _schedule_T_947 | _schedule_T_937; // @[Mux.scala:30:73]
wire _schedule_T_949 = _schedule_T_948 | _schedule_T_938; // @[Mux.scala:30:73]
wire _schedule_T_950 = _schedule_T_949 | _schedule_T_939; // @[Mux.scala:30:73]
wire _schedule_T_951 = _schedule_T_950 | _schedule_T_940; // @[Mux.scala:30:73]
wire _schedule_T_952 = _schedule_T_951 | _schedule_T_941; // @[Mux.scala:30:73]
wire _schedule_T_953 = _schedule_T_952 | _schedule_T_942; // @[Mux.scala:30:73]
wire _schedule_T_954 = _schedule_T_953 | _schedule_T_943; // @[Mux.scala:30:73]
assign _schedule_WIRE_54 = _schedule_T_954; // @[Mux.scala:30:73]
assign _schedule_WIRE_48_valid = _schedule_WIRE_54; // @[Mux.scala:30:73]
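// Forwarding wires for the arbitrated schedule.a (acquire) request: valid, tag, set, param, block.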
wire _schedule_WIRE_62; // @[Mux.scala:30:73]
assign schedule_a_valid = _schedule_WIRE_55_valid; // @[Mux.scala:30:73]
wire [8:0] _schedule_WIRE_56_tag; // @[Mux.scala:30:73]
assign schedule_a_bits_tag = _schedule_WIRE_55_bits_tag; // @[Mux.scala:30:73]
wire [10:0] _schedule_WIRE_56_set; // @[Mux.scala:30:73]
assign schedule_a_bits_set = _schedule_WIRE_55_bits_set; // @[Mux.scala:30:73]
wire [2:0] _schedule_WIRE_56_param; // @[Mux.scala:30:73]
assign schedule_a_bits_param = _schedule_WIRE_55_bits_param; // @[Mux.scala:30:73]
wire _schedule_WIRE_56_block; // @[Mux.scala:30:73]
assign schedule_a_bits_block = _schedule_WIRE_55_bits_block; // @[Mux.scala:30:73]
wire [8:0] _schedule_WIRE_61; // @[Mux.scala:30:73]
assign _schedule_WIRE_55_bits_tag = _schedule_WIRE_56_tag; // @[Mux.scala:30:73]
wire [10:0] _schedule_WIRE_60; // @[Mux.scala:30:73]
assign _schedule_WIRE_55_bits_set = _schedule_WIRE_56_set; // @[Mux.scala:30:73]
wire [2:0] _schedule_WIRE_59; // @[Mux.scala:30:73]
assign _schedule_WIRE_55_bits_param = _schedule_WIRE_56_param; // @[Mux.scala:30:73]
wire _schedule_WIRE_57; // @[Mux.scala:30:73]
assign _schedule_WIRE_55_bits_block = _schedule_WIRE_56_block; // @[Mux.scala:30:73]
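// One-hot mux trees below pick each schedule.a field (block, param, set, tag, valid) from the selected MSHR.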
wire _schedule_T_955 = _schedule_T & _mshrs_0_io_schedule_bits_a_bits_block; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_956 = _schedule_T_1 & _mshrs_1_io_schedule_bits_a_bits_block; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_957 = _schedule_T_2 & _mshrs_2_io_schedule_bits_a_bits_block; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_958 = _schedule_T_3 & _mshrs_3_io_schedule_bits_a_bits_block; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_959 = _schedule_T_4 & _mshrs_4_io_schedule_bits_a_bits_block; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_960 = _schedule_T_5 & _mshrs_5_io_schedule_bits_a_bits_block; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_961 = _schedule_T_6 & _mshrs_6_io_schedule_bits_a_bits_block; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_962 = _schedule_T_7 & _mshrs_7_io_schedule_bits_a_bits_block; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_963 = _schedule_T_8 & _mshrs_8_io_schedule_bits_a_bits_block; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_964 = _schedule_T_9 & _mshrs_9_io_schedule_bits_a_bits_block; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_965 = _schedule_T_10 & _mshrs_10_io_schedule_bits_a_bits_block; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_966 = _schedule_T_11 & _mshrs_11_io_schedule_bits_a_bits_block; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_967 = _schedule_T_955 | _schedule_T_956; // @[Mux.scala:30:73]
wire _schedule_T_968 = _schedule_T_967 | _schedule_T_957; // @[Mux.scala:30:73]
wire _schedule_T_969 = _schedule_T_968 | _schedule_T_958; // @[Mux.scala:30:73]
wire _schedule_T_970 = _schedule_T_969 | _schedule_T_959; // @[Mux.scala:30:73]
wire _schedule_T_971 = _schedule_T_970 | _schedule_T_960; // @[Mux.scala:30:73]
wire _schedule_T_972 = _schedule_T_971 | _schedule_T_961; // @[Mux.scala:30:73]
wire _schedule_T_973 = _schedule_T_972 | _schedule_T_962; // @[Mux.scala:30:73]
wire _schedule_T_974 = _schedule_T_973 | _schedule_T_963; // @[Mux.scala:30:73]
wire _schedule_T_975 = _schedule_T_974 | _schedule_T_964; // @[Mux.scala:30:73]
wire _schedule_T_976 = _schedule_T_975 | _schedule_T_965; // @[Mux.scala:30:73]
wire _schedule_T_977 = _schedule_T_976 | _schedule_T_966; // @[Mux.scala:30:73]
assign _schedule_WIRE_57 = _schedule_T_977; // @[Mux.scala:30:73]
assign _schedule_WIRE_56_block = _schedule_WIRE_57; // @[Mux.scala:30:73]
wire [2:0] _schedule_T_1001 = _schedule_T ? _mshrs_0_io_schedule_bits_a_bits_param : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _schedule_T_1002 = _schedule_T_1 ? _mshrs_1_io_schedule_bits_a_bits_param : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _schedule_T_1003 = _schedule_T_2 ? _mshrs_2_io_schedule_bits_a_bits_param : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _schedule_T_1004 = _schedule_T_3 ? _mshrs_3_io_schedule_bits_a_bits_param : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _schedule_T_1005 = _schedule_T_4 ? _mshrs_4_io_schedule_bits_a_bits_param : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _schedule_T_1006 = _schedule_T_5 ? _mshrs_5_io_schedule_bits_a_bits_param : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _schedule_T_1007 = _schedule_T_6 ? _mshrs_6_io_schedule_bits_a_bits_param : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _schedule_T_1008 = _schedule_T_7 ? _mshrs_7_io_schedule_bits_a_bits_param : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _schedule_T_1009 = _schedule_T_8 ? _mshrs_8_io_schedule_bits_a_bits_param : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _schedule_T_1010 = _schedule_T_9 ? _mshrs_9_io_schedule_bits_a_bits_param : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _schedule_T_1011 = _schedule_T_10 ? _mshrs_10_io_schedule_bits_a_bits_param : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _schedule_T_1012 = _schedule_T_11 ? _mshrs_11_io_schedule_bits_a_bits_param : 3'h0; // @[Mux.scala:30:73, :32:36]
wire [2:0] _schedule_T_1013 = _schedule_T_1001 | _schedule_T_1002; // @[Mux.scala:30:73]
wire [2:0] _schedule_T_1014 = _schedule_T_1013 | _schedule_T_1003; // @[Mux.scala:30:73]
wire [2:0] _schedule_T_1015 = _schedule_T_1014 | _schedule_T_1004; // @[Mux.scala:30:73]
wire [2:0] _schedule_T_1016 = _schedule_T_1015 | _schedule_T_1005; // @[Mux.scala:30:73]
wire [2:0] _schedule_T_1017 = _schedule_T_1016 | _schedule_T_1006; // @[Mux.scala:30:73]
wire [2:0] _schedule_T_1018 = _schedule_T_1017 | _schedule_T_1007; // @[Mux.scala:30:73]
wire [2:0] _schedule_T_1019 = _schedule_T_1018 | _schedule_T_1008; // @[Mux.scala:30:73]
wire [2:0] _schedule_T_1020 = _schedule_T_1019 | _schedule_T_1009; // @[Mux.scala:30:73]
wire [2:0] _schedule_T_1021 = _schedule_T_1020 | _schedule_T_1010; // @[Mux.scala:30:73]
wire [2:0] _schedule_T_1022 = _schedule_T_1021 | _schedule_T_1011; // @[Mux.scala:30:73]
wire [2:0] _schedule_T_1023 = _schedule_T_1022 | _schedule_T_1012; // @[Mux.scala:30:73]
assign _schedule_WIRE_59 = _schedule_T_1023; // @[Mux.scala:30:73]
assign _schedule_WIRE_56_param = _schedule_WIRE_59; // @[Mux.scala:30:73]
wire [10:0] _schedule_T_1024 = _schedule_T ? _mshrs_0_io_schedule_bits_a_bits_set : 11'h0; // @[Mux.scala:30:73, :32:36]
wire [10:0] _schedule_T_1025 = _schedule_T_1 ? _mshrs_1_io_schedule_bits_a_bits_set : 11'h0; // @[Mux.scala:30:73, :32:36]
wire [10:0] _schedule_T_1026 = _schedule_T_2 ? _mshrs_2_io_schedule_bits_a_bits_set : 11'h0; // @[Mux.scala:30:73, :32:36]
wire [10:0] _schedule_T_1027 = _schedule_T_3 ? _mshrs_3_io_schedule_bits_a_bits_set : 11'h0; // @[Mux.scala:30:73, :32:36]
wire [10:0] _schedule_T_1028 = _schedule_T_4 ? _mshrs_4_io_schedule_bits_a_bits_set : 11'h0; // @[Mux.scala:30:73, :32:36]
wire [10:0] _schedule_T_1029 = _schedule_T_5 ? _mshrs_5_io_schedule_bits_a_bits_set : 11'h0; // @[Mux.scala:30:73, :32:36]
wire [10:0] _schedule_T_1030 = _schedule_T_6 ? _mshrs_6_io_schedule_bits_a_bits_set : 11'h0; // @[Mux.scala:30:73, :32:36]
wire [10:0] _schedule_T_1031 = _schedule_T_7 ? _mshrs_7_io_schedule_bits_a_bits_set : 11'h0; // @[Mux.scala:30:73, :32:36]
wire [10:0] _schedule_T_1032 = _schedule_T_8 ? _mshrs_8_io_schedule_bits_a_bits_set : 11'h0; // @[Mux.scala:30:73, :32:36]
wire [10:0] _schedule_T_1033 = _schedule_T_9 ? _mshrs_9_io_schedule_bits_a_bits_set : 11'h0; // @[Mux.scala:30:73, :32:36]
wire [10:0] _schedule_T_1034 = _schedule_T_10 ? _mshrs_10_io_schedule_bits_a_bits_set : 11'h0; // @[Mux.scala:30:73, :32:36]
wire [10:0] _schedule_T_1035 = _schedule_T_11 ? _mshrs_11_io_schedule_bits_a_bits_set : 11'h0; // @[Mux.scala:30:73, :32:36]
wire [10:0] _schedule_T_1036 = _schedule_T_1024 | _schedule_T_1025; // @[Mux.scala:30:73]
wire [10:0] _schedule_T_1037 = _schedule_T_1036 | _schedule_T_1026; // @[Mux.scala:30:73]
wire [10:0] _schedule_T_1038 = _schedule_T_1037 | _schedule_T_1027; // @[Mux.scala:30:73]
wire [10:0] _schedule_T_1039 = _schedule_T_1038 | _schedule_T_1028; // @[Mux.scala:30:73]
wire [10:0] _schedule_T_1040 = _schedule_T_1039 | _schedule_T_1029; // @[Mux.scala:30:73]
wire [10:0] _schedule_T_1041 = _schedule_T_1040 | _schedule_T_1030; // @[Mux.scala:30:73]
wire [10:0] _schedule_T_1042 = _schedule_T_1041 | _schedule_T_1031; // @[Mux.scala:30:73]
wire [10:0] _schedule_T_1043 = _schedule_T_1042 | _schedule_T_1032; // @[Mux.scala:30:73]
wire [10:0] _schedule_T_1044 = _schedule_T_1043 | _schedule_T_1033; // @[Mux.scala:30:73]
wire [10:0] _schedule_T_1045 = _schedule_T_1044 | _schedule_T_1034; // @[Mux.scala:30:73]
wire [10:0] _schedule_T_1046 = _schedule_T_1045 | _schedule_T_1035; // @[Mux.scala:30:73]
assign _schedule_WIRE_60 = _schedule_T_1046; // @[Mux.scala:30:73]
assign _schedule_WIRE_56_set = _schedule_WIRE_60; // @[Mux.scala:30:73]
wire [8:0] _schedule_T_1047 = _schedule_T ? _mshrs_0_io_schedule_bits_a_bits_tag : 9'h0; // @[Mux.scala:30:73, :32:36]
wire [8:0] _schedule_T_1048 = _schedule_T_1 ? _mshrs_1_io_schedule_bits_a_bits_tag : 9'h0; // @[Mux.scala:30:73, :32:36]
wire [8:0] _schedule_T_1049 = _schedule_T_2 ? _mshrs_2_io_schedule_bits_a_bits_tag : 9'h0; // @[Mux.scala:30:73, :32:36]
wire [8:0] _schedule_T_1050 = _schedule_T_3 ? _mshrs_3_io_schedule_bits_a_bits_tag : 9'h0; // @[Mux.scala:30:73, :32:36]
wire [8:0] _schedule_T_1051 = _schedule_T_4 ? _mshrs_4_io_schedule_bits_a_bits_tag : 9'h0; // @[Mux.scala:30:73, :32:36]
wire [8:0] _schedule_T_1052 = _schedule_T_5 ? _mshrs_5_io_schedule_bits_a_bits_tag : 9'h0; // @[Mux.scala:30:73, :32:36]
wire [8:0] _schedule_T_1053 = _schedule_T_6 ? _mshrs_6_io_schedule_bits_a_bits_tag : 9'h0; // @[Mux.scala:30:73, :32:36]
wire [8:0] _schedule_T_1054 = _schedule_T_7 ? _mshrs_7_io_schedule_bits_a_bits_tag : 9'h0; // @[Mux.scala:30:73, :32:36]
wire [8:0] _schedule_T_1055 = _schedule_T_8 ? _mshrs_8_io_schedule_bits_a_bits_tag : 9'h0; // @[Mux.scala:30:73, :32:36]
wire [8:0] _schedule_T_1056 = _schedule_T_9 ? _mshrs_9_io_schedule_bits_a_bits_tag : 9'h0; // @[Mux.scala:30:73, :32:36]
wire [8:0] _schedule_T_1057 = _schedule_T_10 ? _mshrs_10_io_schedule_bits_a_bits_tag : 9'h0; // @[Mux.scala:30:73, :32:36]
wire [8:0] _schedule_T_1058 = _schedule_T_11 ? _mshrs_11_io_schedule_bits_a_bits_tag : 9'h0; // @[Mux.scala:30:73, :32:36]
wire [8:0] _schedule_T_1059 = _schedule_T_1047 | _schedule_T_1048; // @[Mux.scala:30:73]
wire [8:0] _schedule_T_1060 = _schedule_T_1059 | _schedule_T_1049; // @[Mux.scala:30:73]
wire [8:0] _schedule_T_1061 = _schedule_T_1060 | _schedule_T_1050; // @[Mux.scala:30:73]
wire [8:0] _schedule_T_1062 = _schedule_T_1061 | _schedule_T_1051; // @[Mux.scala:30:73]
wire [8:0] _schedule_T_1063 = _schedule_T_1062 | _schedule_T_1052; // @[Mux.scala:30:73]
wire [8:0] _schedule_T_1064 = _schedule_T_1063 | _schedule_T_1053; // @[Mux.scala:30:73]
wire [8:0] _schedule_T_1065 = _schedule_T_1064 | _schedule_T_1054; // @[Mux.scala:30:73]
wire [8:0] _schedule_T_1066 = _schedule_T_1065 | _schedule_T_1055; // @[Mux.scala:30:73]
wire [8:0] _schedule_T_1067 = _schedule_T_1066 | _schedule_T_1056; // @[Mux.scala:30:73]
wire [8:0] _schedule_T_1068 = _schedule_T_1067 | _schedule_T_1057; // @[Mux.scala:30:73]
wire [8:0] _schedule_T_1069 = _schedule_T_1068 | _schedule_T_1058; // @[Mux.scala:30:73]
assign _schedule_WIRE_61 = _schedule_T_1069; // @[Mux.scala:30:73]
assign _schedule_WIRE_56_tag = _schedule_WIRE_61; // @[Mux.scala:30:73]
wire _schedule_T_1070 = _schedule_T & _mshrs_0_io_schedule_bits_a_valid; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_1071 = _schedule_T_1 & _mshrs_1_io_schedule_bits_a_valid; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_1072 = _schedule_T_2 & _mshrs_2_io_schedule_bits_a_valid; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_1073 = _schedule_T_3 & _mshrs_3_io_schedule_bits_a_valid; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_1074 = _schedule_T_4 & _mshrs_4_io_schedule_bits_a_valid; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_1075 = _schedule_T_5 & _mshrs_5_io_schedule_bits_a_valid; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_1076 = _schedule_T_6 & _mshrs_6_io_schedule_bits_a_valid; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_1077 = _schedule_T_7 & _mshrs_7_io_schedule_bits_a_valid; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_1078 = _schedule_T_8 & _mshrs_8_io_schedule_bits_a_valid; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_1079 = _schedule_T_9 & _mshrs_9_io_schedule_bits_a_valid; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_1080 = _schedule_T_10 & _mshrs_10_io_schedule_bits_a_valid; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_1081 = _schedule_T_11 & _mshrs_11_io_schedule_bits_a_valid; // @[Mux.scala:30:73, :32:36]
wire _schedule_T_1082 = _schedule_T_1070 | _schedule_T_1071; // @[Mux.scala:30:73]
wire _schedule_T_1083 = _schedule_T_1082 | _schedule_T_1072; // @[Mux.scala:30:73]
wire _schedule_T_1084 = _schedule_T_1083 | _schedule_T_1073; // @[Mux.scala:30:73]
wire _schedule_T_1085 = _schedule_T_1084 | _schedule_T_1074; // @[Mux.scala:30:73]
wire _schedule_T_1086 = _schedule_T_1085 | _schedule_T_1075; // @[Mux.scala:30:73]
wire _schedule_T_1087 = _schedule_T_1086 | _schedule_T_1076; // @[Mux.scala:30:73]
wire _schedule_T_1088 = _schedule_T_1087 | _schedule_T_1077; // @[Mux.scala:30:73]
wire _schedule_T_1089 = _schedule_T_1088 | _schedule_T_1078; // @[Mux.scala:30:73]
wire _schedule_T_1090 = _schedule_T_1089 | _schedule_T_1079; // @[Mux.scala:30:73]
wire _schedule_T_1091 = _schedule_T_1090 | _schedule_T_1080; // @[Mux.scala:30:73]
wire _schedule_T_1092 = _schedule_T_1091 | _schedule_T_1081; // @[Mux.scala:30:73]
assign _schedule_WIRE_62 = _schedule_T_1092; // @[Mux.scala:30:73]
assign _schedule_WIRE_55_valid = _schedule_WIRE_62; // @[Mux.scala:30:73]
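// scheduleTag: io_status_bits_tag of the scheduling MSHR, selected by the _scheduleTag_T* one-hot gates.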
wire [8:0] _scheduleTag_T_12 = _scheduleTag_T ? _mshrs_0_io_status_bits_tag : 9'h0; // @[Mux.scala:30:73, :32:36]
wire [8:0] _scheduleTag_T_13 = _scheduleTag_T_1 ? _mshrs_1_io_status_bits_tag : 9'h0; // @[Mux.scala:30:73, :32:36]
wire [8:0] _scheduleTag_T_14 = _scheduleTag_T_2 ? _mshrs_2_io_status_bits_tag : 9'h0; // @[Mux.scala:30:73, :32:36]
wire [8:0] _scheduleTag_T_15 = _scheduleTag_T_3 ? _mshrs_3_io_status_bits_tag : 9'h0; // @[Mux.scala:30:73, :32:36]
wire [8:0] _scheduleTag_T_16 = _scheduleTag_T_4 ? _mshrs_4_io_status_bits_tag : 9'h0; // @[Mux.scala:30:73, :32:36]
wire [8:0] _scheduleTag_T_17 = _scheduleTag_T_5 ? _mshrs_5_io_status_bits_tag : 9'h0; // @[Mux.scala:30:73, :32:36]
wire [8:0] _scheduleTag_T_18 = _scheduleTag_T_6 ? _mshrs_6_io_status_bits_tag : 9'h0; // @[Mux.scala:30:73, :32:36]
wire [8:0] _scheduleTag_T_19 = _scheduleTag_T_7 ? _mshrs_7_io_status_bits_tag : 9'h0; // @[Mux.scala:30:73, :32:36]
wire [8:0] _scheduleTag_T_20 = _scheduleTag_T_8 ? _mshrs_8_io_status_bits_tag : 9'h0; // @[Mux.scala:30:73, :32:36]
wire [8:0] _scheduleTag_T_21 = _scheduleTag_T_9 ? _mshrs_9_io_status_bits_tag : 9'h0; // @[Mux.scala:30:73, :32:36]
wire [8:0] _scheduleTag_T_22 = _scheduleTag_T_10 ? _mshrs_10_io_status_bits_tag : 9'h0; // @[Mux.scala:30:73, :32:36]
wire [8:0] _scheduleTag_T_23 = _scheduleTag_T_11 ? _mshrs_11_io_status_bits_tag : 9'h0; // @[Mux.scala:30:73, :32:36]
wire [8:0] _scheduleTag_T_24 = _scheduleTag_T_12 | _scheduleTag_T_13; // @[Mux.scala:30:73]
wire [8:0] _scheduleTag_T_25 = _scheduleTag_T_24 | _scheduleTag_T_14; // @[Mux.scala:30:73]
wire [8:0] _scheduleTag_T_26 = _scheduleTag_T_25 | _scheduleTag_T_15; // @[Mux.scala:30:73]
wire [8:0] _scheduleTag_T_27 = _scheduleTag_T_26 | _scheduleTag_T_16; // @[Mux.scala:30:73]
wire [8:0] _scheduleTag_T_28 = _scheduleTag_T_27 | _scheduleTag_T_17; // @[Mux.scala:30:73]
wire [8:0] _scheduleTag_T_29 = _scheduleTag_T_28 | _scheduleTag_T_18; // @[Mux.scala:30:73]
wire [8:0] _scheduleTag_T_30 = _scheduleTag_T_29 | _scheduleTag_T_19; // @[Mux.scala:30:73]
wire [8:0] _scheduleTag_T_31 = _scheduleTag_T_30 | _scheduleTag_T_20; // @[Mux.scala:30:73]
wire [8:0] _scheduleTag_T_32 = _scheduleTag_T_31 | _scheduleTag_T_21; // @[Mux.scala:30:73]
wire [8:0] _scheduleTag_T_33 = _scheduleTag_T_32 | _scheduleTag_T_22; // @[Mux.scala:30:73]
wire [8:0] _scheduleTag_T_34 = _scheduleTag_T_33 | _scheduleTag_T_23; // @[Mux.scala:30:73]
wire [8:0] scheduleTag = _scheduleTag_T_34; // @[Mux.scala:30:73]
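// scheduleSet: io_status_bits_set of the scheduling MSHR, selected the same way.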
wire [10:0] _scheduleSet_T_12 = _scheduleSet_T ? _mshrs_0_io_status_bits_set : 11'h0; // @[Mux.scala:30:73, :32:36]
wire [10:0] _scheduleSet_T_13 = _scheduleSet_T_1 ? _mshrs_1_io_status_bits_set : 11'h0; // @[Mux.scala:30:73, :32:36]
wire [10:0] _scheduleSet_T_14 = _scheduleSet_T_2 ? _mshrs_2_io_status_bits_set : 11'h0; // @[Mux.scala:30:73, :32:36]
wire [10:0] _scheduleSet_T_15 = _scheduleSet_T_3 ? _mshrs_3_io_status_bits_set : 11'h0; // @[Mux.scala:30:73, :32:36]
wire [10:0] _scheduleSet_T_16 = _scheduleSet_T_4 ? _mshrs_4_io_status_bits_set : 11'h0; // @[Mux.scala:30:73, :32:36]
wire [10:0] _scheduleSet_T_17 = _scheduleSet_T_5 ? _mshrs_5_io_status_bits_set : 11'h0; // @[Mux.scala:30:73, :32:36]
wire [10:0] _scheduleSet_T_18 = _scheduleSet_T_6 ? _mshrs_6_io_status_bits_set : 11'h0; // @[Mux.scala:30:73, :32:36]
wire [10:0] _scheduleSet_T_19 = _scheduleSet_T_7 ? _mshrs_7_io_status_bits_set : 11'h0; // @[Mux.scala:30:73, :32:36]
wire [10:0] _scheduleSet_T_20 = _scheduleSet_T_8 ? _mshrs_8_io_status_bits_set : 11'h0; // @[Mux.scala:30:73, :32:36]
wire [10:0] _scheduleSet_T_21 = _scheduleSet_T_9 ? _mshrs_9_io_status_bits_set : 11'h0; // @[Mux.scala:30:73, :32:36]
wire [10:0] _scheduleSet_T_22 = _scheduleSet_T_10 ? _mshrs_10_io_status_bits_set : 11'h0; // @[Mux.scala:30:73, :32:36]
wire [10:0] _scheduleSet_T_23 = _scheduleSet_T_11 ? _mshrs_11_io_status_bits_set : 11'h0; // @[Mux.scala:30:73, :32:36]
wire [10:0] _scheduleSet_T_24 = _scheduleSet_T_12 | _scheduleSet_T_13; // @[Mux.scala:30:73]
wire [10:0] _scheduleSet_T_25 = _scheduleSet_T_24 | _scheduleSet_T_14; // @[Mux.scala:30:73]
wire [10:0] _scheduleSet_T_26 = _scheduleSet_T_25 | _scheduleSet_T_15; // @[Mux.scala:30:73]
wire [10:0] _scheduleSet_T_27 = _scheduleSet_T_26 | _scheduleSet_T_16; // @[Mux.scala:30:73]
wire [10:0] _scheduleSet_T_28 = _scheduleSet_T_27 | _scheduleSet_T_17; // @[Mux.scala:30:73]
wire [10:0] _scheduleSet_T_29 = _scheduleSet_T_28 | _scheduleSet_T_18; // @[Mux.scala:30:73]
wire [10:0] _scheduleSet_T_30 = _scheduleSet_T_29 | _scheduleSet_T_19; // @[Mux.scala:30:73]
wire [10:0] _scheduleSet_T_31 = _scheduleSet_T_30 | _scheduleSet_T_20; // @[Mux.scala:30:73]
wire [10:0] _scheduleSet_T_32 = _scheduleSet_T_31 | _scheduleSet_T_21; // @[Mux.scala:30:73]
wire [10:0] _scheduleSet_T_33 = _scheduleSet_T_32 | _scheduleSet_T_22; // @[Mux.scala:30:73]
wire [10:0] _scheduleSet_T_34 = _scheduleSet_T_33 | _scheduleSet_T_23; // @[Mux.scala:30:73]
wire [10:0] scheduleSet = _scheduleSet_T_34; // @[Mux.scala:30:73]
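// Round-robin filter update: an OR-scan (package.scala:262) propagates the one-hot grant in mshr_selectOH down to all lower bit positions.
// Inverting the scan leaves 1s only above the granted index, so higher-numbered MSHRs are preferred on the next arbitration round.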
wire [10:0] _robin_filter_T = mshr_selectOH[11:1]; // @[package.scala:262:48]
wire [11:0] _robin_filter_T_1 = {mshr_selectOH[11], mshr_selectOH[10:0] | _robin_filter_T}; // @[Mux.scala:32:36]
wire [9:0] _robin_filter_T_2 = _robin_filter_T_1[11:2]; // @[package.scala:262:{43,48}]
wire [11:0] _robin_filter_T_3 = {_robin_filter_T_1[11:10], _robin_filter_T_1[9:0] | _robin_filter_T_2}; // @[package.scala:262:{43,48}]
wire [7:0] _robin_filter_T_4 = _robin_filter_T_3[11:4]; // @[package.scala:262:{43,48}]
wire [11:0] _robin_filter_T_5 = {_robin_filter_T_3[11:8], _robin_filter_T_3[7:0] | _robin_filter_T_4}; // @[package.scala:262:{43,48}]
wire [3:0] _robin_filter_T_6 = _robin_filter_T_5[11:8]; // @[package.scala:262:{43,48}]
wire [11:0] _robin_filter_T_7 = {_robin_filter_T_5[11:4], _robin_filter_T_5[3:0] | _robin_filter_T_6}; // @[package.scala:262:{43,48}]
wire [11:0] _robin_filter_T_8 = _robin_filter_T_7; // @[package.scala:262:43, :263:17]
wire [11:0] _robin_filter_T_9 = ~_robin_filter_T_8; // @[package.scala:263:17]
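// schedule.c.bits.source carries the MSHR index only when opcode[1] is set (Release/ReleaseData); it stays 0 for ProbeAck[Data].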
wire _schedule_c_bits_source_T = schedule_c_bits_opcode[1]; // @[Mux.scala:30:73]
assign _schedule_c_bits_source_T_1 = _schedule_c_bits_source_T ? mshr_select : 4'h0; // @[OneHot.scala:32:10]
assign schedule_c_bits_source = _schedule_c_bits_source_T_1; // @[Mux.scala:30:73]
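// Nested-writeback broadcast: set/tag come from the last MSHR (mshrs_11, C nesting) when select_c, otherwise from mshrs_10 (BC nesting).
// b_toN/b_toB/b_clr_dirty follow the BC MSHR's directory-write state; c_set_dirty follows the C MSHR's dirty directory write.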
assign _nestedwb_set_T = select_c ? _mshrs_11_io_status_bits_set : _mshrs_10_io_status_bits_set; // @[Scheduler.scala:71:46, :153:32, :155:24]
assign nestedwb_set = _nestedwb_set_T; // @[Scheduler.scala:75:22, :155:24]
assign _nestedwb_tag_T = select_c ? _mshrs_11_io_status_bits_tag : _mshrs_10_io_status_bits_tag; // @[Scheduler.scala:71:46, :153:32, :156:24]
assign nestedwb_tag = _nestedwb_tag_T; // @[Scheduler.scala:75:22, :156:24]
wire _GEN = select_bc & _mshrs_10_io_schedule_bits_dir_valid; // @[Scheduler.scala:71:46, :154:32, :157:37]
wire _nestedwb_b_toN_T; // @[Scheduler.scala:157:37]
assign _nestedwb_b_toN_T = _GEN; // @[Scheduler.scala:157:37]
wire _nestedwb_b_toB_T; // @[Scheduler.scala:158:37]
assign _nestedwb_b_toB_T = _GEN; // @[Scheduler.scala:157:37, :158:37]
assign _nestedwb_b_clr_dirty_T = _GEN; // @[Scheduler.scala:157:37, :159:37]
wire _nestedwb_b_toN_T_1 = _mshrs_10_io_schedule_bits_dir_bits_data_state == 2'h0; // @[Scheduler.scala:71:46, :157:123]
assign _nestedwb_b_toN_T_2 = _nestedwb_b_toN_T & _nestedwb_b_toN_T_1; // @[Scheduler.scala:157:{37,75,123}]
assign nestedwb_b_toN = _nestedwb_b_toN_T_2; // @[Scheduler.scala:75:22, :157:75]
wire _nestedwb_b_toB_T_1 = _mshrs_10_io_schedule_bits_dir_bits_data_state == 2'h1; // @[Scheduler.scala:71:46, :158:123]
assign _nestedwb_b_toB_T_2 = _nestedwb_b_toB_T & _nestedwb_b_toB_T_1; // @[Scheduler.scala:158:{37,75,123}]
assign nestedwb_b_toB = _nestedwb_b_toB_T_2; // @[Scheduler.scala:75:22, :158:75]
assign nestedwb_b_clr_dirty = _nestedwb_b_clr_dirty_T; // @[Scheduler.scala:75:22, :159:37]
wire _nestedwb_c_set_dirty_T = select_c & _mshrs_11_io_schedule_bits_dir_valid; // @[Scheduler.scala:71:46, :153:32, :160:37]
assign _nestedwb_c_set_dirty_T_1 = _nestedwb_c_set_dirty_T & _mshrs_11_io_schedule_bits_dir_bits_data_dirty; // @[Scheduler.scala:71:46, :160:{37,75}]
assign nestedwb_c_set_dirty = _nestedwb_c_set_dirty_T_1; // @[Scheduler.scala:75:22, :160:75]
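// New-request arbitration into the directory: sinkC wins over sinkX, which wins over sinkA, and request_valid also requires the directory to be ready.
// The _view__WIRE_* copies below fan the chosen request bits out, one view per MSHR.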
wire _request_ready_T_2; // @[Scheduler.scala:261:40]
wire _request_valid_T_2; // @[Scheduler.scala:164:39]
wire _request_bits_T_1_prio_0; // @[Scheduler.scala:165:22]
wire _view__WIRE_prio_0 = request_bits_prio_0; // @[Scheduler.scala:163:21, :233:95]
wire _view__WIRE_1_prio_0 = request_bits_prio_0; // @[Scheduler.scala:163:21, :233:95]
wire _view__WIRE_2_prio_0 = request_bits_prio_0; // @[Scheduler.scala:163:21, :233:95]
wire _view__WIRE_3_prio_0 = request_bits_prio_0; // @[Scheduler.scala:163:21, :233:95]
wire _view__WIRE_4_prio_0 = request_bits_prio_0; // @[Scheduler.scala:163:21, :233:95]
wire _view__WIRE_5_prio_0 = request_bits_prio_0; // @[Scheduler.scala:163:21, :233:95]
wire _view__WIRE_6_prio_0 = request_bits_prio_0; // @[Scheduler.scala:163:21, :233:95]
wire _view__WIRE_7_prio_0 = request_bits_prio_0; // @[Scheduler.scala:163:21, :233:95]
wire _view__WIRE_8_prio_0 = request_bits_prio_0; // @[Scheduler.scala:163:21, :233:95]
wire _view__WIRE_9_prio_0 = request_bits_prio_0; // @[Scheduler.scala:163:21, :233:95]
wire _view__WIRE_10_prio_0 = request_bits_prio_0; // @[Scheduler.scala:163:21, :233:95]
wire _view__WIRE_11_prio_0 = request_bits_prio_0; // @[Scheduler.scala:163:21, :233:95]
wire _request_bits_T_1_prio_2; // @[Scheduler.scala:165:22]
wire _request_bits_T_1_control; // @[Scheduler.scala:165:22]
wire _view__WIRE_prio_2 = request_bits_prio_2; // @[Scheduler.scala:163:21, :233:95]
wire _view__WIRE_1_prio_2 = request_bits_prio_2; // @[Scheduler.scala:163:21, :233:95]
wire _view__WIRE_2_prio_2 = request_bits_prio_2; // @[Scheduler.scala:163:21, :233:95]
wire _view__WIRE_3_prio_2 = request_bits_prio_2; // @[Scheduler.scala:163:21, :233:95]
wire _view__WIRE_4_prio_2 = request_bits_prio_2; // @[Scheduler.scala:163:21, :233:95]
wire _view__WIRE_5_prio_2 = request_bits_prio_2; // @[Scheduler.scala:163:21, :233:95]
wire _view__WIRE_6_prio_2 = request_bits_prio_2; // @[Scheduler.scala:163:21, :233:95]
wire _view__WIRE_7_prio_2 = request_bits_prio_2; // @[Scheduler.scala:163:21, :233:95]
wire _view__WIRE_8_prio_2 = request_bits_prio_2; // @[Scheduler.scala:163:21, :233:95]
wire _view__WIRE_9_prio_2 = request_bits_prio_2; // @[Scheduler.scala:163:21, :233:95]
wire _view__WIRE_10_prio_2 = request_bits_prio_2; // @[Scheduler.scala:163:21, :233:95]
wire _view__WIRE_11_prio_2 = request_bits_prio_2; // @[Scheduler.scala:163:21, :233:95]
wire [2:0] _request_bits_T_1_opcode; // @[Scheduler.scala:165:22]
wire _view__WIRE_control = request_bits_control; // @[Scheduler.scala:163:21, :233:95]
wire _view__WIRE_1_control = request_bits_control; // @[Scheduler.scala:163:21, :233:95]
wire _view__WIRE_2_control = request_bits_control; // @[Scheduler.scala:163:21, :233:95]
wire _view__WIRE_3_control = request_bits_control; // @[Scheduler.scala:163:21, :233:95]
wire _view__WIRE_4_control = request_bits_control; // @[Scheduler.scala:163:21, :233:95]
wire _view__WIRE_5_control = request_bits_control; // @[Scheduler.scala:163:21, :233:95]
wire _view__WIRE_6_control = request_bits_control; // @[Scheduler.scala:163:21, :233:95]
wire _view__WIRE_7_control = request_bits_control; // @[Scheduler.scala:163:21, :233:95]
wire _view__WIRE_8_control = request_bits_control; // @[Scheduler.scala:163:21, :233:95]
wire _view__WIRE_9_control = request_bits_control; // @[Scheduler.scala:163:21, :233:95]
wire _view__WIRE_10_control = request_bits_control; // @[Scheduler.scala:163:21, :233:95]
wire _view__WIRE_11_control = request_bits_control; // @[Scheduler.scala:163:21, :233:95]
wire [2:0] _request_bits_T_1_param; // @[Scheduler.scala:165:22]
wire [2:0] _view__WIRE_opcode = request_bits_opcode; // @[Scheduler.scala:163:21, :233:95]
wire [2:0] _view__WIRE_1_opcode = request_bits_opcode; // @[Scheduler.scala:163:21, :233:95]
wire [2:0] _view__WIRE_2_opcode = request_bits_opcode; // @[Scheduler.scala:163:21, :233:95]
wire [2:0] _view__WIRE_3_opcode = request_bits_opcode; // @[Scheduler.scala:163:21, :233:95]
wire [2:0] _view__WIRE_4_opcode = request_bits_opcode; // @[Scheduler.scala:163:21, :233:95]
wire [2:0] _view__WIRE_5_opcode = request_bits_opcode; // @[Scheduler.scala:163:21, :233:95]
wire [2:0] _view__WIRE_6_opcode = request_bits_opcode; // @[Scheduler.scala:163:21, :233:95]
wire [2:0] _view__WIRE_7_opcode = request_bits_opcode; // @[Scheduler.scala:163:21, :233:95]
wire [2:0] _view__WIRE_8_opcode = request_bits_opcode; // @[Scheduler.scala:163:21, :233:95]
wire [2:0] _view__WIRE_9_opcode = request_bits_opcode; // @[Scheduler.scala:163:21, :233:95]
wire [2:0] _view__WIRE_10_opcode = request_bits_opcode; // @[Scheduler.scala:163:21, :233:95]
wire [2:0] _view__WIRE_11_opcode = request_bits_opcode; // @[Scheduler.scala:163:21, :233:95]
wire [2:0] _request_bits_T_1_size; // @[Scheduler.scala:165:22]
wire [2:0] _view__WIRE_param = request_bits_param; // @[Scheduler.scala:163:21, :233:95]
wire [2:0] _view__WIRE_1_param = request_bits_param; // @[Scheduler.scala:163:21, :233:95]
wire [2:0] _view__WIRE_2_param = request_bits_param; // @[Scheduler.scala:163:21, :233:95]
wire [2:0] _view__WIRE_3_param = request_bits_param; // @[Scheduler.scala:163:21, :233:95]
wire [2:0] _view__WIRE_4_param = request_bits_param; // @[Scheduler.scala:163:21, :233:95]
wire [2:0] _view__WIRE_5_param = request_bits_param; // @[Scheduler.scala:163:21, :233:95]
wire [2:0] _view__WIRE_6_param = request_bits_param; // @[Scheduler.scala:163:21, :233:95]
wire [2:0] _view__WIRE_7_param = request_bits_param; // @[Scheduler.scala:163:21, :233:95]
wire [2:0] _view__WIRE_8_param = request_bits_param; // @[Scheduler.scala:163:21, :233:95]
wire [2:0] _view__WIRE_9_param = request_bits_param; // @[Scheduler.scala:163:21, :233:95]
wire [2:0] _view__WIRE_10_param = request_bits_param; // @[Scheduler.scala:163:21, :233:95]
wire [2:0] _view__WIRE_11_param = request_bits_param; // @[Scheduler.scala:163:21, :233:95]
wire [5:0] _request_bits_T_1_source; // @[Scheduler.scala:165:22]
wire [2:0] _view__WIRE_size = request_bits_size; // @[Scheduler.scala:163:21, :233:95]
wire [2:0] _view__WIRE_1_size = request_bits_size; // @[Scheduler.scala:163:21, :233:95]
wire [2:0] _view__WIRE_2_size = request_bits_size; // @[Scheduler.scala:163:21, :233:95]
wire [2:0] _view__WIRE_3_size = request_bits_size; // @[Scheduler.scala:163:21, :233:95]
wire [2:0] _view__WIRE_4_size = request_bits_size; // @[Scheduler.scala:163:21, :233:95]
wire [2:0] _view__WIRE_5_size = request_bits_size; // @[Scheduler.scala:163:21, :233:95]
wire [2:0] _view__WIRE_6_size = request_bits_size; // @[Scheduler.scala:163:21, :233:95]
wire [2:0] _view__WIRE_7_size = request_bits_size; // @[Scheduler.scala:163:21, :233:95]
wire [2:0] _view__WIRE_8_size = request_bits_size; // @[Scheduler.scala:163:21, :233:95]
wire [2:0] _view__WIRE_9_size = request_bits_size; // @[Scheduler.scala:163:21, :233:95]
wire [2:0] _view__WIRE_10_size = request_bits_size; // @[Scheduler.scala:163:21, :233:95]
wire [2:0] _view__WIRE_11_size = request_bits_size; // @[Scheduler.scala:163:21, :233:95]
wire [8:0] _request_bits_T_1_tag; // @[Scheduler.scala:165:22]
wire [5:0] _view__WIRE_source = request_bits_source; // @[Scheduler.scala:163:21, :233:95]
wire [5:0] _view__WIRE_1_source = request_bits_source; // @[Scheduler.scala:163:21, :233:95]
wire [5:0] _view__WIRE_2_source = request_bits_source; // @[Scheduler.scala:163:21, :233:95]
wire [5:0] _view__WIRE_3_source = request_bits_source; // @[Scheduler.scala:163:21, :233:95]
wire [5:0] _view__WIRE_4_source = request_bits_source; // @[Scheduler.scala:163:21, :233:95]
wire [5:0] _view__WIRE_5_source = request_bits_source; // @[Scheduler.scala:163:21, :233:95]
wire [5:0] _view__WIRE_6_source = request_bits_source; // @[Scheduler.scala:163:21, :233:95]
wire [5:0] _view__WIRE_7_source = request_bits_source; // @[Scheduler.scala:163:21, :233:95]
wire [5:0] _view__WIRE_8_source = request_bits_source; // @[Scheduler.scala:163:21, :233:95]
wire [5:0] _view__WIRE_9_source = request_bits_source; // @[Scheduler.scala:163:21, :233:95]
wire [5:0] _view__WIRE_10_source = request_bits_source; // @[Scheduler.scala:163:21, :233:95]
wire [5:0] _view__WIRE_11_source = request_bits_source; // @[Scheduler.scala:163:21, :233:95]
wire [5:0] _request_bits_T_1_offset; // @[Scheduler.scala:165:22]
wire [8:0] _view__WIRE_tag = request_bits_tag; // @[Scheduler.scala:163:21, :233:95]
wire [8:0] _view__WIRE_1_tag = request_bits_tag; // @[Scheduler.scala:163:21, :233:95]
wire [8:0] _view__WIRE_2_tag = request_bits_tag; // @[Scheduler.scala:163:21, :233:95]
wire [8:0] _view__WIRE_3_tag = request_bits_tag; // @[Scheduler.scala:163:21, :233:95]
wire [8:0] _view__WIRE_4_tag = request_bits_tag; // @[Scheduler.scala:163:21, :233:95]
wire [8:0] _view__WIRE_5_tag = request_bits_tag; // @[Scheduler.scala:163:21, :233:95]
wire [8:0] _view__WIRE_6_tag = request_bits_tag; // @[Scheduler.scala:163:21, :233:95]
wire [8:0] _view__WIRE_7_tag = request_bits_tag; // @[Scheduler.scala:163:21, :233:95]
wire [8:0] _view__WIRE_8_tag = request_bits_tag; // @[Scheduler.scala:163:21, :233:95]
wire [8:0] _view__WIRE_9_tag = request_bits_tag; // @[Scheduler.scala:163:21, :233:95]
wire [8:0] _view__WIRE_10_tag = request_bits_tag; // @[Scheduler.scala:163:21, :233:95]
wire [8:0] _view__WIRE_11_tag = request_bits_tag; // @[Scheduler.scala:163:21, :233:95]
wire [5:0] _request_bits_T_1_put; // @[Scheduler.scala:165:22]
wire [5:0] _view__WIRE_offset = request_bits_offset; // @[Scheduler.scala:163:21, :233:95]
wire [5:0] _view__WIRE_1_offset = request_bits_offset; // @[Scheduler.scala:163:21, :233:95]
wire [5:0] _view__WIRE_2_offset = request_bits_offset; // @[Scheduler.scala:163:21, :233:95]
wire [5:0] _view__WIRE_3_offset = request_bits_offset; // @[Scheduler.scala:163:21, :233:95]
wire [5:0] _view__WIRE_4_offset = request_bits_offset; // @[Scheduler.scala:163:21, :233:95]
wire [5:0] _view__WIRE_5_offset = request_bits_offset; // @[Scheduler.scala:163:21, :233:95]
wire [5:0] _view__WIRE_6_offset = request_bits_offset; // @[Scheduler.scala:163:21, :233:95]
wire [5:0] _view__WIRE_7_offset = request_bits_offset; // @[Scheduler.scala:163:21, :233:95]
wire [5:0] _view__WIRE_8_offset = request_bits_offset; // @[Scheduler.scala:163:21, :233:95]
wire [5:0] _view__WIRE_9_offset = request_bits_offset; // @[Scheduler.scala:163:21, :233:95]
wire [5:0] _view__WIRE_10_offset = request_bits_offset; // @[Scheduler.scala:163:21, :233:95]
wire [5:0] _view__WIRE_11_offset = request_bits_offset; // @[Scheduler.scala:163:21, :233:95]
wire [10:0] _request_bits_T_1_set; // @[Scheduler.scala:165:22]
wire [5:0] _view__WIRE_put = request_bits_put; // @[Scheduler.scala:163:21, :233:95]
wire [5:0] _view__WIRE_1_put = request_bits_put; // @[Scheduler.scala:163:21, :233:95]
wire [5:0] _view__WIRE_2_put = request_bits_put; // @[Scheduler.scala:163:21, :233:95]
wire [5:0] _view__WIRE_3_put = request_bits_put; // @[Scheduler.scala:163:21, :233:95]
wire [5:0] _view__WIRE_4_put = request_bits_put; // @[Scheduler.scala:163:21, :233:95]
wire [5:0] _view__WIRE_5_put = request_bits_put; // @[Scheduler.scala:163:21, :233:95]
wire [5:0] _view__WIRE_6_put = request_bits_put; // @[Scheduler.scala:163:21, :233:95]
wire [5:0] _view__WIRE_7_put = request_bits_put; // @[Scheduler.scala:163:21, :233:95]
wire [5:0] _view__WIRE_8_put = request_bits_put; // @[Scheduler.scala:163:21, :233:95]
wire [5:0] _view__WIRE_9_put = request_bits_put; // @[Scheduler.scala:163:21, :233:95]
wire [5:0] _view__WIRE_10_put = request_bits_put; // @[Scheduler.scala:163:21, :233:95]
wire [5:0] _view__WIRE_11_put = request_bits_put; // @[Scheduler.scala:163:21, :233:95]
wire [10:0] request_bits_set; // @[Scheduler.scala:163:21]
wire request_ready; // @[Scheduler.scala:163:21]
wire request_valid; // @[Scheduler.scala:163:21]
wire _request_valid_T = _sinkA_io_req_valid | _sinkX_io_req_valid; // @[Scheduler.scala:54:21, :58:21, :164:62]
wire _request_valid_T_1 = _request_valid_T | _sinkC_io_req_valid; // @[Scheduler.scala:55:21, :164:{62,84}]
assign _request_valid_T_2 = _directory_io_ready & _request_valid_T_1; // @[Scheduler.scala:68:25, :164:{39,84}]
assign request_valid = _request_valid_T_2; // @[Scheduler.scala:163:21, :164:39]
wire [2:0] _request_bits_T_opcode = _sinkX_io_req_valid ? 3'h0 : _sinkA_io_req_bits_opcode; // @[Scheduler.scala:54:21, :58:21, :166:22]
wire [2:0] _request_bits_T_param = _sinkX_io_req_valid ? 3'h0 : _sinkA_io_req_bits_param; // @[Scheduler.scala:54:21, :58:21, :166:22]
wire [2:0] _request_bits_T_size = _sinkX_io_req_valid ? 3'h6 : _sinkA_io_req_bits_size; // @[Scheduler.scala:54:21, :58:21, :166:22]
wire [5:0] _request_bits_T_source = _sinkX_io_req_valid ? 6'h0 : _sinkA_io_req_bits_source; // @[Scheduler.scala:54:21, :58:21, :166:22]
wire [8:0] _request_bits_T_tag = _sinkX_io_req_valid ? _sinkX_io_req_bits_tag : _sinkA_io_req_bits_tag; // @[Scheduler.scala:54:21, :58:21, :166:22]
wire [5:0] _request_bits_T_offset = _sinkX_io_req_valid ? 6'h0 : _sinkA_io_req_bits_offset; // @[Scheduler.scala:54:21, :58:21, :166:22]
wire [5:0] _request_bits_T_put = _sinkX_io_req_valid ? 6'h0 : _sinkA_io_req_bits_put; // @[Scheduler.scala:54:21, :58:21, :166:22]
wire [10:0] _request_bits_T_set = _sinkX_io_req_valid ? _sinkX_io_req_bits_set : _sinkA_io_req_bits_set; // @[Scheduler.scala:54:21, :58:21, :166:22]
wire _request_bits_T_control; // @[Scheduler.scala:166:22]
assign _request_bits_T_1_control = ~_sinkC_io_req_valid & _request_bits_T_control; // @[Scheduler.scala:55:21, :165:22, :166:22]
assign _request_bits_T_1_opcode = _sinkC_io_req_valid ? _sinkC_io_req_bits_opcode : _request_bits_T_opcode; // @[Scheduler.scala:55:21, :165:22, :166:22]
assign _request_bits_T_1_param = _sinkC_io_req_valid ? _sinkC_io_req_bits_param : _request_bits_T_param; // @[Scheduler.scala:55:21, :165:22, :166:22]
assign _request_bits_T_1_size = _sinkC_io_req_valid ? _sinkC_io_req_bits_size : _request_bits_T_size; // @[Scheduler.scala:55:21, :165:22, :166:22]
assign _request_bits_T_1_source = _sinkC_io_req_valid ? _sinkC_io_req_bits_source : _request_bits_T_source; // @[Scheduler.scala:55:21, :165:22, :166:22]
assign _request_bits_T_1_tag = _sinkC_io_req_valid ? _sinkC_io_req_bits_tag : _request_bits_T_tag; // @[Scheduler.scala:55:21, :165:22, :166:22]
assign _request_bits_T_1_offset = _sinkC_io_req_valid ? _sinkC_io_req_bits_offset : _request_bits_T_offset; // @[Scheduler.scala:55:21, :165:22, :166:22]
assign _request_bits_T_1_put = _sinkC_io_req_valid ? _sinkC_io_req_bits_put : _request_bits_T_put; // @[Scheduler.scala:55:21, :165:22, :166:22]
assign _request_bits_T_1_set = _sinkC_io_req_valid ? _sinkC_io_req_bits_set : _request_bits_T_set; // @[Scheduler.scala:55:21, :165:22, :166:22]
assign _request_bits_T_1_prio_0 = ~_sinkC_io_req_valid; // @[Scheduler.scala:55:21, :165:22]
assign request_bits_prio_0 = _request_bits_T_1_prio_0; // @[Scheduler.scala:163:21, :165:22]
assign request_bits_prio_2 = _request_bits_T_1_prio_2; // @[Scheduler.scala:163:21, :165:22]
assign request_bits_control = _request_bits_T_1_control; // @[Scheduler.scala:163:21, :165:22]
assign request_bits_opcode = _request_bits_T_1_opcode; // @[Scheduler.scala:163:21, :165:22]
assign request_bits_param = _request_bits_T_1_param; // @[Scheduler.scala:163:21, :165:22]
assign request_bits_size = _request_bits_T_1_size; // @[Scheduler.scala:163:21, :165:22]
assign request_bits_source = _request_bits_T_1_source; // @[Scheduler.scala:163:21, :165:22]
assign request_bits_tag = _request_bits_T_1_tag; // @[Scheduler.scala:163:21, :165:22]
assign request_bits_offset = _request_bits_T_1_offset; // @[Scheduler.scala:163:21, :165:22]
assign request_bits_put = _request_bits_T_1_put; // @[Scheduler.scala:163:21, :165:22]
assign request_bits_set = _request_bits_T_1_set; // @[Scheduler.scala:163:21, :165:22]
wire _GEN_0 = _directory_io_ready & request_ready; // @[Scheduler.scala:68:25, :163:21, :167:44]
wire _sinkC_io_req_ready_T; // @[Scheduler.scala:167:44]
assign _sinkC_io_req_ready_T = _GEN_0; // @[Scheduler.scala:167:44]
wire _sinkX_io_req_ready_T; // @[Scheduler.scala:168:44]
assign _sinkX_io_req_ready_T = _GEN_0; // @[Scheduler.scala:167:44, :168:44]
wire _sinkA_io_req_ready_T; // @[Scheduler.scala:169:44]
assign _sinkA_io_req_ready_T = _GEN_0; // @[Scheduler.scala:167:44, :169:44]
wire _sinkX_io_req_ready_T_1 = ~_sinkC_io_req_valid; // @[Scheduler.scala:55:21, :165:22, :168:64]
wire _sinkX_io_req_ready_T_2 = _sinkX_io_req_ready_T & _sinkX_io_req_ready_T_1; // @[Scheduler.scala:168:{44,61,64}]
wire _sinkA_io_req_ready_T_1 = ~_sinkC_io_req_valid; // @[Scheduler.scala:55:21, :165:22, :169:64]
wire _sinkA_io_req_ready_T_2 = _sinkA_io_req_ready_T & _sinkA_io_req_ready_T_1; // @[Scheduler.scala:169:{44,61,64}]
wire _sinkA_io_req_ready_T_3 = ~_sinkX_io_req_valid; // @[Scheduler.scala:58:21, :169:87]
wire _sinkA_io_req_ready_T_4 = _sinkA_io_req_ready_T_2 & _sinkA_io_req_ready_T_3; // @[Scheduler.scala:169:{61,84,87}]
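// setMatches: one bit per MSHR, set when that MSHR is valid and already holds the requested set; alloc is permitted only when no MSHR matches.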
wire _setMatches_T = _mshrs_0_io_status_bits_set == request_bits_set; // @[Scheduler.scala:71:46, :163:21, :172:83]
wire _setMatches_T_1 = _mshrs_0_io_status_valid & _setMatches_T; // @[Scheduler.scala:71:46, :172:{59,83}]
wire _setMatches_T_2 = _mshrs_1_io_status_bits_set == request_bits_set; // @[Scheduler.scala:71:46, :163:21, :172:83]
wire _setMatches_T_3 = _mshrs_1_io_status_valid & _setMatches_T_2; // @[Scheduler.scala:71:46, :172:{59,83}]
wire _setMatches_T_4 = _mshrs_2_io_status_bits_set == request_bits_set; // @[Scheduler.scala:71:46, :163:21, :172:83]
wire _setMatches_T_5 = _mshrs_2_io_status_valid & _setMatches_T_4; // @[Scheduler.scala:71:46, :172:{59,83}]
wire _setMatches_T_6 = _mshrs_3_io_status_bits_set == request_bits_set; // @[Scheduler.scala:71:46, :163:21, :172:83]
wire _setMatches_T_7 = _mshrs_3_io_status_valid & _setMatches_T_6; // @[Scheduler.scala:71:46, :172:{59,83}]
wire _setMatches_T_8 = _mshrs_4_io_status_bits_set == request_bits_set; // @[Scheduler.scala:71:46, :163:21, :172:83]
wire _setMatches_T_9 = _mshrs_4_io_status_valid & _setMatches_T_8; // @[Scheduler.scala:71:46, :172:{59,83}]
wire _setMatches_T_10 = _mshrs_5_io_status_bits_set == request_bits_set; // @[Scheduler.scala:71:46, :163:21, :172:83]
wire _setMatches_T_11 = _mshrs_5_io_status_valid & _setMatches_T_10; // @[Scheduler.scala:71:46, :172:{59,83}]
wire _setMatches_T_12 = _mshrs_6_io_status_bits_set == request_bits_set; // @[Scheduler.scala:71:46, :163:21, :172:83]
wire _setMatches_T_13 = _mshrs_6_io_status_valid & _setMatches_T_12; // @[Scheduler.scala:71:46, :172:{59,83}]
wire _setMatches_T_14 = _mshrs_7_io_status_bits_set == request_bits_set; // @[Scheduler.scala:71:46, :163:21, :172:83]
wire _setMatches_T_15 = _mshrs_7_io_status_valid & _setMatches_T_14; // @[Scheduler.scala:71:46, :172:{59,83}]
wire _setMatches_T_16 = _mshrs_8_io_status_bits_set == request_bits_set; // @[Scheduler.scala:71:46, :163:21, :172:83]
wire _setMatches_T_17 = _mshrs_8_io_status_valid & _setMatches_T_16; // @[Scheduler.scala:71:46, :172:{59,83}]
wire _setMatches_T_18 = _mshrs_9_io_status_bits_set == request_bits_set; // @[Scheduler.scala:71:46, :163:21, :172:83]
wire _setMatches_T_19 = _mshrs_9_io_status_valid & _setMatches_T_18; // @[Scheduler.scala:71:46, :172:{59,83}]
wire _setMatches_T_20 = _mshrs_10_io_status_bits_set == request_bits_set; // @[Scheduler.scala:71:46, :163:21, :172:83]
wire _setMatches_T_21 = _mshrs_10_io_status_valid & _setMatches_T_20; // @[Scheduler.scala:71:46, :172:{59,83}]
wire _setMatches_T_22 = _mshrs_11_io_status_bits_set == request_bits_set; // @[Scheduler.scala:71:46, :163:21, :172:83]
wire _setMatches_T_23 = _mshrs_11_io_status_valid & _setMatches_T_22; // @[Scheduler.scala:71:46, :172:{59,83}]
wire [1:0] setMatches_lo_lo_hi = {_setMatches_T_5, _setMatches_T_3}; // @[Scheduler.scala:172:{23,59}]
wire [2:0] setMatches_lo_lo = {setMatches_lo_lo_hi, _setMatches_T_1}; // @[Scheduler.scala:172:{23,59}]
wire [1:0] setMatches_lo_hi_hi = {_setMatches_T_11, _setMatches_T_9}; // @[Scheduler.scala:172:{23,59}]
wire [2:0] setMatches_lo_hi = {setMatches_lo_hi_hi, _setMatches_T_7}; // @[Scheduler.scala:172:{23,59}]
wire [5:0] setMatches_lo = {setMatches_lo_hi, setMatches_lo_lo}; // @[Scheduler.scala:172:23]
wire [1:0] setMatches_hi_lo_hi = {_setMatches_T_17, _setMatches_T_15}; // @[Scheduler.scala:172:{23,59}]
wire [2:0] setMatches_hi_lo = {setMatches_hi_lo_hi, _setMatches_T_13}; // @[Scheduler.scala:172:{23,59}]
wire [1:0] setMatches_hi_hi_hi = {_setMatches_T_23, _setMatches_T_21}; // @[Scheduler.scala:172:{23,59}]
wire [2:0] setMatches_hi_hi = {setMatches_hi_hi_hi, _setMatches_T_19}; // @[Scheduler.scala:172:{23,59}]
wire [5:0] setMatches_hi = {setMatches_hi_hi, setMatches_hi_lo}; // @[Scheduler.scala:172:23]
wire [11:0] setMatches = {setMatches_hi, setMatches_lo}; // @[Scheduler.scala:172:23]
wire _alloc_T = |setMatches; // @[Scheduler.scala:172:23, :173:27]
wire alloc = ~_alloc_T; // @[Scheduler.scala:173:{15,27}]
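// Each setMatches bit is fanned out as the one-hot select for the blockB/blockC/nestB/nestC
// muxes below.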
wire _blockB_T = setMatches[0]; // @[Mux.scala:32:36]
wire _blockC_T = setMatches[0]; // @[Mux.scala:32:36]
wire _nestB_T = setMatches[0]; // @[Mux.scala:32:36]
wire _nestC_T = setMatches[0]; // @[Mux.scala:32:36]
wire _blockB_T_1 = setMatches[1]; // @[Mux.scala:32:36]
wire _blockC_T_1 = setMatches[1]; // @[Mux.scala:32:36]
wire _nestB_T_1 = setMatches[1]; // @[Mux.scala:32:36]
wire _nestC_T_1 = setMatches[1]; // @[Mux.scala:32:36]
wire _blockB_T_2 = setMatches[2]; // @[Mux.scala:32:36]
wire _blockC_T_2 = setMatches[2]; // @[Mux.scala:32:36]
wire _nestB_T_2 = setMatches[2]; // @[Mux.scala:32:36]
wire _nestC_T_2 = setMatches[2]; // @[Mux.scala:32:36]
wire _blockB_T_3 = setMatches[3]; // @[Mux.scala:32:36]
wire _blockC_T_3 = setMatches[3]; // @[Mux.scala:32:36]
wire _nestB_T_3 = setMatches[3]; // @[Mux.scala:32:36]
wire _nestC_T_3 = setMatches[3]; // @[Mux.scala:32:36]
wire _blockB_T_4 = setMatches[4]; // @[Mux.scala:32:36]
wire _blockC_T_4 = setMatches[4]; // @[Mux.scala:32:36]
wire _nestB_T_4 = setMatches[4]; // @[Mux.scala:32:36]
wire _nestC_T_4 = setMatches[4]; // @[Mux.scala:32:36]
wire _blockB_T_5 = setMatches[5]; // @[Mux.scala:32:36]
wire _blockC_T_5 = setMatches[5]; // @[Mux.scala:32:36]
wire _nestB_T_5 = setMatches[5]; // @[Mux.scala:32:36]
wire _nestC_T_5 = setMatches[5]; // @[Mux.scala:32:36]
wire _blockB_T_6 = setMatches[6]; // @[Mux.scala:32:36]
wire _blockC_T_6 = setMatches[6]; // @[Mux.scala:32:36]
wire _nestB_T_6 = setMatches[6]; // @[Mux.scala:32:36]
wire _nestC_T_6 = setMatches[6]; // @[Mux.scala:32:36]
wire _blockB_T_7 = setMatches[7]; // @[Mux.scala:32:36]
wire _blockC_T_7 = setMatches[7]; // @[Mux.scala:32:36]
wire _nestB_T_7 = setMatches[7]; // @[Mux.scala:32:36]
wire _nestC_T_7 = setMatches[7]; // @[Mux.scala:32:36]
wire _blockB_T_8 = setMatches[8]; // @[Mux.scala:32:36]
wire _blockC_T_8 = setMatches[8]; // @[Mux.scala:32:36]
wire _nestB_T_8 = setMatches[8]; // @[Mux.scala:32:36]
wire _nestC_T_8 = setMatches[8]; // @[Mux.scala:32:36]
wire _blockB_T_9 = setMatches[9]; // @[Mux.scala:32:36]
wire _blockC_T_9 = setMatches[9]; // @[Mux.scala:32:36]
wire _nestB_T_9 = setMatches[9]; // @[Mux.scala:32:36]
wire _nestC_T_9 = setMatches[9]; // @[Mux.scala:32:36]
wire _blockB_T_10 = setMatches[10]; // @[Mux.scala:32:36]
wire _blockC_T_10 = setMatches[10]; // @[Mux.scala:32:36]
wire _nestB_T_10 = setMatches[10]; // @[Mux.scala:32:36]
wire _nestC_T_10 = setMatches[10]; // @[Mux.scala:32:36]
wire _blockB_T_11 = setMatches[11]; // @[Mux.scala:32:36]
wire _blockC_T_11 = setMatches[11]; // @[Mux.scala:32:36]
wire _nestB_T_11 = setMatches[11]; // @[Mux.scala:32:36]
wire _nestC_T_11 = setMatches[11]; // @[Mux.scala:32:36]
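// blockB: AND-OR (one-hot mux) reduction of status.blockB from the set-matching MSHR.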
wire _blockB_T_12 = _blockB_T & _mshrs_0_io_status_bits_blockB; // @[Mux.scala:30:73, :32:36]
wire _blockB_T_13 = _blockB_T_1 & _mshrs_1_io_status_bits_blockB; // @[Mux.scala:30:73, :32:36]
wire _blockB_T_14 = _blockB_T_2 & _mshrs_2_io_status_bits_blockB; // @[Mux.scala:30:73, :32:36]
wire _blockB_T_15 = _blockB_T_3 & _mshrs_3_io_status_bits_blockB; // @[Mux.scala:30:73, :32:36]
wire _blockB_T_16 = _blockB_T_4 & _mshrs_4_io_status_bits_blockB; // @[Mux.scala:30:73, :32:36]
wire _blockB_T_17 = _blockB_T_5 & _mshrs_5_io_status_bits_blockB; // @[Mux.scala:30:73, :32:36]
wire _blockB_T_18 = _blockB_T_6 & _mshrs_6_io_status_bits_blockB; // @[Mux.scala:30:73, :32:36]
wire _blockB_T_19 = _blockB_T_7 & _mshrs_7_io_status_bits_blockB; // @[Mux.scala:30:73, :32:36]
wire _blockB_T_20 = _blockB_T_8 & _mshrs_8_io_status_bits_blockB; // @[Mux.scala:30:73, :32:36]
wire _blockB_T_21 = _blockB_T_9 & _mshrs_9_io_status_bits_blockB; // @[Mux.scala:30:73, :32:36]
wire _blockB_T_22 = _blockB_T_10 & _mshrs_10_io_status_bits_blockB; // @[Mux.scala:30:73, :32:36]
wire _blockB_T_23 = _blockB_T_11 & _mshrs_11_io_status_bits_blockB; // @[Mux.scala:30:73, :32:36]
wire _blockB_T_24 = _blockB_T_12 | _blockB_T_13; // @[Mux.scala:30:73]
wire _blockB_T_25 = _blockB_T_24 | _blockB_T_14; // @[Mux.scala:30:73]
wire _blockB_T_26 = _blockB_T_25 | _blockB_T_15; // @[Mux.scala:30:73]
wire _blockB_T_27 = _blockB_T_26 | _blockB_T_16; // @[Mux.scala:30:73]
wire _blockB_T_28 = _blockB_T_27 | _blockB_T_17; // @[Mux.scala:30:73]
wire _blockB_T_29 = _blockB_T_28 | _blockB_T_18; // @[Mux.scala:30:73]
wire _blockB_T_30 = _blockB_T_29 | _blockB_T_19; // @[Mux.scala:30:73]
wire _blockB_T_31 = _blockB_T_30 | _blockB_T_20; // @[Mux.scala:30:73]
wire _blockB_T_32 = _blockB_T_31 | _blockB_T_21; // @[Mux.scala:30:73]
wire _blockB_T_33 = _blockB_T_32 | _blockB_T_22; // @[Mux.scala:30:73]
wire _blockB_T_34 = _blockB_T_33 | _blockB_T_23; // @[Mux.scala:30:73]
wire _blockB_WIRE = _blockB_T_34; // @[Mux.scala:30:73]
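// blockC: same one-hot mux over status.blockC; it only blocks requests that carry prio(2),
// i.e. C-channel (release) traffic.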
wire _blockC_T_12 = _blockC_T & _mshrs_0_io_status_bits_blockC; // @[Mux.scala:30:73, :32:36]
wire _blockC_T_13 = _blockC_T_1 & _mshrs_1_io_status_bits_blockC; // @[Mux.scala:30:73, :32:36]
wire _blockC_T_14 = _blockC_T_2 & _mshrs_2_io_status_bits_blockC; // @[Mux.scala:30:73, :32:36]
wire _blockC_T_15 = _blockC_T_3 & _mshrs_3_io_status_bits_blockC; // @[Mux.scala:30:73, :32:36]
wire _blockC_T_16 = _blockC_T_4 & _mshrs_4_io_status_bits_blockC; // @[Mux.scala:30:73, :32:36]
wire _blockC_T_17 = _blockC_T_5 & _mshrs_5_io_status_bits_blockC; // @[Mux.scala:30:73, :32:36]
wire _blockC_T_18 = _blockC_T_6 & _mshrs_6_io_status_bits_blockC; // @[Mux.scala:30:73, :32:36]
wire _blockC_T_19 = _blockC_T_7 & _mshrs_7_io_status_bits_blockC; // @[Mux.scala:30:73, :32:36]
wire _blockC_T_20 = _blockC_T_8 & _mshrs_8_io_status_bits_blockC; // @[Mux.scala:30:73, :32:36]
wire _blockC_T_21 = _blockC_T_9 & _mshrs_9_io_status_bits_blockC; // @[Mux.scala:30:73, :32:36]
wire _blockC_T_22 = _blockC_T_10 & _mshrs_10_io_status_bits_blockC; // @[Mux.scala:30:73, :32:36]
wire _blockC_T_23 = _blockC_T_11 & _mshrs_11_io_status_bits_blockC; // @[Mux.scala:30:73, :32:36]
wire _blockC_T_24 = _blockC_T_12 | _blockC_T_13; // @[Mux.scala:30:73]
wire _blockC_T_25 = _blockC_T_24 | _blockC_T_14; // @[Mux.scala:30:73]
wire _blockC_T_26 = _blockC_T_25 | _blockC_T_15; // @[Mux.scala:30:73]
wire _blockC_T_27 = _blockC_T_26 | _blockC_T_16; // @[Mux.scala:30:73]
wire _blockC_T_28 = _blockC_T_27 | _blockC_T_17; // @[Mux.scala:30:73]
wire _blockC_T_29 = _blockC_T_28 | _blockC_T_18; // @[Mux.scala:30:73]
wire _blockC_T_30 = _blockC_T_29 | _blockC_T_19; // @[Mux.scala:30:73]
wire _blockC_T_31 = _blockC_T_30 | _blockC_T_20; // @[Mux.scala:30:73]
wire _blockC_T_32 = _blockC_T_31 | _blockC_T_21; // @[Mux.scala:30:73]
wire _blockC_T_33 = _blockC_T_32 | _blockC_T_22; // @[Mux.scala:30:73]
wire _blockC_T_34 = _blockC_T_33 | _blockC_T_23; // @[Mux.scala:30:73]
wire _blockC_WIRE = _blockC_T_34; // @[Mux.scala:30:73]
wire blockC = _blockC_WIRE & request_bits_prio_2; // @[Mux.scala:30:73]
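// nestB: one-hot mux over status.nestB of the set-matching MSHR.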
wire _nestB_T_12 = _nestB_T & _mshrs_0_io_status_bits_nestB; // @[Mux.scala:30:73, :32:36]
wire _nestB_T_13 = _nestB_T_1 & _mshrs_1_io_status_bits_nestB; // @[Mux.scala:30:73, :32:36]
wire _nestB_T_14 = _nestB_T_2 & _mshrs_2_io_status_bits_nestB; // @[Mux.scala:30:73, :32:36]
wire _nestB_T_15 = _nestB_T_3 & _mshrs_3_io_status_bits_nestB; // @[Mux.scala:30:73, :32:36]
wire _nestB_T_16 = _nestB_T_4 & _mshrs_4_io_status_bits_nestB; // @[Mux.scala:30:73, :32:36]
wire _nestB_T_17 = _nestB_T_5 & _mshrs_5_io_status_bits_nestB; // @[Mux.scala:30:73, :32:36]
wire _nestB_T_18 = _nestB_T_6 & _mshrs_6_io_status_bits_nestB; // @[Mux.scala:30:73, :32:36]
wire _nestB_T_19 = _nestB_T_7 & _mshrs_7_io_status_bits_nestB; // @[Mux.scala:30:73, :32:36]
wire _nestB_T_20 = _nestB_T_8 & _mshrs_8_io_status_bits_nestB; // @[Mux.scala:30:73, :32:36]
wire _nestB_T_21 = _nestB_T_9 & _mshrs_9_io_status_bits_nestB; // @[Mux.scala:30:73, :32:36]
wire _nestB_T_22 = _nestB_T_10 & _mshrs_10_io_status_bits_nestB; // @[Mux.scala:30:73, :32:36]
wire _nestB_T_23 = _nestB_T_11 & _mshrs_11_io_status_bits_nestB; // @[Mux.scala:30:73, :32:36]
wire _nestB_T_24 = _nestB_T_12 | _nestB_T_13; // @[Mux.scala:30:73]
wire _nestB_T_25 = _nestB_T_24 | _nestB_T_14; // @[Mux.scala:30:73]
wire _nestB_T_26 = _nestB_T_25 | _nestB_T_15; // @[Mux.scala:30:73]
wire _nestB_T_27 = _nestB_T_26 | _nestB_T_16; // @[Mux.scala:30:73]
wire _nestB_T_28 = _nestB_T_27 | _nestB_T_17; // @[Mux.scala:30:73]
wire _nestB_T_29 = _nestB_T_28 | _nestB_T_18; // @[Mux.scala:30:73]
wire _nestB_T_30 = _nestB_T_29 | _nestB_T_19; // @[Mux.scala:30:73]
wire _nestB_T_31 = _nestB_T_30 | _nestB_T_20; // @[Mux.scala:30:73]
wire _nestB_T_32 = _nestB_T_31 | _nestB_T_21; // @[Mux.scala:30:73]
wire _nestB_T_33 = _nestB_T_32 | _nestB_T_22; // @[Mux.scala:30:73]
wire _nestB_T_34 = _nestB_T_33 | _nestB_T_23; // @[Mux.scala:30:73]
wire _nestB_WIRE = _nestB_T_34; // @[Mux.scala:30:73]
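// nestC: one-hot mux over status.nestC; like blockC, the result is qualified by prio(2).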
wire _nestC_T_12 = _nestC_T & _mshrs_0_io_status_bits_nestC; // @[Mux.scala:30:73, :32:36]
wire _nestC_T_13 = _nestC_T_1 & _mshrs_1_io_status_bits_nestC; // @[Mux.scala:30:73, :32:36]
wire _nestC_T_14 = _nestC_T_2 & _mshrs_2_io_status_bits_nestC; // @[Mux.scala:30:73, :32:36]
wire _nestC_T_15 = _nestC_T_3 & _mshrs_3_io_status_bits_nestC; // @[Mux.scala:30:73, :32:36]
wire _nestC_T_16 = _nestC_T_4 & _mshrs_4_io_status_bits_nestC; // @[Mux.scala:30:73, :32:36]
wire _nestC_T_17 = _nestC_T_5 & _mshrs_5_io_status_bits_nestC; // @[Mux.scala:30:73, :32:36]
wire _nestC_T_18 = _nestC_T_6 & _mshrs_6_io_status_bits_nestC; // @[Mux.scala:30:73, :32:36]
wire _nestC_T_19 = _nestC_T_7 & _mshrs_7_io_status_bits_nestC; // @[Mux.scala:30:73, :32:36]
wire _nestC_T_20 = _nestC_T_8 & _mshrs_8_io_status_bits_nestC; // @[Mux.scala:30:73, :32:36]
wire _nestC_T_21 = _nestC_T_9 & _mshrs_9_io_status_bits_nestC; // @[Mux.scala:30:73, :32:36]
wire _nestC_T_22 = _nestC_T_10 & _mshrs_10_io_status_bits_nestC; // @[Mux.scala:30:73, :32:36]
wire _nestC_T_23 = _nestC_T_11 & _mshrs_11_io_status_bits_nestC; // @[Mux.scala:30:73, :32:36]
wire _nestC_T_24 = _nestC_T_12 | _nestC_T_13; // @[Mux.scala:30:73]
wire _nestC_T_25 = _nestC_T_24 | _nestC_T_14; // @[Mux.scala:30:73]
wire _nestC_T_26 = _nestC_T_25 | _nestC_T_15; // @[Mux.scala:30:73]
wire _nestC_T_27 = _nestC_T_26 | _nestC_T_16; // @[Mux.scala:30:73]
wire _nestC_T_28 = _nestC_T_27 | _nestC_T_17; // @[Mux.scala:30:73]
wire _nestC_T_29 = _nestC_T_28 | _nestC_T_18; // @[Mux.scala:30:73]
wire _nestC_T_30 = _nestC_T_29 | _nestC_T_19; // @[Mux.scala:30:73]
wire _nestC_T_31 = _nestC_T_30 | _nestC_T_20; // @[Mux.scala:30:73]
wire _nestC_T_32 = _nestC_T_31 | _nestC_T_21; // @[Mux.scala:30:73]
wire _nestC_T_33 = _nestC_T_32 | _nestC_T_22; // @[Mux.scala:30:73]
wire _nestC_T_34 = _nestC_T_33 | _nestC_T_23; // @[Mux.scala:30:73]
wire _nestC_WIRE = _nestC_T_34; // @[Mux.scala:30:73]
wire nestC = _nestC_WIRE & request_bits_prio_2; // @[Mux.scala:30:73]
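// Priority filter (Scheduler.scala:182): the lower ten MSHRs always qualify as queueing
// targets; bit 10 additionally requires a non-A request (prio(0) == 0) and bit 11 a
// C-channel request (prio(2)). lowerMatches keeps only the permitted set matches.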
wire _prioFilter_T = ~request_bits_prio_0; // @[Scheduler.scala:163:21, :182:46]
wire [1:0] prioFilter_hi = {request_bits_prio_2, _prioFilter_T}; // @[Scheduler.scala:163:21, :182:{23,46}]
wire [11:0] prioFilter = {prioFilter_hi, 10'h3FF}; // @[Scheduler.scala:182:23]
wire [11:0] lowerMatches = setMatches & prioFilter; // @[Scheduler.scala:172:23, :182:23, :183:33]
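// queue (Scheduler.scala:185): append the request behind an existing MSHR when it matches
// a permitted MSHR and neither nests into it (nestC) nor is blocked by it (blockC); the
// blockB/nestB terms of the original expression appear to have been constant-folded away.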
wire _queue_T = |lowerMatches; // @[Scheduler.scala:183:33, :185:28]
wire _queue_T_2 = _queue_T; // @[Scheduler.scala:185:{28,32}]
wire _queue_T_3 = ~nestC; // @[Scheduler.scala:180:70, :185:45]
wire _queue_T_4 = _queue_T_2 & _queue_T_3; // @[Scheduler.scala:185:{32,42,45}]
wire _queue_T_6 = _queue_T_4; // @[Scheduler.scala:185:{42,52}]
wire _queue_T_7 = ~blockC; // @[Scheduler.scala:176:70, :185:66]
wire queue = _queue_T_6 & _queue_T_7; // @[Scheduler.scala:185:{52,63,66}]
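// request_valid & queue is fanned out to the per-MSHR bypass checks below and to the
// queue-push valid (Scheduler.scala:195, :213, :231, :270).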
wire _T_12 = request_valid & queue; // @[Scheduler.scala:163:21, :185:63, :195:31]
wire _bypass_T; // @[Scheduler.scala:213:30]
assign _bypass_T = _T_12; // @[Scheduler.scala:195:31, :213:30]
wire _bypass_T_1; // @[Scheduler.scala:231:32]
assign _bypass_T_1 = _T_12; // @[Scheduler.scala:195:31, :231:32]
wire _bypass_T_2; // @[Scheduler.scala:231:32]
assign _bypass_T_2 = _T_12; // @[Scheduler.scala:195:31, :231:32]
wire _bypass_T_3; // @[Scheduler.scala:231:32]
assign _bypass_T_3 = _T_12; // @[Scheduler.scala:195:31, :231:32]
wire _bypass_T_4; // @[Scheduler.scala:231:32]
assign _bypass_T_4 = _T_12; // @[Scheduler.scala:195:31, :231:32]
wire _bypass_T_5; // @[Scheduler.scala:231:32]
assign _bypass_T_5 = _T_12; // @[Scheduler.scala:195:31, :231:32]
wire _bypass_T_6; // @[Scheduler.scala:231:32]
assign _bypass_T_6 = _T_12; // @[Scheduler.scala:195:31, :231:32]
wire _bypass_T_7; // @[Scheduler.scala:231:32]
assign _bypass_T_7 = _T_12; // @[Scheduler.scala:195:31, :231:32]
wire _bypass_T_8; // @[Scheduler.scala:231:32]
assign _bypass_T_8 = _T_12; // @[Scheduler.scala:195:31, :231:32]
wire _bypass_T_9; // @[Scheduler.scala:231:32]
assign _bypass_T_9 = _T_12; // @[Scheduler.scala:195:31, :231:32]
wire _bypass_T_10; // @[Scheduler.scala:231:32]
assign _bypass_T_10 = _T_12; // @[Scheduler.scala:195:31, :231:32]
wire _bypass_T_11; // @[Scheduler.scala:231:32]
assign _bypass_T_11 = _T_12; // @[Scheduler.scala:195:31, :231:32]
wire _bypass_T_12; // @[Scheduler.scala:231:32]
assign _bypass_T_12 = _T_12; // @[Scheduler.scala:195:31, :231:32]
wire _requests_io_push_valid_T; // @[Scheduler.scala:270:43]
assign _requests_io_push_valid_T = _T_12; // @[Scheduler.scala:195:31, :270:43]
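// lowerMatches1 (Scheduler.scala:200-201) reduces lowerMatches to a single target: bit 11
// takes precedence over bit 10, which takes precedence over the remaining lower matches;
// it also drives the queue-push index (Scheduler.scala:274).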
wire _lowerMatches1_T = lowerMatches[11]; // @[Scheduler.scala:183:33, :200:21]
wire _lowerMatches1_T_2 = lowerMatches[10]; // @[Scheduler.scala:183:33, :201:21]
wire [11:0] _lowerMatches1_T_4 = _lowerMatches1_T_2 ? 12'h400 : lowerMatches; // @[Scheduler.scala:183:33, :201:{8,21}]
wire [11:0] lowerMatches1 = _lowerMatches1_T ? 12'h800 : _lowerMatches1_T_4; // @[Scheduler.scala:200:{8,21}, :201:8]
wire [11:0] _requests_io_push_bits_index_T = lowerMatches1; // @[Scheduler.scala:200:8, :274:30]
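// For the MSHR picked by mshr_selectOH, inspect its three queue banks (A = bits 11:0,
// B = 23:12, C = 35:24 of requests.io.valid) to find pending entries (a_pop/b_pop/c_pop)
// and decide whether the incoming request may bypass the queue directly into the reloading
// MSHR, i.e. only when no queued entry of the same or higher channel priority would be
// overtaken.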
wire [23:0] _GEN_1 = {2{mshr_selectOH}}; // @[Scheduler.scala:121:70, :206:30]
wire [23:0] selected_requests_hi; // @[Scheduler.scala:206:30]
assign selected_requests_hi = _GEN_1; // @[Scheduler.scala:206:30]
wire [23:0] pop_index_hi; // @[Scheduler.scala:241:31]
assign pop_index_hi = _GEN_1; // @[Scheduler.scala:206:30, :241:31]
wire [35:0] _selected_requests_T = {selected_requests_hi, mshr_selectOH}; // @[Scheduler.scala:121:70, :206:30]
wire [35:0] selected_requests = _selected_requests_T & _requests_io_valid; // @[Scheduler.scala:70:24, :206:{30,76}]
wire [11:0] _a_pop_T = selected_requests[11:0]; // @[Scheduler.scala:206:76, :207:32]
wire a_pop = |_a_pop_T; // @[Scheduler.scala:207:{32,79}]
wire [11:0] _b_pop_T = selected_requests[23:12]; // @[Scheduler.scala:206:76, :208:32]
wire b_pop = |_b_pop_T; // @[Scheduler.scala:208:{32,79}]
wire _bypassMatches_T_4 = b_pop; // @[Scheduler.scala:208:79, :211:76]
wire [11:0] _c_pop_T = selected_requests[35:24]; // @[Scheduler.scala:206:76, :209:32]
wire c_pop = |_c_pop_T; // @[Scheduler.scala:209:{32,79}]
wire [11:0] _bypassMatches_T = mshr_selectOH & lowerMatches1; // @[Scheduler.scala:121:70, :200:8, :210:38]
wire _bypassMatches_T_1 = |_bypassMatches_T; // @[Scheduler.scala:210:{38,55}]
wire _bypassMatches_T_2 = c_pop | request_bits_prio_2; // @[Scheduler.scala:163:21, :209:79, :211:33]
wire _bypassMatches_T_3 = ~c_pop; // @[Scheduler.scala:209:79, :211:58]
wire _bypassMatches_T_5 = ~b_pop; // @[Scheduler.scala:208:79, :211:101]
wire _bypassMatches_T_6 = ~a_pop; // @[Scheduler.scala:207:79, :211:109]
wire _bypassMatches_T_7 = _bypassMatches_T_4 ? _bypassMatches_T_5 : _bypassMatches_T_6; // @[Scheduler.scala:211:{69,76,101,109}]
wire _bypassMatches_T_8 = _bypassMatches_T_2 ? _bypassMatches_T_3 : _bypassMatches_T_7; // @[Scheduler.scala:211:{26,33,58,69}]
wire bypassMatches = _bypassMatches_T_1 & _bypassMatches_T_8; // @[Scheduler.scala:210:{55,59}, :211:26]
wire _may_pop_T = a_pop | b_pop; // @[Scheduler.scala:207:79, :208:79, :212:23]
wire may_pop = _may_pop_T | c_pop; // @[Scheduler.scala:209:79, :212:{23,32}]
wire bypass = _bypass_T & bypassMatches; // @[Scheduler.scala:210:59, :213:{30,39}]
wire _will_reload_T = may_pop | bypass; // @[Scheduler.scala:212:32, :213:39, :214:49]
wire will_reload = schedule_reload & _will_reload_T; // @[Mux.scala:30:73]
wire _GEN_2 = schedule_reload & may_pop; // @[Mux.scala:30:73]
wire _will_pop_T; // @[Scheduler.scala:215:34]
assign _will_pop_T = _GEN_2; // @[Scheduler.scala:215:34]
wire _mshr_uses_directory_assuming_no_bypass_T; // @[Scheduler.scala:247:64]
assign _mshr_uses_directory_assuming_no_bypass_T = _GEN_2; // @[Scheduler.scala:215:34, :247:64]
wire _will_pop_T_1 = ~bypass; // @[Scheduler.scala:213:39, :215:48]
wire will_pop = _will_pop_T & _will_pop_T_1; // @[Scheduler.scala:215:{34,45,48}]
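// Per-MSHR reload path, instantiated once per MSHR (index 0 first): check that MSHR's own
// queue bank for pending A/B/C entries, compute its bypass and reload conditions, mux the
// bypassed request over the queue head for io.allocate, and flag allocate.bits.repeat when
// the reloaded tag equals the MSHR's current tag.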
wire a_pop_1 = _requests_io_valid[0]; // @[Scheduler.scala:70:24, :225:34]
wire b_pop_1 = _requests_io_valid[12]; // @[Scheduler.scala:70:24, :226:34]
wire _bypassMatches_T_12 = b_pop_1; // @[Scheduler.scala:226:34, :229:78]
wire c_pop_1 = _requests_io_valid[24]; // @[Scheduler.scala:70:24, :227:34]
wire _bypassMatches_T_9 = lowerMatches1[0]; // @[Scheduler.scala:200:8, :228:38]
wire _bypassMatches_T_10 = c_pop_1 | request_bits_prio_2; // @[Scheduler.scala:163:21, :227:34, :229:35]
wire _bypassMatches_T_11 = ~c_pop_1; // @[Scheduler.scala:227:34, :229:60]
wire _bypassMatches_T_13 = ~b_pop_1; // @[Scheduler.scala:226:34, :229:103]
wire _bypassMatches_T_14 = ~a_pop_1; // @[Scheduler.scala:225:34, :229:111]
wire _bypassMatches_T_15 = _bypassMatches_T_12 ? _bypassMatches_T_13 : _bypassMatches_T_14; // @[Scheduler.scala:229:{71,78,103,111}]
wire _bypassMatches_T_16 = _bypassMatches_T_10 ? _bypassMatches_T_11 : _bypassMatches_T_15; // @[Scheduler.scala:229:{28,35,60,71}]
wire bypassMatches_1 = _bypassMatches_T_9 & _bypassMatches_T_16; // @[Scheduler.scala:228:{38,42}, :229:28]
wire _may_pop_T_1 = a_pop_1 | b_pop_1; // @[Scheduler.scala:225:34, :226:34, :230:25]
wire may_pop_1 = _may_pop_T_1 | c_pop_1; // @[Scheduler.scala:227:34, :230:{25,34}]
wire bypass_1 = _bypass_T_1 & bypassMatches_1; // @[Scheduler.scala:228:42, :231:{32,41}]
wire _will_reload_T_1 = may_pop_1 | bypass_1; // @[Scheduler.scala:230:34, :231:41, :232:61]
wire will_reload_1 = _mshrs_0_io_schedule_bits_reload & _will_reload_T_1; // @[Scheduler.scala:71:46, :232:{49,61}]
wire _view__T_prio_0 = bypass_1 ? _view__WIRE_prio_0 : _requests_io_data_prio_0; // @[Scheduler.scala:70:24, :231:41, :233:{78,95}]
wire _view__T_prio_1 = ~bypass_1 & _requests_io_data_prio_1; // @[Scheduler.scala:70:24, :231:41, :233:78]
wire _view__T_prio_2 = bypass_1 ? _view__WIRE_prio_2 : _requests_io_data_prio_2; // @[Scheduler.scala:70:24, :231:41, :233:{78,95}]
wire _view__T_control = bypass_1 ? _view__WIRE_control : _requests_io_data_control; // @[Scheduler.scala:70:24, :231:41, :233:{78,95}]
wire [2:0] _view__T_opcode = bypass_1 ? _view__WIRE_opcode : _requests_io_data_opcode; // @[Scheduler.scala:70:24, :231:41, :233:{78,95}]
wire [2:0] _view__T_param = bypass_1 ? _view__WIRE_param : _requests_io_data_param; // @[Scheduler.scala:70:24, :231:41, :233:{78,95}]
wire [2:0] _view__T_size = bypass_1 ? _view__WIRE_size : _requests_io_data_size; // @[Scheduler.scala:70:24, :231:41, :233:{78,95}]
wire [5:0] _view__T_source = bypass_1 ? _view__WIRE_source : _requests_io_data_source; // @[Scheduler.scala:70:24, :231:41, :233:{78,95}]
wire [8:0] _view__T_tag = bypass_1 ? _view__WIRE_tag : _requests_io_data_tag; // @[Scheduler.scala:70:24, :231:41, :233:{78,95}]
wire [5:0] _view__T_offset = bypass_1 ? _view__WIRE_offset : _requests_io_data_offset; // @[Scheduler.scala:70:24, :231:41, :233:{78,95}]
wire [5:0] _view__T_put = bypass_1 ? _view__WIRE_put : _requests_io_data_put; // @[Scheduler.scala:70:24, :231:41, :233:{78,95}]
wire _mshrs_0_io_allocate_bits_repeat_T = mshrs_0_io_allocate_bits_tag == _mshrs_0_io_status_bits_tag; // @[Scheduler.scala:71:46, :233:72, :235:57, :280:83, :282:70]
wire _mshrs_0_io_allocate_valid_T = sel & will_reload_1; // @[Scheduler.scala:223:28, :232:49, :236:32]
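// MSHRs 1 through 9 repeat the same reload/bypass pattern.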
wire a_pop_2 = _requests_io_valid[1]; // @[Scheduler.scala:70:24, :225:34]
wire b_pop_2 = _requests_io_valid[13]; // @[Scheduler.scala:70:24, :226:34]
wire _bypassMatches_T_20 = b_pop_2; // @[Scheduler.scala:226:34, :229:78]
wire c_pop_2 = _requests_io_valid[25]; // @[Scheduler.scala:70:24, :227:34]
wire _bypassMatches_T_17 = lowerMatches1[1]; // @[Scheduler.scala:200:8, :228:38]
wire _bypassMatches_T_18 = c_pop_2 | request_bits_prio_2; // @[Scheduler.scala:163:21, :227:34, :229:35]
wire _bypassMatches_T_19 = ~c_pop_2; // @[Scheduler.scala:227:34, :229:60]
wire _bypassMatches_T_21 = ~b_pop_2; // @[Scheduler.scala:226:34, :229:103]
wire _bypassMatches_T_22 = ~a_pop_2; // @[Scheduler.scala:225:34, :229:111]
wire _bypassMatches_T_23 = _bypassMatches_T_20 ? _bypassMatches_T_21 : _bypassMatches_T_22; // @[Scheduler.scala:229:{71,78,103,111}]
wire _bypassMatches_T_24 = _bypassMatches_T_18 ? _bypassMatches_T_19 : _bypassMatches_T_23; // @[Scheduler.scala:229:{28,35,60,71}]
wire bypassMatches_2 = _bypassMatches_T_17 & _bypassMatches_T_24; // @[Scheduler.scala:228:{38,42}, :229:28]
wire _may_pop_T_2 = a_pop_2 | b_pop_2; // @[Scheduler.scala:225:34, :226:34, :230:25]
wire may_pop_2 = _may_pop_T_2 | c_pop_2; // @[Scheduler.scala:227:34, :230:{25,34}]
wire bypass_2 = _bypass_T_2 & bypassMatches_2; // @[Scheduler.scala:228:42, :231:{32,41}]
wire _will_reload_T_2 = may_pop_2 | bypass_2; // @[Scheduler.scala:230:34, :231:41, :232:61]
wire will_reload_2 = _mshrs_1_io_schedule_bits_reload & _will_reload_T_2; // @[Scheduler.scala:71:46, :232:{49,61}]
wire _view__T_1_prio_0 = bypass_2 ? _view__WIRE_1_prio_0 : _requests_io_data_prio_0; // @[Scheduler.scala:70:24, :231:41, :233:{78,95}]
wire _view__T_1_prio_1 = ~bypass_2 & _requests_io_data_prio_1; // @[Scheduler.scala:70:24, :231:41, :233:78]
wire _view__T_1_prio_2 = bypass_2 ? _view__WIRE_1_prio_2 : _requests_io_data_prio_2; // @[Scheduler.scala:70:24, :231:41, :233:{78,95}]
wire _view__T_1_control = bypass_2 ? _view__WIRE_1_control : _requests_io_data_control; // @[Scheduler.scala:70:24, :231:41, :233:{78,95}]
wire [2:0] _view__T_1_opcode = bypass_2 ? _view__WIRE_1_opcode : _requests_io_data_opcode; // @[Scheduler.scala:70:24, :231:41, :233:{78,95}]
wire [2:0] _view__T_1_param = bypass_2 ? _view__WIRE_1_param : _requests_io_data_param; // @[Scheduler.scala:70:24, :231:41, :233:{78,95}]
wire [2:0] _view__T_1_size = bypass_2 ? _view__WIRE_1_size : _requests_io_data_size; // @[Scheduler.scala:70:24, :231:41, :233:{78,95}]
wire [5:0] _view__T_1_source = bypass_2 ? _view__WIRE_1_source : _requests_io_data_source; // @[Scheduler.scala:70:24, :231:41, :233:{78,95}]
wire [8:0] _view__T_1_tag = bypass_2 ? _view__WIRE_1_tag : _requests_io_data_tag; // @[Scheduler.scala:70:24, :231:41, :233:{78,95}]
wire [5:0] _view__T_1_offset = bypass_2 ? _view__WIRE_1_offset : _requests_io_data_offset; // @[Scheduler.scala:70:24, :231:41, :233:{78,95}]
wire [5:0] _view__T_1_put = bypass_2 ? _view__WIRE_1_put : _requests_io_data_put; // @[Scheduler.scala:70:24, :231:41, :233:{78,95}]
wire _mshrs_1_io_allocate_bits_repeat_T = mshrs_1_io_allocate_bits_tag == _mshrs_1_io_status_bits_tag; // @[Scheduler.scala:71:46, :233:72, :235:57, :280:83, :282:70]
wire _mshrs_1_io_allocate_valid_T = sel_1 & will_reload_2; // @[Scheduler.scala:223:28, :232:49, :236:32]
wire a_pop_3 = _requests_io_valid[2]; // @[Scheduler.scala:70:24, :225:34]
wire b_pop_3 = _requests_io_valid[14]; // @[Scheduler.scala:70:24, :226:34]
wire _bypassMatches_T_28 = b_pop_3; // @[Scheduler.scala:226:34, :229:78]
wire c_pop_3 = _requests_io_valid[26]; // @[Scheduler.scala:70:24, :227:34]
wire _bypassMatches_T_25 = lowerMatches1[2]; // @[Scheduler.scala:200:8, :228:38]
wire _bypassMatches_T_26 = c_pop_3 | request_bits_prio_2; // @[Scheduler.scala:163:21, :227:34, :229:35]
wire _bypassMatches_T_27 = ~c_pop_3; // @[Scheduler.scala:227:34, :229:60]
wire _bypassMatches_T_29 = ~b_pop_3; // @[Scheduler.scala:226:34, :229:103]
wire _bypassMatches_T_30 = ~a_pop_3; // @[Scheduler.scala:225:34, :229:111]
wire _bypassMatches_T_31 = _bypassMatches_T_28 ? _bypassMatches_T_29 : _bypassMatches_T_30; // @[Scheduler.scala:229:{71,78,103,111}]
wire _bypassMatches_T_32 = _bypassMatches_T_26 ? _bypassMatches_T_27 : _bypassMatches_T_31; // @[Scheduler.scala:229:{28,35,60,71}]
wire bypassMatches_3 = _bypassMatches_T_25 & _bypassMatches_T_32; // @[Scheduler.scala:228:{38,42}, :229:28]
wire _may_pop_T_3 = a_pop_3 | b_pop_3; // @[Scheduler.scala:225:34, :226:34, :230:25]
wire may_pop_3 = _may_pop_T_3 | c_pop_3; // @[Scheduler.scala:227:34, :230:{25,34}]
wire bypass_3 = _bypass_T_3 & bypassMatches_3; // @[Scheduler.scala:228:42, :231:{32,41}]
wire _will_reload_T_3 = may_pop_3 | bypass_3; // @[Scheduler.scala:230:34, :231:41, :232:61]
wire will_reload_3 = _mshrs_2_io_schedule_bits_reload & _will_reload_T_3; // @[Scheduler.scala:71:46, :232:{49,61}]
wire _view__T_2_prio_0 = bypass_3 ? _view__WIRE_2_prio_0 : _requests_io_data_prio_0; // @[Scheduler.scala:70:24, :231:41, :233:{78,95}]
wire _view__T_2_prio_1 = ~bypass_3 & _requests_io_data_prio_1; // @[Scheduler.scala:70:24, :231:41, :233:78]
wire _view__T_2_prio_2 = bypass_3 ? _view__WIRE_2_prio_2 : _requests_io_data_prio_2; // @[Scheduler.scala:70:24, :231:41, :233:{78,95}]
wire _view__T_2_control = bypass_3 ? _view__WIRE_2_control : _requests_io_data_control; // @[Scheduler.scala:70:24, :231:41, :233:{78,95}]
wire [2:0] _view__T_2_opcode = bypass_3 ? _view__WIRE_2_opcode : _requests_io_data_opcode; // @[Scheduler.scala:70:24, :231:41, :233:{78,95}]
wire [2:0] _view__T_2_param = bypass_3 ? _view__WIRE_2_param : _requests_io_data_param; // @[Scheduler.scala:70:24, :231:41, :233:{78,95}]
wire [2:0] _view__T_2_size = bypass_3 ? _view__WIRE_2_size : _requests_io_data_size; // @[Scheduler.scala:70:24, :231:41, :233:{78,95}]
wire [5:0] _view__T_2_source = bypass_3 ? _view__WIRE_2_source : _requests_io_data_source; // @[Scheduler.scala:70:24, :231:41, :233:{78,95}]
wire [8:0] _view__T_2_tag = bypass_3 ? _view__WIRE_2_tag : _requests_io_data_tag; // @[Scheduler.scala:70:24, :231:41, :233:{78,95}]
wire [5:0] _view__T_2_offset = bypass_3 ? _view__WIRE_2_offset : _requests_io_data_offset; // @[Scheduler.scala:70:24, :231:41, :233:{78,95}]
wire [5:0] _view__T_2_put = bypass_3 ? _view__WIRE_2_put : _requests_io_data_put; // @[Scheduler.scala:70:24, :231:41, :233:{78,95}]
wire _mshrs_2_io_allocate_bits_repeat_T = mshrs_2_io_allocate_bits_tag == _mshrs_2_io_status_bits_tag; // @[Scheduler.scala:71:46, :233:72, :235:57, :280:83, :282:70]
wire _mshrs_2_io_allocate_valid_T = sel_2 & will_reload_3; // @[Scheduler.scala:223:28, :232:49, :236:32]
wire a_pop_4 = _requests_io_valid[3]; // @[Scheduler.scala:70:24, :225:34]
wire b_pop_4 = _requests_io_valid[15]; // @[Scheduler.scala:70:24, :226:34]
wire _bypassMatches_T_36 = b_pop_4; // @[Scheduler.scala:226:34, :229:78]
wire c_pop_4 = _requests_io_valid[27]; // @[Scheduler.scala:70:24, :227:34]
wire _bypassMatches_T_33 = lowerMatches1[3]; // @[Scheduler.scala:200:8, :228:38]
wire _bypassMatches_T_34 = c_pop_4 | request_bits_prio_2; // @[Scheduler.scala:163:21, :227:34, :229:35]
wire _bypassMatches_T_35 = ~c_pop_4; // @[Scheduler.scala:227:34, :229:60]
wire _bypassMatches_T_37 = ~b_pop_4; // @[Scheduler.scala:226:34, :229:103]
wire _bypassMatches_T_38 = ~a_pop_4; // @[Scheduler.scala:225:34, :229:111]
wire _bypassMatches_T_39 = _bypassMatches_T_36 ? _bypassMatches_T_37 : _bypassMatches_T_38; // @[Scheduler.scala:229:{71,78,103,111}]
wire _bypassMatches_T_40 = _bypassMatches_T_34 ? _bypassMatches_T_35 : _bypassMatches_T_39; // @[Scheduler.scala:229:{28,35,60,71}]
wire bypassMatches_4 = _bypassMatches_T_33 & _bypassMatches_T_40; // @[Scheduler.scala:228:{38,42}, :229:28]
wire _may_pop_T_4 = a_pop_4 | b_pop_4; // @[Scheduler.scala:225:34, :226:34, :230:25]
wire may_pop_4 = _may_pop_T_4 | c_pop_4; // @[Scheduler.scala:227:34, :230:{25,34}]
wire bypass_4 = _bypass_T_4 & bypassMatches_4; // @[Scheduler.scala:228:42, :231:{32,41}]
wire _will_reload_T_4 = may_pop_4 | bypass_4; // @[Scheduler.scala:230:34, :231:41, :232:61]
wire will_reload_4 = _mshrs_3_io_schedule_bits_reload & _will_reload_T_4; // @[Scheduler.scala:71:46, :232:{49,61}]
wire _view__T_3_prio_0 = bypass_4 ? _view__WIRE_3_prio_0 : _requests_io_data_prio_0; // @[Scheduler.scala:70:24, :231:41, :233:{78,95}]
wire _view__T_3_prio_1 = ~bypass_4 & _requests_io_data_prio_1; // @[Scheduler.scala:70:24, :231:41, :233:78]
wire _view__T_3_prio_2 = bypass_4 ? _view__WIRE_3_prio_2 : _requests_io_data_prio_2; // @[Scheduler.scala:70:24, :231:41, :233:{78,95}]
wire _view__T_3_control = bypass_4 ? _view__WIRE_3_control : _requests_io_data_control; // @[Scheduler.scala:70:24, :231:41, :233:{78,95}]
wire [2:0] _view__T_3_opcode = bypass_4 ? _view__WIRE_3_opcode : _requests_io_data_opcode; // @[Scheduler.scala:70:24, :231:41, :233:{78,95}]
wire [2:0] _view__T_3_param = bypass_4 ? _view__WIRE_3_param : _requests_io_data_param; // @[Scheduler.scala:70:24, :231:41, :233:{78,95}]
wire [2:0] _view__T_3_size = bypass_4 ? _view__WIRE_3_size : _requests_io_data_size; // @[Scheduler.scala:70:24, :231:41, :233:{78,95}]
wire [5:0] _view__T_3_source = bypass_4 ? _view__WIRE_3_source : _requests_io_data_source; // @[Scheduler.scala:70:24, :231:41, :233:{78,95}]
wire [8:0] _view__T_3_tag = bypass_4 ? _view__WIRE_3_tag : _requests_io_data_tag; // @[Scheduler.scala:70:24, :231:41, :233:{78,95}]
wire [5:0] _view__T_3_offset = bypass_4 ? _view__WIRE_3_offset : _requests_io_data_offset; // @[Scheduler.scala:70:24, :231:41, :233:{78,95}]
wire [5:0] _view__T_3_put = bypass_4 ? _view__WIRE_3_put : _requests_io_data_put; // @[Scheduler.scala:70:24, :231:41, :233:{78,95}]
wire _mshrs_3_io_allocate_bits_repeat_T = mshrs_3_io_allocate_bits_tag == _mshrs_3_io_status_bits_tag; // @[Scheduler.scala:71:46, :233:72, :235:57, :280:83, :282:70]
wire _mshrs_3_io_allocate_valid_T = sel_3 & will_reload_4; // @[Scheduler.scala:223:28, :232:49, :236:32]
wire a_pop_5 = _requests_io_valid[4]; // @[Scheduler.scala:70:24, :225:34]
wire b_pop_5 = _requests_io_valid[16]; // @[Scheduler.scala:70:24, :226:34]
wire _bypassMatches_T_44 = b_pop_5; // @[Scheduler.scala:226:34, :229:78]
wire c_pop_5 = _requests_io_valid[28]; // @[Scheduler.scala:70:24, :227:34]
wire _bypassMatches_T_41 = lowerMatches1[4]; // @[Scheduler.scala:200:8, :228:38]
wire _bypassMatches_T_42 = c_pop_5 | request_bits_prio_2; // @[Scheduler.scala:163:21, :227:34, :229:35]
wire _bypassMatches_T_43 = ~c_pop_5; // @[Scheduler.scala:227:34, :229:60]
wire _bypassMatches_T_45 = ~b_pop_5; // @[Scheduler.scala:226:34, :229:103]
wire _bypassMatches_T_46 = ~a_pop_5; // @[Scheduler.scala:225:34, :229:111]
wire _bypassMatches_T_47 = _bypassMatches_T_44 ? _bypassMatches_T_45 : _bypassMatches_T_46; // @[Scheduler.scala:229:{71,78,103,111}]
wire _bypassMatches_T_48 = _bypassMatches_T_42 ? _bypassMatches_T_43 : _bypassMatches_T_47; // @[Scheduler.scala:229:{28,35,60,71}]
wire bypassMatches_5 = _bypassMatches_T_41 & _bypassMatches_T_48; // @[Scheduler.scala:228:{38,42}, :229:28]
wire _may_pop_T_5 = a_pop_5 | b_pop_5; // @[Scheduler.scala:225:34, :226:34, :230:25]
wire may_pop_5 = _may_pop_T_5 | c_pop_5; // @[Scheduler.scala:227:34, :230:{25,34}]
wire bypass_5 = _bypass_T_5 & bypassMatches_5; // @[Scheduler.scala:228:42, :231:{32,41}]
wire _will_reload_T_5 = may_pop_5 | bypass_5; // @[Scheduler.scala:230:34, :231:41, :232:61]
wire will_reload_5 = _mshrs_4_io_schedule_bits_reload & _will_reload_T_5; // @[Scheduler.scala:71:46, :232:{49,61}]
wire _view__T_4_prio_0 = bypass_5 ? _view__WIRE_4_prio_0 : _requests_io_data_prio_0; // @[Scheduler.scala:70:24, :231:41, :233:{78,95}]
wire _view__T_4_prio_1 = ~bypass_5 & _requests_io_data_prio_1; // @[Scheduler.scala:70:24, :231:41, :233:78]
wire _view__T_4_prio_2 = bypass_5 ? _view__WIRE_4_prio_2 : _requests_io_data_prio_2; // @[Scheduler.scala:70:24, :231:41, :233:{78,95}]
wire _view__T_4_control = bypass_5 ? _view__WIRE_4_control : _requests_io_data_control; // @[Scheduler.scala:70:24, :231:41, :233:{78,95}]
wire [2:0] _view__T_4_opcode = bypass_5 ? _view__WIRE_4_opcode : _requests_io_data_opcode; // @[Scheduler.scala:70:24, :231:41, :233:{78,95}]
wire [2:0] _view__T_4_param = bypass_5 ? _view__WIRE_4_param : _requests_io_data_param; // @[Scheduler.scala:70:24, :231:41, :233:{78,95}]
wire [2:0] _view__T_4_size = bypass_5 ? _view__WIRE_4_size : _requests_io_data_size; // @[Scheduler.scala:70:24, :231:41, :233:{78,95}]
wire [5:0] _view__T_4_source = bypass_5 ? _view__WIRE_4_source : _requests_io_data_source; // @[Scheduler.scala:70:24, :231:41, :233:{78,95}]
wire [8:0] _view__T_4_tag = bypass_5 ? _view__WIRE_4_tag : _requests_io_data_tag; // @[Scheduler.scala:70:24, :231:41, :233:{78,95}]
wire [5:0] _view__T_4_offset = bypass_5 ? _view__WIRE_4_offset : _requests_io_data_offset; // @[Scheduler.scala:70:24, :231:41, :233:{78,95}]
wire [5:0] _view__T_4_put = bypass_5 ? _view__WIRE_4_put : _requests_io_data_put; // @[Scheduler.scala:70:24, :231:41, :233:{78,95}]
wire _mshrs_4_io_allocate_bits_repeat_T = mshrs_4_io_allocate_bits_tag == _mshrs_4_io_status_bits_tag; // @[Scheduler.scala:71:46, :233:72, :235:57, :280:83, :282:70]
wire _mshrs_4_io_allocate_valid_T = sel_4 & will_reload_5; // @[Scheduler.scala:223:28, :232:49, :236:32]
wire a_pop_6 = _requests_io_valid[5]; // @[Scheduler.scala:70:24, :225:34]
wire b_pop_6 = _requests_io_valid[17]; // @[Scheduler.scala:70:24, :226:34]
wire _bypassMatches_T_52 = b_pop_6; // @[Scheduler.scala:226:34, :229:78]
wire c_pop_6 = _requests_io_valid[29]; // @[Scheduler.scala:70:24, :227:34]
wire _bypassMatches_T_49 = lowerMatches1[5]; // @[Scheduler.scala:200:8, :228:38]
wire _bypassMatches_T_50 = c_pop_6 | request_bits_prio_2; // @[Scheduler.scala:163:21, :227:34, :229:35]
wire _bypassMatches_T_51 = ~c_pop_6; // @[Scheduler.scala:227:34, :229:60]
wire _bypassMatches_T_53 = ~b_pop_6; // @[Scheduler.scala:226:34, :229:103]
wire _bypassMatches_T_54 = ~a_pop_6; // @[Scheduler.scala:225:34, :229:111]
wire _bypassMatches_T_55 = _bypassMatches_T_52 ? _bypassMatches_T_53 : _bypassMatches_T_54; // @[Scheduler.scala:229:{71,78,103,111}]
wire _bypassMatches_T_56 = _bypassMatches_T_50 ? _bypassMatches_T_51 : _bypassMatches_T_55; // @[Scheduler.scala:229:{28,35,60,71}]
wire bypassMatches_6 = _bypassMatches_T_49 & _bypassMatches_T_56; // @[Scheduler.scala:228:{38,42}, :229:28]
wire _may_pop_T_6 = a_pop_6 | b_pop_6; // @[Scheduler.scala:225:34, :226:34, :230:25]
wire may_pop_6 = _may_pop_T_6 | c_pop_6; // @[Scheduler.scala:227:34, :230:{25,34}]
wire bypass_6 = _bypass_T_6 & bypassMatches_6; // @[Scheduler.scala:228:42, :231:{32,41}]
wire _will_reload_T_6 = may_pop_6 | bypass_6; // @[Scheduler.scala:230:34, :231:41, :232:61]
wire will_reload_6 = _mshrs_5_io_schedule_bits_reload & _will_reload_T_6; // @[Scheduler.scala:71:46, :232:{49,61}]
wire _view__T_5_prio_0 = bypass_6 ? _view__WIRE_5_prio_0 : _requests_io_data_prio_0; // @[Scheduler.scala:70:24, :231:41, :233:{78,95}]
wire _view__T_5_prio_1 = ~bypass_6 & _requests_io_data_prio_1; // @[Scheduler.scala:70:24, :231:41, :233:78]
wire _view__T_5_prio_2 = bypass_6 ? _view__WIRE_5_prio_2 : _requests_io_data_prio_2; // @[Scheduler.scala:70:24, :231:41, :233:{78,95}]
wire _view__T_5_control = bypass_6 ? _view__WIRE_5_control : _requests_io_data_control; // @[Scheduler.scala:70:24, :231:41, :233:{78,95}]
wire [2:0] _view__T_5_opcode = bypass_6 ? _view__WIRE_5_opcode : _requests_io_data_opcode; // @[Scheduler.scala:70:24, :231:41, :233:{78,95}]
wire [2:0] _view__T_5_param = bypass_6 ? _view__WIRE_5_param : _requests_io_data_param; // @[Scheduler.scala:70:24, :231:41, :233:{78,95}]
wire [2:0] _view__T_5_size = bypass_6 ? _view__WIRE_5_size : _requests_io_data_size; // @[Scheduler.scala:70:24, :231:41, :233:{78,95}]
wire [5:0] _view__T_5_source = bypass_6 ? _view__WIRE_5_source : _requests_io_data_source; // @[Scheduler.scala:70:24, :231:41, :233:{78,95}]
wire [8:0] _view__T_5_tag = bypass_6 ? _view__WIRE_5_tag : _requests_io_data_tag; // @[Scheduler.scala:70:24, :231:41, :233:{78,95}]
wire [5:0] _view__T_5_offset = bypass_6 ? _view__WIRE_5_offset : _requests_io_data_offset; // @[Scheduler.scala:70:24, :231:41, :233:{78,95}]
wire [5:0] _view__T_5_put = bypass_6 ? _view__WIRE_5_put : _requests_io_data_put; // @[Scheduler.scala:70:24, :231:41, :233:{78,95}]
wire _mshrs_5_io_allocate_bits_repeat_T = mshrs_5_io_allocate_bits_tag == _mshrs_5_io_status_bits_tag; // @[Scheduler.scala:71:46, :233:72, :235:57, :280:83, :282:70]
wire _mshrs_5_io_allocate_valid_T = sel_5 & will_reload_6; // @[Scheduler.scala:223:28, :232:49, :236:32]
wire a_pop_7 = _requests_io_valid[6]; // @[Scheduler.scala:70:24, :225:34]
wire b_pop_7 = _requests_io_valid[18]; // @[Scheduler.scala:70:24, :226:34]
wire _bypassMatches_T_60 = b_pop_7; // @[Scheduler.scala:226:34, :229:78]
wire c_pop_7 = _requests_io_valid[30]; // @[Scheduler.scala:70:24, :227:34]
wire _bypassMatches_T_57 = lowerMatches1[6]; // @[Scheduler.scala:200:8, :228:38]
wire _bypassMatches_T_58 = c_pop_7 | request_bits_prio_2; // @[Scheduler.scala:163:21, :227:34, :229:35]
wire _bypassMatches_T_59 = ~c_pop_7; // @[Scheduler.scala:227:34, :229:60]
wire _bypassMatches_T_61 = ~b_pop_7; // @[Scheduler.scala:226:34, :229:103]
wire _bypassMatches_T_62 = ~a_pop_7; // @[Scheduler.scala:225:34, :229:111]
wire _bypassMatches_T_63 = _bypassMatches_T_60 ? _bypassMatches_T_61 : _bypassMatches_T_62; // @[Scheduler.scala:229:{71,78,103,111}]
wire _bypassMatches_T_64 = _bypassMatches_T_58 ? _bypassMatches_T_59 : _bypassMatches_T_63; // @[Scheduler.scala:229:{28,35,60,71}]
wire bypassMatches_7 = _bypassMatches_T_57 & _bypassMatches_T_64; // @[Scheduler.scala:228:{38,42}, :229:28]
wire _may_pop_T_7 = a_pop_7 | b_pop_7; // @[Scheduler.scala:225:34, :226:34, :230:25]
wire may_pop_7 = _may_pop_T_7 | c_pop_7; // @[Scheduler.scala:227:34, :230:{25,34}]
wire bypass_7 = _bypass_T_7 & bypassMatches_7; // @[Scheduler.scala:228:42, :231:{32,41}]
wire _will_reload_T_7 = may_pop_7 | bypass_7; // @[Scheduler.scala:230:34, :231:41, :232:61]
wire will_reload_7 = _mshrs_6_io_schedule_bits_reload & _will_reload_T_7; // @[Scheduler.scala:71:46, :232:{49,61}]
wire _view__T_6_prio_0 = bypass_7 ? _view__WIRE_6_prio_0 : _requests_io_data_prio_0; // @[Scheduler.scala:70:24, :231:41, :233:{78,95}]
wire _view__T_6_prio_1 = ~bypass_7 & _requests_io_data_prio_1; // @[Scheduler.scala:70:24, :231:41, :233:78]
wire _view__T_6_prio_2 = bypass_7 ? _view__WIRE_6_prio_2 : _requests_io_data_prio_2; // @[Scheduler.scala:70:24, :231:41, :233:{78,95}]
wire _view__T_6_control = bypass_7 ? _view__WIRE_6_control : _requests_io_data_control; // @[Scheduler.scala:70:24, :231:41, :233:{78,95}]
wire [2:0] _view__T_6_opcode = bypass_7 ? _view__WIRE_6_opcode : _requests_io_data_opcode; // @[Scheduler.scala:70:24, :231:41, :233:{78,95}]
wire [2:0] _view__T_6_param = bypass_7 ? _view__WIRE_6_param : _requests_io_data_param; // @[Scheduler.scala:70:24, :231:41, :233:{78,95}]
wire [2:0] _view__T_6_size = bypass_7 ? _view__WIRE_6_size : _requests_io_data_size; // @[Scheduler.scala:70:24, :231:41, :233:{78,95}]
wire [5:0] _view__T_6_source = bypass_7 ? _view__WIRE_6_source : _requests_io_data_source; // @[Scheduler.scala:70:24, :231:41, :233:{78,95}]
wire [8:0] _view__T_6_tag = bypass_7 ? _view__WIRE_6_tag : _requests_io_data_tag; // @[Scheduler.scala:70:24, :231:41, :233:{78,95}]
wire [5:0] _view__T_6_offset = bypass_7 ? _view__WIRE_6_offset : _requests_io_data_offset; // @[Scheduler.scala:70:24, :231:41, :233:{78,95}]
wire [5:0] _view__T_6_put = bypass_7 ? _view__WIRE_6_put : _requests_io_data_put; // @[Scheduler.scala:70:24, :231:41, :233:{78,95}]
wire _mshrs_6_io_allocate_bits_repeat_T = mshrs_6_io_allocate_bits_tag == _mshrs_6_io_status_bits_tag; // @[Scheduler.scala:71:46, :233:72, :235:57, :280:83, :282:70]
wire _mshrs_6_io_allocate_valid_T = sel_6 & will_reload_7; // @[Scheduler.scala:223:28, :232:49, :236:32]
wire a_pop_8 = _requests_io_valid[7]; // @[Scheduler.scala:70:24, :225:34]
wire b_pop_8 = _requests_io_valid[19]; // @[Scheduler.scala:70:24, :226:34]
wire _bypassMatches_T_68 = b_pop_8; // @[Scheduler.scala:226:34, :229:78]
wire c_pop_8 = _requests_io_valid[31]; // @[Scheduler.scala:70:24, :227:34]
wire _bypassMatches_T_65 = lowerMatches1[7]; // @[Scheduler.scala:200:8, :228:38]
wire _bypassMatches_T_66 = c_pop_8 | request_bits_prio_2; // @[Scheduler.scala:163:21, :227:34, :229:35]
wire _bypassMatches_T_67 = ~c_pop_8; // @[Scheduler.scala:227:34, :229:60]
wire _bypassMatches_T_69 = ~b_pop_8; // @[Scheduler.scala:226:34, :229:103]
wire _bypassMatches_T_70 = ~a_pop_8; // @[Scheduler.scala:225:34, :229:111]
wire _bypassMatches_T_71 = _bypassMatches_T_68 ? _bypassMatches_T_69 : _bypassMatches_T_70; // @[Scheduler.scala:229:{71,78,103,111}]
wire _bypassMatches_T_72 = _bypassMatches_T_66 ? _bypassMatches_T_67 : _bypassMatches_T_71; // @[Scheduler.scala:229:{28,35,60,71}]
wire bypassMatches_8 = _bypassMatches_T_65 & _bypassMatches_T_72; // @[Scheduler.scala:228:{38,42}, :229:28]
wire _may_pop_T_8 = a_pop_8 | b_pop_8; // @[Scheduler.scala:225:34, :226:34, :230:25]
wire may_pop_8 = _may_pop_T_8 | c_pop_8; // @[Scheduler.scala:227:34, :230:{25,34}]
wire bypass_8 = _bypass_T_8 & bypassMatches_8; // @[Scheduler.scala:228:42, :231:{32,41}]
wire _will_reload_T_8 = may_pop_8 | bypass_8; // @[Scheduler.scala:230:34, :231:41, :232:61]
wire will_reload_8 = _mshrs_7_io_schedule_bits_reload & _will_reload_T_8; // @[Scheduler.scala:71:46, :232:{49,61}]
wire _view__T_7_prio_0 = bypass_8 ? _view__WIRE_7_prio_0 : _requests_io_data_prio_0; // @[Scheduler.scala:70:24, :231:41, :233:{78,95}]
wire _view__T_7_prio_1 = ~bypass_8 & _requests_io_data_prio_1; // @[Scheduler.scala:70:24, :231:41, :233:78]
wire _view__T_7_prio_2 = bypass_8 ? _view__WIRE_7_prio_2 : _requests_io_data_prio_2; // @[Scheduler.scala:70:24, :231:41, :233:{78,95}]
wire _view__T_7_control = bypass_8 ? _view__WIRE_7_control : _requests_io_data_control; // @[Scheduler.scala:70:24, :231:41, :233:{78,95}]
wire [2:0] _view__T_7_opcode = bypass_8 ? _view__WIRE_7_opcode : _requests_io_data_opcode; // @[Scheduler.scala:70:24, :231:41, :233:{78,95}]
wire [2:0] _view__T_7_param = bypass_8 ? _view__WIRE_7_param : _requests_io_data_param; // @[Scheduler.scala:70:24, :231:41, :233:{78,95}]
wire [2:0] _view__T_7_size = bypass_8 ? _view__WIRE_7_size : _requests_io_data_size; // @[Scheduler.scala:70:24, :231:41, :233:{78,95}]
wire [5:0] _view__T_7_source = bypass_8 ? _view__WIRE_7_source : _requests_io_data_source; // @[Scheduler.scala:70:24, :231:41, :233:{78,95}]
wire [8:0] _view__T_7_tag = bypass_8 ? _view__WIRE_7_tag : _requests_io_data_tag; // @[Scheduler.scala:70:24, :231:41, :233:{78,95}]
wire [5:0] _view__T_7_offset = bypass_8 ? _view__WIRE_7_offset : _requests_io_data_offset; // @[Scheduler.scala:70:24, :231:41, :233:{78,95}]
wire [5:0] _view__T_7_put = bypass_8 ? _view__WIRE_7_put : _requests_io_data_put; // @[Scheduler.scala:70:24, :231:41, :233:{78,95}]
wire _mshrs_7_io_allocate_bits_repeat_T = mshrs_7_io_allocate_bits_tag == _mshrs_7_io_status_bits_tag; // @[Scheduler.scala:71:46, :233:72, :235:57, :280:83, :282:70]
wire _mshrs_7_io_allocate_valid_T = sel_7 & will_reload_8; // @[Scheduler.scala:223:28, :232:49, :236:32]
wire a_pop_9 = _requests_io_valid[8]; // @[Scheduler.scala:70:24, :225:34]
wire b_pop_9 = _requests_io_valid[20]; // @[Scheduler.scala:70:24, :226:34]
wire _bypassMatches_T_76 = b_pop_9; // @[Scheduler.scala:226:34, :229:78]
wire c_pop_9 = _requests_io_valid[32]; // @[Scheduler.scala:70:24, :227:34]
wire _bypassMatches_T_73 = lowerMatches1[8]; // @[Scheduler.scala:200:8, :228:38]
wire _bypassMatches_T_74 = c_pop_9 | request_bits_prio_2; // @[Scheduler.scala:163:21, :227:34, :229:35]
wire _bypassMatches_T_75 = ~c_pop_9; // @[Scheduler.scala:227:34, :229:60]
wire _bypassMatches_T_77 = ~b_pop_9; // @[Scheduler.scala:226:34, :229:103]
wire _bypassMatches_T_78 = ~a_pop_9; // @[Scheduler.scala:225:34, :229:111]
wire _bypassMatches_T_79 = _bypassMatches_T_76 ? _bypassMatches_T_77 : _bypassMatches_T_78; // @[Scheduler.scala:229:{71,78,103,111}]
wire _bypassMatches_T_80 = _bypassMatches_T_74 ? _bypassMatches_T_75 : _bypassMatches_T_79; // @[Scheduler.scala:229:{28,35,60,71}]
wire bypassMatches_9 = _bypassMatches_T_73 & _bypassMatches_T_80; // @[Scheduler.scala:228:{38,42}, :229:28]
wire _may_pop_T_9 = a_pop_9 | b_pop_9; // @[Scheduler.scala:225:34, :226:34, :230:25]
wire may_pop_9 = _may_pop_T_9 | c_pop_9; // @[Scheduler.scala:227:34, :230:{25,34}]
wire bypass_9 = _bypass_T_9 & bypassMatches_9; // @[Scheduler.scala:228:42, :231:{32,41}]
wire _will_reload_T_9 = may_pop_9 | bypass_9; // @[Scheduler.scala:230:34, :231:41, :232:61]
wire will_reload_9 = _mshrs_8_io_schedule_bits_reload & _will_reload_T_9; // @[Scheduler.scala:71:46, :232:{49,61}]
wire _view__T_8_prio_0 = bypass_9 ? _view__WIRE_8_prio_0 : _requests_io_data_prio_0; // @[Scheduler.scala:70:24, :231:41, :233:{78,95}]
wire _view__T_8_prio_1 = ~bypass_9 & _requests_io_data_prio_1; // @[Scheduler.scala:70:24, :231:41, :233:78]
wire _view__T_8_prio_2 = bypass_9 ? _view__WIRE_8_prio_2 : _requests_io_data_prio_2; // @[Scheduler.scala:70:24, :231:41, :233:{78,95}]
wire _view__T_8_control = bypass_9 ? _view__WIRE_8_control : _requests_io_data_control; // @[Scheduler.scala:70:24, :231:41, :233:{78,95}]
wire [2:0] _view__T_8_opcode = bypass_9 ? _view__WIRE_8_opcode : _requests_io_data_opcode; // @[Scheduler.scala:70:24, :231:41, :233:{78,95}]
wire [2:0] _view__T_8_param = bypass_9 ? _view__WIRE_8_param : _requests_io_data_param; // @[Scheduler.scala:70:24, :231:41, :233:{78,95}]
wire [2:0] _view__T_8_size = bypass_9 ? _view__WIRE_8_size : _requests_io_data_size; // @[Scheduler.scala:70:24, :231:41, :233:{78,95}]
wire [5:0] _view__T_8_source = bypass_9 ? _view__WIRE_8_source : _requests_io_data_source; // @[Scheduler.scala:70:24, :231:41, :233:{78,95}]
wire [8:0] _view__T_8_tag = bypass_9 ? _view__WIRE_8_tag : _requests_io_data_tag; // @[Scheduler.scala:70:24, :231:41, :233:{78,95}]
wire [5:0] _view__T_8_offset = bypass_9 ? _view__WIRE_8_offset : _requests_io_data_offset; // @[Scheduler.scala:70:24, :231:41, :233:{78,95}]
wire [5:0] _view__T_8_put = bypass_9 ? _view__WIRE_8_put : _requests_io_data_put; // @[Scheduler.scala:70:24, :231:41, :233:{78,95}]
wire _mshrs_8_io_allocate_bits_repeat_T = mshrs_8_io_allocate_bits_tag == _mshrs_8_io_status_bits_tag; // @[Scheduler.scala:71:46, :233:72, :235:57, :280:83, :282:70]
wire _mshrs_8_io_allocate_valid_T = sel_8 & will_reload_9; // @[Scheduler.scala:223:28, :232:49, :236:32]
wire a_pop_10 = _requests_io_valid[9]; // @[Scheduler.scala:70:24, :225:34]
wire b_pop_10 = _requests_io_valid[21]; // @[Scheduler.scala:70:24, :226:34]
wire _bypassMatches_T_84 = b_pop_10; // @[Scheduler.scala:226:34, :229:78]
wire c_pop_10 = _requests_io_valid[33]; // @[Scheduler.scala:70:24, :227:34]
wire _bypassMatches_T_81 = lowerMatches1[9]; // @[Scheduler.scala:200:8, :228:38]
wire _bypassMatches_T_82 = c_pop_10 | request_bits_prio_2; // @[Scheduler.scala:163:21, :227:34, :229:35]
wire _bypassMatches_T_83 = ~c_pop_10; // @[Scheduler.scala:227:34, :229:60]
wire _bypassMatches_T_85 = ~b_pop_10; // @[Scheduler.scala:226:34, :229:103]
wire _bypassMatches_T_86 = ~a_pop_10; // @[Scheduler.scala:225:34, :229:111]
wire _bypassMatches_T_87 = _bypassMatches_T_84 ? _bypassMatches_T_85 : _bypassMatches_T_86; // @[Scheduler.scala:229:{71,78,103,111}]
wire _bypassMatches_T_88 = _bypassMatches_T_82 ? _bypassMatches_T_83 : _bypassMatches_T_87; // @[Scheduler.scala:229:{28,35,60,71}]
wire bypassMatches_10 = _bypassMatches_T_81 & _bypassMatches_T_88; // @[Scheduler.scala:228:{38,42}, :229:28]
wire _may_pop_T_10 = a_pop_10 | b_pop_10; // @[Scheduler.scala:225:34, :226:34, :230:25]
wire may_pop_10 = _may_pop_T_10 | c_pop_10; // @[Scheduler.scala:227:34, :230:{25,34}]
wire bypass_10 = _bypass_T_10 & bypassMatches_10; // @[Scheduler.scala:228:42, :231:{32,41}]
wire _will_reload_T_10 = may_pop_10 | bypass_10; // @[Scheduler.scala:230:34, :231:41, :232:61]
wire will_reload_10 = _mshrs_9_io_schedule_bits_reload & _will_reload_T_10; // @[Scheduler.scala:71:46, :232:{49,61}]
wire _view__T_9_prio_0 = bypass_10 ? _view__WIRE_9_prio_0 : _requests_io_data_prio_0; // @[Scheduler.scala:70:24, :231:41, :233:{78,95}]
wire _view__T_9_prio_1 = ~bypass_10 & _requests_io_data_prio_1; // @[Scheduler.scala:70:24, :231:41, :233:78]
wire _view__T_9_prio_2 = bypass_10 ? _view__WIRE_9_prio_2 : _requests_io_data_prio_2; // @[Scheduler.scala:70:24, :231:41, :233:{78,95}]
wire _view__T_9_control = bypass_10 ? _view__WIRE_9_control : _requests_io_data_control; // @[Scheduler.scala:70:24, :231:41, :233:{78,95}]
wire [2:0] _view__T_9_opcode = bypass_10 ? _view__WIRE_9_opcode : _requests_io_data_opcode; // @[Scheduler.scala:70:24, :231:41, :233:{78,95}]
wire [2:0] _view__T_9_param = bypass_10 ? _view__WIRE_9_param : _requests_io_data_param; // @[Scheduler.scala:70:24, :231:41, :233:{78,95}]
wire [2:0] _view__T_9_size = bypass_10 ? _view__WIRE_9_size : _requests_io_data_size; // @[Scheduler.scala:70:24, :231:41, :233:{78,95}]
wire [5:0] _view__T_9_source = bypass_10 ? _view__WIRE_9_source : _requests_io_data_source; // @[Scheduler.scala:70:24, :231:41, :233:{78,95}]
wire [8:0] _view__T_9_tag = bypass_10 ? _view__WIRE_9_tag : _requests_io_data_tag; // @[Scheduler.scala:70:24, :231:41, :233:{78,95}]
wire [5:0] _view__T_9_offset = bypass_10 ? _view__WIRE_9_offset : _requests_io_data_offset; // @[Scheduler.scala:70:24, :231:41, :233:{78,95}]
wire [5:0] _view__T_9_put = bypass_10 ? _view__WIRE_9_put : _requests_io_data_put; // @[Scheduler.scala:70:24, :231:41, :233:{78,95}]
wire _mshrs_9_io_allocate_bits_repeat_T = mshrs_9_io_allocate_bits_tag == _mshrs_9_io_status_bits_tag; // @[Scheduler.scala:71:46, :233:72, :235:57, :280:83, :282:70]
wire _mshrs_9_io_allocate_valid_T = sel_9 & will_reload_10; // @[Scheduler.scala:223:28, :232:49, :236:32]
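// MSHR 10: same pattern; the extra source references (Scheduler.scala:287, :289) in its
// repeat check suggest it is one of the two special nesting MSHRs in this design.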
wire a_pop_11 = _requests_io_valid[10]; // @[Scheduler.scala:70:24, :225:34]
wire b_pop_11 = _requests_io_valid[22]; // @[Scheduler.scala:70:24, :226:34]
wire _bypassMatches_T_92 = b_pop_11; // @[Scheduler.scala:226:34, :229:78]
wire c_pop_11 = _requests_io_valid[34]; // @[Scheduler.scala:70:24, :227:34]
wire _bypassMatches_T_89 = lowerMatches1[10]; // @[Scheduler.scala:200:8, :228:38]
wire _bypassMatches_T_90 = c_pop_11 | request_bits_prio_2; // @[Scheduler.scala:163:21, :227:34, :229:35]
wire _bypassMatches_T_91 = ~c_pop_11; // @[Scheduler.scala:227:34, :229:60]
wire _bypassMatches_T_93 = ~b_pop_11; // @[Scheduler.scala:226:34, :229:103]
wire _bypassMatches_T_94 = ~a_pop_11; // @[Scheduler.scala:225:34, :229:111]
wire _bypassMatches_T_95 = _bypassMatches_T_92 ? _bypassMatches_T_93 : _bypassMatches_T_94; // @[Scheduler.scala:229:{71,78,103,111}]
wire _bypassMatches_T_96 = _bypassMatches_T_90 ? _bypassMatches_T_91 : _bypassMatches_T_95; // @[Scheduler.scala:229:{28,35,60,71}]
wire bypassMatches_11 = _bypassMatches_T_89 & _bypassMatches_T_96; // @[Scheduler.scala:228:{38,42}, :229:28]
wire _may_pop_T_11 = a_pop_11 | b_pop_11; // @[Scheduler.scala:225:34, :226:34, :230:25]
wire may_pop_11 = _may_pop_T_11 | c_pop_11; // @[Scheduler.scala:227:34, :230:{25,34}]
wire bypass_11 = _bypass_T_11 & bypassMatches_11; // @[Scheduler.scala:228:42, :231:{32,41}]
wire _will_reload_T_11 = may_pop_11 | bypass_11; // @[Scheduler.scala:230:34, :231:41, :232:61]
wire will_reload_11 = _mshrs_10_io_schedule_bits_reload & _will_reload_T_11; // @[Scheduler.scala:71:46, :232:{49,61}]
wire _view__T_10_prio_0 = bypass_11 ? _view__WIRE_10_prio_0 : _requests_io_data_prio_0; // @[Scheduler.scala:70:24, :231:41, :233:{78,95}]
wire _view__T_10_prio_1 = ~bypass_11 & _requests_io_data_prio_1; // @[Scheduler.scala:70:24, :231:41, :233:78]
wire _view__T_10_prio_2 = bypass_11 ? _view__WIRE_10_prio_2 : _requests_io_data_prio_2; // @[Scheduler.scala:70:24, :231:41, :233:{78,95}]
wire _view__T_10_control = bypass_11 ? _view__WIRE_10_control : _requests_io_data_control; // @[Scheduler.scala:70:24, :231:41, :233:{78,95}]
wire [2:0] _view__T_10_opcode = bypass_11 ? _view__WIRE_10_opcode : _requests_io_data_opcode; // @[Scheduler.scala:70:24, :231:41, :233:{78,95}]
wire [2:0] _view__T_10_param = bypass_11 ? _view__WIRE_10_param : _requests_io_data_param; // @[Scheduler.scala:70:24, :231:41, :233:{78,95}]
wire [2:0] _view__T_10_size = bypass_11 ? _view__WIRE_10_size : _requests_io_data_size; // @[Scheduler.scala:70:24, :231:41, :233:{78,95}]
wire [5:0] _view__T_10_source = bypass_11 ? _view__WIRE_10_source : _requests_io_data_source; // @[Scheduler.scala:70:24, :231:41, :233:{78,95}]
wire [8:0] _view__T_10_tag = bypass_11 ? _view__WIRE_10_tag : _requests_io_data_tag; // @[Scheduler.scala:70:24, :231:41, :233:{78,95}]
wire [5:0] _view__T_10_offset = bypass_11 ? _view__WIRE_10_offset : _requests_io_data_offset; // @[Scheduler.scala:70:24, :231:41, :233:{78,95}]
wire [5:0] _view__T_10_put = bypass_11 ? _view__WIRE_10_put : _requests_io_data_put; // @[Scheduler.scala:70:24, :231:41, :233:{78,95}]
wire _mshrs_10_io_allocate_bits_repeat_T = mshrs_10_io_allocate_bits_tag == _mshrs_10_io_status_bits_tag; // @[Scheduler.scala:71:46, :233:72, :235:57, :280:83, :282:70, :287:131, :289:74]
wire _mshrs_10_io_allocate_valid_T = sel_10 & will_reload_11; // @[Scheduler.scala:223:28, :232:49, :236:32]
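// MSHR 11: same pattern, with additional handling in its repeat check (Scheduler.scala:295, :297).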
wire a_pop_12 = _requests_io_valid[11]; // @[Scheduler.scala:70:24, :225:34]
wire b_pop_12 = _requests_io_valid[23]; // @[Scheduler.scala:70:24, :226:34]
wire _bypassMatches_T_100 = b_pop_12; // @[Scheduler.scala:226:34, :229:78]
wire c_pop_12 = _requests_io_valid[35]; // @[Scheduler.scala:70:24, :227:34]
wire _bypassMatches_T_97 = lowerMatches1[11]; // @[Scheduler.scala:200:8, :228:38]
wire _bypassMatches_T_98 = c_pop_12 | request_bits_prio_2; // @[Scheduler.scala:163:21, :227:34, :229:35]
wire _bypassMatches_T_99 = ~c_pop_12; // @[Scheduler.scala:227:34, :229:60]
wire _bypassMatches_T_101 = ~b_pop_12; // @[Scheduler.scala:226:34, :229:103]
wire _bypassMatches_T_102 = ~a_pop_12; // @[Scheduler.scala:225:34, :229:111]
wire _bypassMatches_T_103 = _bypassMatches_T_100 ? _bypassMatches_T_101 : _bypassMatches_T_102; // @[Scheduler.scala:229:{71,78,103,111}]
wire _bypassMatches_T_104 = _bypassMatches_T_98 ? _bypassMatches_T_99 : _bypassMatches_T_103; // @[Scheduler.scala:229:{28,35,60,71}]
wire bypassMatches_12 = _bypassMatches_T_97 & _bypassMatches_T_104; // @[Scheduler.scala:228:{38,42}, :229:28]
wire _may_pop_T_12 = a_pop_12 | b_pop_12; // @[Scheduler.scala:225:34, :226:34, :230:25]
wire may_pop_12 = _may_pop_T_12 | c_pop_12; // @[Scheduler.scala:227:34, :230:{25,34}]
wire bypass_12 = _bypass_T_12 & bypassMatches_12; // @[Scheduler.scala:228:42, :231:{32,41}]
wire _will_reload_T_12 = may_pop_12 | bypass_12; // @[Scheduler.scala:230:34, :231:41, :232:61]
wire will_reload_12 = _mshrs_11_io_schedule_bits_reload & _will_reload_T_12; // @[Scheduler.scala:71:46, :232:{49,61}]
wire _view__T_11_prio_0 = bypass_12 ? _view__WIRE_11_prio_0 : _requests_io_data_prio_0; // @[Scheduler.scala:70:24, :231:41, :233:{78,95}]
wire _view__T_11_prio_1 = ~bypass_12 & _requests_io_data_prio_1; // @[Scheduler.scala:70:24, :231:41, :233:78]
wire _view__T_11_prio_2 = bypass_12 ? _view__WIRE_11_prio_2 : _requests_io_data_prio_2; // @[Scheduler.scala:70:24, :231:41, :233:{78,95}]
wire _view__T_11_control = bypass_12 ? _view__WIRE_11_control : _requests_io_data_control; // @[Scheduler.scala:70:24, :231:41, :233:{78,95}]
wire [2:0] _view__T_11_opcode = bypass_12 ? _view__WIRE_11_opcode : _requests_io_data_opcode; // @[Scheduler.scala:70:24, :231:41, :233:{78,95}]
wire [2:0] _view__T_11_param = bypass_12 ? _view__WIRE_11_param : _requests_io_data_param; // @[Scheduler.scala:70:24, :231:41, :233:{78,95}]
wire [2:0] _view__T_11_size = bypass_12 ? _view__WIRE_11_size : _requests_io_data_size; // @[Scheduler.scala:70:24, :231:41, :233:{78,95}]
wire [5:0] _view__T_11_source = bypass_12 ? _view__WIRE_11_source : _requests_io_data_source; // @[Scheduler.scala:70:24, :231:41, :233:{78,95}]
wire [8:0] _view__T_11_tag = bypass_12 ? _view__WIRE_11_tag : _requests_io_data_tag; // @[Scheduler.scala:70:24, :231:41, :233:{78,95}]
wire [5:0] _view__T_11_offset = bypass_12 ? _view__WIRE_11_offset : _requests_io_data_offset; // @[Scheduler.scala:70:24, :231:41, :233:{78,95}]
wire [5:0] _view__T_11_put = bypass_12 ? _view__WIRE_11_put : _requests_io_data_put; // @[Scheduler.scala:70:24, :231:41, :233:{78,95}]
wire _mshrs_11_io_allocate_bits_repeat_T = mshrs_11_io_allocate_bits_tag == _mshrs_11_io_status_bits_tag; // @[Scheduler.scala:71:46, :233:72, :235:57, :280:83, :282:70, :295:103, :297:73]
wire _mshrs_11_io_allocate_valid_T = sel_11 & will_reload_12; // @[Scheduler.scala:223:28, :232:49, :236:32]
wire [35:0] _prio_requests_T = ~_requests_io_valid; // @[Scheduler.scala:70:24, :240:25]
wire [23:0] _prio_requests_T_1 = _requests_io_valid[35:12]; // @[Scheduler.scala:70:24, :240:65]
wire [35:0] _prio_requests_T_2 = {_prio_requests_T[35:24], _prio_requests_T[23:0] | _prio_requests_T_1}; // @[Scheduler.scala:240:{25,44,65}]
wire [11:0] _prio_requests_T_3 = _requests_io_valid[35:24]; // @[Scheduler.scala:70:24, :240:103]
wire [35:0] _prio_requests_T_4 = {_prio_requests_T_2[35:12], _prio_requests_T_2[11:0] | _prio_requests_T_3}; // @[Scheduler.scala:240:{44,82,103}]
wire [35:0] prio_requests = ~_prio_requests_T_4; // @[Scheduler.scala:240:{23,82}]
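// The cascade below is the expanded one-hot-to-binary index encoder (OHToUInt from OneHot.scala):
// each stage OR-folds the upper and lower halves of the vector and records whether the set bit fell
// in the upper half, accumulating pop_index one bit per stage.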
wire [35:0] _pop_index_T = {pop_index_hi, mshr_selectOH}; // @[Scheduler.scala:121:70, :241:31]
wire [35:0] _pop_index_T_1 = _pop_index_T & prio_requests; // @[Scheduler.scala:240:23, :241:{31,77}]
wire [3:0] pop_index_hi_1 = _pop_index_T_1[35:32]; // @[OneHot.scala:30:18]
wire [31:0] pop_index_lo = _pop_index_T_1[31:0]; // @[OneHot.scala:31:18]
wire _pop_index_T_2 = |pop_index_hi_1; // @[OneHot.scala:30:18, :32:14]
wire [31:0] _pop_index_T_3 = {28'h0, pop_index_hi_1} | pop_index_lo; // @[OneHot.scala:30:18, :31:18, :32:28]
wire [15:0] pop_index_hi_2 = _pop_index_T_3[31:16]; // @[OneHot.scala:30:18, :32:28]
wire [15:0] pop_index_lo_1 = _pop_index_T_3[15:0]; // @[OneHot.scala:31:18, :32:28]
wire _pop_index_T_4 = |pop_index_hi_2; // @[OneHot.scala:30:18, :32:14]
wire [15:0] _pop_index_T_5 = pop_index_hi_2 | pop_index_lo_1; // @[OneHot.scala:30:18, :31:18, :32:28]
wire [7:0] pop_index_hi_3 = _pop_index_T_5[15:8]; // @[OneHot.scala:30:18, :32:28]
wire [7:0] pop_index_lo_2 = _pop_index_T_5[7:0]; // @[OneHot.scala:31:18, :32:28]
wire _pop_index_T_6 = |pop_index_hi_3; // @[OneHot.scala:30:18, :32:14]
wire [7:0] _pop_index_T_7 = pop_index_hi_3 | pop_index_lo_2; // @[OneHot.scala:30:18, :31:18, :32:28]
wire [3:0] pop_index_hi_4 = _pop_index_T_7[7:4]; // @[OneHot.scala:30:18, :32:28]
wire [3:0] pop_index_lo_3 = _pop_index_T_7[3:0]; // @[OneHot.scala:31:18, :32:28]
wire _pop_index_T_8 = |pop_index_hi_4; // @[OneHot.scala:30:18, :32:14]
wire [3:0] _pop_index_T_9 = pop_index_hi_4 | pop_index_lo_3; // @[OneHot.scala:30:18, :31:18, :32:28]
wire [1:0] pop_index_hi_5 = _pop_index_T_9[3:2]; // @[OneHot.scala:30:18, :32:28]
wire [1:0] pop_index_lo_4 = _pop_index_T_9[1:0]; // @[OneHot.scala:31:18, :32:28]
wire _pop_index_T_10 = |pop_index_hi_5; // @[OneHot.scala:30:18, :32:14]
wire [1:0] _pop_index_T_11 = pop_index_hi_5 | pop_index_lo_4; // @[OneHot.scala:30:18, :31:18, :32:28]
wire _pop_index_T_12 = _pop_index_T_11[1]; // @[OneHot.scala:32:28]
wire [1:0] _pop_index_T_13 = {_pop_index_T_10, _pop_index_T_12}; // @[OneHot.scala:32:{10,14}]
wire [2:0] _pop_index_T_14 = {_pop_index_T_8, _pop_index_T_13}; // @[OneHot.scala:32:{10,14}]
wire [3:0] _pop_index_T_15 = {_pop_index_T_6, _pop_index_T_14}; // @[OneHot.scala:32:{10,14}]
wire [4:0] _pop_index_T_16 = {_pop_index_T_4, _pop_index_T_15}; // @[OneHot.scala:32:{10,14}]
wire [5:0] pop_index = {_pop_index_T_2, _pop_index_T_16}; // @[OneHot.scala:32:{10,14}]
wire lb_tag_mismatch = scheduleTag != _requests_io_data_tag; // @[Mux.scala:30:73]
wire mshr_uses_directory_assuming_no_bypass = _mshr_uses_directory_assuming_no_bypass_T & lb_tag_mismatch; // @[Scheduler.scala:246:37, :247:{64,75}]
wire mshr_uses_directory_for_lb = will_pop & lb_tag_mismatch; // @[Scheduler.scala:215:45, :246:37, :248:45]
wire [8:0] _mshr_uses_directory_T = bypass ? request_bits_tag : _requests_io_data_tag; // @[Scheduler.scala:70:24, :163:21, :213:39, :249:63]
wire _mshr_uses_directory_T_1 = scheduleTag != _mshr_uses_directory_T; // @[Mux.scala:30:73]
wire mshr_uses_directory = will_reload & _mshr_uses_directory_T_1; // @[Scheduler.scala:214:37, :249:{41,56}]
wire [1:0] mshr_validOH_lo_lo_hi = {_mshrs_2_io_status_valid, _mshrs_1_io_status_valid}; // @[Scheduler.scala:71:46, :252:25]
wire [2:0] mshr_validOH_lo_lo = {mshr_validOH_lo_lo_hi, _mshrs_0_io_status_valid}; // @[Scheduler.scala:71:46, :252:25]
wire [1:0] mshr_validOH_lo_hi_hi = {_mshrs_5_io_status_valid, _mshrs_4_io_status_valid}; // @[Scheduler.scala:71:46, :252:25]
wire [2:0] mshr_validOH_lo_hi = {mshr_validOH_lo_hi_hi, _mshrs_3_io_status_valid}; // @[Scheduler.scala:71:46, :252:25]
wire [5:0] mshr_validOH_lo = {mshr_validOH_lo_hi, mshr_validOH_lo_lo}; // @[Scheduler.scala:252:25]
wire [1:0] mshr_validOH_hi_lo_hi = {_mshrs_8_io_status_valid, _mshrs_7_io_status_valid}; // @[Scheduler.scala:71:46, :252:25]
wire [2:0] mshr_validOH_hi_lo = {mshr_validOH_hi_lo_hi, _mshrs_6_io_status_valid}; // @[Scheduler.scala:71:46, :252:25]
wire [1:0] mshr_validOH_hi_hi_hi = {_mshrs_11_io_status_valid, _mshrs_10_io_status_valid}; // @[Scheduler.scala:71:46, :252:25]
wire [2:0] mshr_validOH_hi_hi = {mshr_validOH_hi_hi_hi, _mshrs_9_io_status_valid}; // @[Scheduler.scala:71:46, :252:25]
wire [5:0] mshr_validOH_hi = {mshr_validOH_hi_hi, mshr_validOH_hi_lo}; // @[Scheduler.scala:252:25]
wire [11:0] mshr_validOH = {mshr_validOH_hi, mshr_validOH_lo}; // @[Scheduler.scala:252:25]
wire [11:0] _mshr_free_T = ~mshr_validOH; // @[Scheduler.scala:252:25, :253:20]
wire [11:0] _mshr_free_T_1 = _mshr_free_T & prioFilter; // @[Scheduler.scala:182:23, :253:{20,34}]
wire mshr_free = |_mshr_free_T_1; // @[Scheduler.scala:253:{34,48}]
wire bypassQueue = schedule_reload & bypassMatches; // @[Mux.scala:30:73]
wire _request_alloc_cases_T = ~mshr_uses_directory_assuming_no_bypass; // @[Scheduler.scala:247:75, :258:16]
wire _request_alloc_cases_T_1 = alloc & _request_alloc_cases_T; // @[Scheduler.scala:173:15, :258:{13,16}]
wire _request_alloc_cases_T_2 = _request_alloc_cases_T_1 & mshr_free; // @[Scheduler.scala:253:48, :258:{13,56}]
wire _request_alloc_cases_T_9 = _request_alloc_cases_T_2; // @[Scheduler.scala:258:{56,70}]
wire _request_alloc_cases_T_3 = ~mshr_uses_directory_assuming_no_bypass; // @[Scheduler.scala:247:75, :258:16, :259:16]
wire _request_alloc_cases_T_5 = ~_mshrs_10_io_status_valid; // @[Scheduler.scala:71:46, :259:59]
wire _request_alloc_cases_T_7 = ~_mshrs_11_io_status_valid; // @[Scheduler.scala:71:46, :259:87]
wire _request_alloc_cases_T_10 = ~mshr_uses_directory_assuming_no_bypass; // @[Scheduler.scala:247:75, :258:16, :260:16]
wire _request_alloc_cases_T_11 = nestC & _request_alloc_cases_T_10; // @[Scheduler.scala:180:70, :260:{13,16}]
wire _request_alloc_cases_T_12 = ~_mshrs_11_io_status_valid; // @[Scheduler.scala:71:46, :259:87, :260:59]
wire _request_alloc_cases_T_13 = _request_alloc_cases_T_11 & _request_alloc_cases_T_12; // @[Scheduler.scala:260:{13,56,59}]
wire request_alloc_cases = _request_alloc_cases_T_9 | _request_alloc_cases_T_13; // @[Scheduler.scala:258:70, :259:112, :260:56]
wire _request_ready_T = bypassQueue | _requests_io_push_ready; // @[Scheduler.scala:70:24, :256:37, :261:66]
wire _request_ready_T_1 = queue & _request_ready_T; // @[Scheduler.scala:185:63, :261:{50,66}]
assign _request_ready_T_2 = request_alloc_cases | _request_ready_T_1; // @[Scheduler.scala:259:112, :261:{40,50}]
assign request_ready = _request_ready_T_2; // @[Scheduler.scala:163:21, :261:40]
wire alloc_uses_directory = request_valid & request_alloc_cases; // @[Scheduler.scala:163:21, :259:112, :262:44]
wire _directory_io_read_valid_T = mshr_uses_directory | alloc_uses_directory; // @[Scheduler.scala:249:41, :262:44, :265:50]
wire [10:0] _directory_io_read_bits_set_T = mshr_uses_directory_for_lb ? scheduleSet : request_bits_set; // @[Mux.scala:30:73]
wire [8:0] _directory_io_read_bits_tag_T = mshr_uses_directory_for_lb ? _requests_io_data_tag : request_bits_tag; // @[Scheduler.scala:70:24, :163:21, :248:45, :267:36]
wire _requests_io_push_valid_T_1 = ~bypassQueue; // @[Scheduler.scala:256:37, :270:55]
wire _requests_io_push_valid_T_2 = _requests_io_push_valid_T & _requests_io_push_valid_T_1; // @[Scheduler.scala:270:{43,52,55}]
wire [3:0] requests_io_push_bits_index_hi = _requests_io_push_bits_index_T[11:8]; // @[OneHot.scala:30:18]
wire [7:0] requests_io_push_bits_index_lo = _requests_io_push_bits_index_T[7:0]; // @[OneHot.scala:31:18]
wire _requests_io_push_bits_index_T_1 = |requests_io_push_bits_index_hi; // @[OneHot.scala:30:18, :32:14]
wire [7:0] _requests_io_push_bits_index_T_2 = {4'h0, requests_io_push_bits_index_hi} | requests_io_push_bits_index_lo; // @[OneHot.scala:30:18, :31:18, :32:28]
wire [3:0] requests_io_push_bits_index_hi_1 = _requests_io_push_bits_index_T_2[7:4]; // @[OneHot.scala:30:18, :32:28]
wire [3:0] requests_io_push_bits_index_lo_1 = _requests_io_push_bits_index_T_2[3:0]; // @[OneHot.scala:31:18, :32:28]
wire _requests_io_push_bits_index_T_3 = |requests_io_push_bits_index_hi_1; // @[OneHot.scala:30:18, :32:14]
wire [3:0] _requests_io_push_bits_index_T_4 = requests_io_push_bits_index_hi_1 | requests_io_push_bits_index_lo_1; // @[OneHot.scala:30:18, :31:18, :32:28]
wire [1:0] requests_io_push_bits_index_hi_2 = _requests_io_push_bits_index_T_4[3:2]; // @[OneHot.scala:30:18, :32:28]
wire [1:0] requests_io_push_bits_index_lo_2 = _requests_io_push_bits_index_T_4[1:0]; // @[OneHot.scala:31:18, :32:28]
wire _requests_io_push_bits_index_T_5 = |requests_io_push_bits_index_hi_2; // @[OneHot.scala:30:18, :32:14]
wire [1:0] _requests_io_push_bits_index_T_6 = requests_io_push_bits_index_hi_2 | requests_io_push_bits_index_lo_2; // @[OneHot.scala:30:18, :31:18, :32:28]
wire _requests_io_push_bits_index_T_7 = _requests_io_push_bits_index_T_6[1]; // @[OneHot.scala:32:28]
wire [1:0] _requests_io_push_bits_index_T_8 = {_requests_io_push_bits_index_T_5, _requests_io_push_bits_index_T_7}; // @[OneHot.scala:32:{10,14}]
wire [2:0] _requests_io_push_bits_index_T_9 = {_requests_io_push_bits_index_T_3, _requests_io_push_bits_index_T_8}; // @[OneHot.scala:32:{10,14}]
wire [3:0] _requests_io_push_bits_index_T_10 = {_requests_io_push_bits_index_T_1, _requests_io_push_bits_index_T_9}; // @[OneHot.scala:32:{10,14}]
wire [23:0] _requests_io_push_bits_index_T_11 = {lowerMatches1, 12'h0}; // @[Scheduler.scala:200:8, :275:30]
wire [7:0] requests_io_push_bits_index_hi_3 = _requests_io_push_bits_index_T_11[23:16]; // @[OneHot.scala:30:18]
wire [15:0] requests_io_push_bits_index_lo_3 = _requests_io_push_bits_index_T_11[15:0]; // @[OneHot.scala:31:18]
wire _requests_io_push_bits_index_T_12 = |requests_io_push_bits_index_hi_3; // @[OneHot.scala:30:18, :32:14]
wire [15:0] _requests_io_push_bits_index_T_13 = {8'h0, requests_io_push_bits_index_hi_3} | requests_io_push_bits_index_lo_3; // @[OneHot.scala:30:18, :31:18, :32:28]
wire [7:0] requests_io_push_bits_index_hi_4 = _requests_io_push_bits_index_T_13[15:8]; // @[OneHot.scala:30:18, :32:28]
wire [7:0] requests_io_push_bits_index_lo_4 = _requests_io_push_bits_index_T_13[7:0]; // @[OneHot.scala:31:18, :32:28]
wire _requests_io_push_bits_index_T_14 = |requests_io_push_bits_index_hi_4; // @[OneHot.scala:30:18, :32:14]
wire [7:0] _requests_io_push_bits_index_T_15 = requests_io_push_bits_index_hi_4 | requests_io_push_bits_index_lo_4; // @[OneHot.scala:30:18, :31:18, :32:28]
wire [3:0] requests_io_push_bits_index_hi_5 = _requests_io_push_bits_index_T_15[7:4]; // @[OneHot.scala:30:18, :32:28]
wire [3:0] requests_io_push_bits_index_lo_5 = _requests_io_push_bits_index_T_15[3:0]; // @[OneHot.scala:31:18, :32:28]
wire _requests_io_push_bits_index_T_16 = |requests_io_push_bits_index_hi_5; // @[OneHot.scala:30:18, :32:14]
wire [3:0] _requests_io_push_bits_index_T_17 = requests_io_push_bits_index_hi_5 | requests_io_push_bits_index_lo_5; // @[OneHot.scala:30:18, :31:18, :32:28]
wire [1:0] requests_io_push_bits_index_hi_6 = _requests_io_push_bits_index_T_17[3:2]; // @[OneHot.scala:30:18, :32:28]
wire [1:0] requests_io_push_bits_index_lo_6 = _requests_io_push_bits_index_T_17[1:0]; // @[OneHot.scala:31:18, :32:28]
wire _requests_io_push_bits_index_T_18 = |requests_io_push_bits_index_hi_6; // @[OneHot.scala:30:18, :32:14]
wire [1:0] _requests_io_push_bits_index_T_19 = requests_io_push_bits_index_hi_6 | requests_io_push_bits_index_lo_6; // @[OneHot.scala:30:18, :31:18, :32:28]
wire _requests_io_push_bits_index_T_20 = _requests_io_push_bits_index_T_19[1]; // @[OneHot.scala:32:28]
wire [1:0] _requests_io_push_bits_index_T_21 = {_requests_io_push_bits_index_T_18, _requests_io_push_bits_index_T_20}; // @[OneHot.scala:32:{10,14}]
wire [2:0] _requests_io_push_bits_index_T_22 = {_requests_io_push_bits_index_T_16, _requests_io_push_bits_index_T_21}; // @[OneHot.scala:32:{10,14}]
wire [3:0] _requests_io_push_bits_index_T_23 = {_requests_io_push_bits_index_T_14, _requests_io_push_bits_index_T_22}; // @[OneHot.scala:32:{10,14}]
wire [4:0] _requests_io_push_bits_index_T_24 = {_requests_io_push_bits_index_T_12, _requests_io_push_bits_index_T_23}; // @[OneHot.scala:32:{10,14}]
wire [35:0] _requests_io_push_bits_index_T_25 = {lowerMatches1, 24'h0}; // @[Scheduler.scala:200:8, :276:30]
wire [3:0] requests_io_push_bits_index_hi_7 = _requests_io_push_bits_index_T_25[35:32]; // @[OneHot.scala:30:18]
wire [31:0] requests_io_push_bits_index_lo_7 = _requests_io_push_bits_index_T_25[31:0]; // @[OneHot.scala:31:18]
wire _requests_io_push_bits_index_T_26 = |requests_io_push_bits_index_hi_7; // @[OneHot.scala:30:18, :32:14]
wire [31:0] _requests_io_push_bits_index_T_27 = {28'h0, requests_io_push_bits_index_hi_7} | requests_io_push_bits_index_lo_7; // @[OneHot.scala:30:18, :31:18, :32:28]
wire [15:0] requests_io_push_bits_index_hi_8 = _requests_io_push_bits_index_T_27[31:16]; // @[OneHot.scala:30:18, :32:28]
wire [15:0] requests_io_push_bits_index_lo_8 = _requests_io_push_bits_index_T_27[15:0]; // @[OneHot.scala:31:18, :32:28]
wire _requests_io_push_bits_index_T_28 = |requests_io_push_bits_index_hi_8; // @[OneHot.scala:30:18, :32:14]
wire [15:0] _requests_io_push_bits_index_T_29 = requests_io_push_bits_index_hi_8 | requests_io_push_bits_index_lo_8; // @[OneHot.scala:30:18, :31:18, :32:28]
wire [7:0] requests_io_push_bits_index_hi_9 = _requests_io_push_bits_index_T_29[15:8]; // @[OneHot.scala:30:18, :32:28]
wire [7:0] requests_io_push_bits_index_lo_9 = _requests_io_push_bits_index_T_29[7:0]; // @[OneHot.scala:31:18, :32:28]
wire _requests_io_push_bits_index_T_30 = |requests_io_push_bits_index_hi_9; // @[OneHot.scala:30:18, :32:14]
wire [7:0] _requests_io_push_bits_index_T_31 = requests_io_push_bits_index_hi_9 | requests_io_push_bits_index_lo_9; // @[OneHot.scala:30:18, :31:18, :32:28]
wire [3:0] requests_io_push_bits_index_hi_10 = _requests_io_push_bits_index_T_31[7:4]; // @[OneHot.scala:30:18, :32:28]
wire [3:0] requests_io_push_bits_index_lo_10 = _requests_io_push_bits_index_T_31[3:0]; // @[OneHot.scala:31:18, :32:28]
wire _requests_io_push_bits_index_T_32 = |requests_io_push_bits_index_hi_10; // @[OneHot.scala:30:18, :32:14]
wire [3:0] _requests_io_push_bits_index_T_33 = requests_io_push_bits_index_hi_10 | requests_io_push_bits_index_lo_10; // @[OneHot.scala:30:18, :31:18, :32:28]
wire [1:0] requests_io_push_bits_index_hi_11 = _requests_io_push_bits_index_T_33[3:2]; // @[OneHot.scala:30:18, :32:28]
wire [1:0] requests_io_push_bits_index_lo_11 = _requests_io_push_bits_index_T_33[1:0]; // @[OneHot.scala:31:18, :32:28]
wire _requests_io_push_bits_index_T_34 = |requests_io_push_bits_index_hi_11; // @[OneHot.scala:30:18, :32:14]
wire [1:0] _requests_io_push_bits_index_T_35 = requests_io_push_bits_index_hi_11 | requests_io_push_bits_index_lo_11; // @[OneHot.scala:30:18, :31:18, :32:28]
wire _requests_io_push_bits_index_T_36 = _requests_io_push_bits_index_T_35[1]; // @[OneHot.scala:32:28]
wire [1:0] _requests_io_push_bits_index_T_37 = {_requests_io_push_bits_index_T_34, _requests_io_push_bits_index_T_36}; // @[OneHot.scala:32:{10,14}]
wire [2:0] _requests_io_push_bits_index_T_38 = {_requests_io_push_bits_index_T_32, _requests_io_push_bits_index_T_37}; // @[OneHot.scala:32:{10,14}]
wire [3:0] _requests_io_push_bits_index_T_39 = {_requests_io_push_bits_index_T_30, _requests_io_push_bits_index_T_38}; // @[OneHot.scala:32:{10,14}]
wire [4:0] _requests_io_push_bits_index_T_40 = {_requests_io_push_bits_index_T_28, _requests_io_push_bits_index_T_39}; // @[OneHot.scala:32:{10,14}]
wire [5:0] _requests_io_push_bits_index_T_41 = {_requests_io_push_bits_index_T_26, _requests_io_push_bits_index_T_40}; // @[OneHot.scala:32:{10,14}]
wire [3:0] _requests_io_push_bits_index_T_42 = request_bits_prio_0 ? _requests_io_push_bits_index_T_10 : 4'h0; // @[OneHot.scala:32:10]
wire [5:0] _requests_io_push_bits_index_T_44 = request_bits_prio_2 ? _requests_io_push_bits_index_T_41 : 6'h0; // @[OneHot.scala:32:10]
wire [4:0] _requests_io_push_bits_index_T_45 = {1'h0, _requests_io_push_bits_index_T_42}; // @[Mux.scala:30:73]
wire [5:0] _requests_io_push_bits_index_T_46 = {1'h0, _requests_io_push_bits_index_T_45} | _requests_io_push_bits_index_T_44; // @[Mux.scala:30:73]
wire [5:0] _requests_io_push_bits_index_WIRE = _requests_io_push_bits_index_T_46; // @[Mux.scala:30:73]
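// The shift/OR ladder below is the expanded prefix-OR (leftOR from package.scala):
// ~(leftOR(x) << 1) & x keeps only the lowest set bit of x = ~mshr_validOH, i.e. it one-hot selects
// the lowest-numbered free MSHR, further masked by prioFilter.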
wire [11:0] _mshr_insertOH_T = ~mshr_validOH; // @[Scheduler.scala:252:25, :253:20, :278:32]
wire [12:0] _mshr_insertOH_T_1 = {_mshr_insertOH_T, 1'h0}; // @[package.scala:253:48]
wire [11:0] _mshr_insertOH_T_2 = _mshr_insertOH_T_1[11:0]; // @[package.scala:253:{48,53}]
wire [11:0] _mshr_insertOH_T_3 = _mshr_insertOH_T | _mshr_insertOH_T_2; // @[package.scala:253:{43,53}]
wire [13:0] _mshr_insertOH_T_4 = {_mshr_insertOH_T_3, 2'h0}; // @[package.scala:253:{43,48}]
wire [11:0] _mshr_insertOH_T_5 = _mshr_insertOH_T_4[11:0]; // @[package.scala:253:{48,53}]
wire [11:0] _mshr_insertOH_T_6 = _mshr_insertOH_T_3 | _mshr_insertOH_T_5; // @[package.scala:253:{43,53}]
wire [15:0] _mshr_insertOH_T_7 = {_mshr_insertOH_T_6, 4'h0}; // @[package.scala:253:{43,48}]
wire [11:0] _mshr_insertOH_T_8 = _mshr_insertOH_T_7[11:0]; // @[package.scala:253:{48,53}]
wire [11:0] _mshr_insertOH_T_9 = _mshr_insertOH_T_6 | _mshr_insertOH_T_8; // @[package.scala:253:{43,53}]
wire [19:0] _mshr_insertOH_T_10 = {_mshr_insertOH_T_9, 8'h0}; // @[package.scala:253:{43,48}]
wire [11:0] _mshr_insertOH_T_11 = _mshr_insertOH_T_10[11:0]; // @[package.scala:253:{48,53}]
wire [11:0] _mshr_insertOH_T_12 = _mshr_insertOH_T_9 | _mshr_insertOH_T_11; // @[package.scala:253:{43,53}]
wire [11:0] _mshr_insertOH_T_13 = _mshr_insertOH_T_12; // @[package.scala:253:43, :254:17]
wire [12:0] _mshr_insertOH_T_14 = {_mshr_insertOH_T_13, 1'h0}; // @[package.scala:254:17]
wire [12:0] _mshr_insertOH_T_15 = ~_mshr_insertOH_T_14; // @[Scheduler.scala:278:{23,47}]
wire [11:0] _mshr_insertOH_T_16 = ~mshr_validOH; // @[Scheduler.scala:252:25, :253:20, :278:55]
wire [12:0] _mshr_insertOH_T_17 = {1'h0, _mshr_insertOH_T_15[11:0] & _mshr_insertOH_T_16}; // @[Scheduler.scala:278:{23,53,55}]
wire [12:0] mshr_insertOH = {1'h0, _mshr_insertOH_T_17[11:0] & prioFilter}; // @[Scheduler.scala:182:23, :278:{53,69}]
wire _T_76 = request_valid & alloc; // @[Scheduler.scala:163:21, :173:15, :280:25]
wire _T_35 = _T_76 & mshr_insertOH[0] & ~mshr_uses_directory_assuming_no_bypass; // @[Scheduler.scala:247:75, :258:16, :278:69, :279:18, :280:{25,34,39}]
assign mshrs_0_io_allocate_bits_tag = _T_35 ? request_bits_tag : _view__T_tag; // @[Scheduler.scala:163:21, :233:{72,78}, :280:{34,39,83}, :282:70]
wire _T_39 = _T_76 & mshr_insertOH[1] & ~mshr_uses_directory_assuming_no_bypass; // @[Scheduler.scala:247:75, :258:16, :278:69, :279:18, :280:{25,34,39}]
assign mshrs_1_io_allocate_bits_tag = _T_39 ? request_bits_tag : _view__T_1_tag; // @[Scheduler.scala:163:21, :233:{72,78}, :280:{34,39,83}, :282:70]
wire _T_43 = _T_76 & mshr_insertOH[2] & ~mshr_uses_directory_assuming_no_bypass; // @[Scheduler.scala:247:75, :258:16, :278:69, :279:18, :280:{25,34,39}]
assign mshrs_2_io_allocate_bits_tag = _T_43 ? request_bits_tag : _view__T_2_tag; // @[Scheduler.scala:163:21, :233:{72,78}, :280:{34,39,83}, :282:70]
wire _T_47 = _T_76 & mshr_insertOH[3] & ~mshr_uses_directory_assuming_no_bypass; // @[Scheduler.scala:247:75, :258:16, :278:69, :279:18, :280:{25,34,39}]
assign mshrs_3_io_allocate_bits_tag = _T_47 ? request_bits_tag : _view__T_3_tag; // @[Scheduler.scala:163:21, :233:{72,78}, :280:{34,39,83}, :282:70]
wire _T_51 = _T_76 & mshr_insertOH[4] & ~mshr_uses_directory_assuming_no_bypass; // @[Scheduler.scala:247:75, :258:16, :278:69, :279:18, :280:{25,34,39}]
assign mshrs_4_io_allocate_bits_tag = _T_51 ? request_bits_tag : _view__T_4_tag; // @[Scheduler.scala:163:21, :233:{72,78}, :280:{34,39,83}, :282:70]
wire _T_55 = _T_76 & mshr_insertOH[5] & ~mshr_uses_directory_assuming_no_bypass; // @[Scheduler.scala:247:75, :258:16, :278:69, :279:18, :280:{25,34,39}]
assign mshrs_5_io_allocate_bits_tag = _T_55 ? request_bits_tag : _view__T_5_tag; // @[Scheduler.scala:163:21, :233:{72,78}, :280:{34,39,83}, :282:70]
wire _T_59 = _T_76 & mshr_insertOH[6] & ~mshr_uses_directory_assuming_no_bypass; // @[Scheduler.scala:247:75, :258:16, :278:69, :279:18, :280:{25,34,39}]
assign mshrs_6_io_allocate_bits_tag = _T_59 ? request_bits_tag : _view__T_6_tag; // @[Scheduler.scala:163:21, :233:{72,78}, :280:{34,39,83}, :282:70]
wire _T_63 = _T_76 & mshr_insertOH[7] & ~mshr_uses_directory_assuming_no_bypass; // @[Scheduler.scala:247:75, :258:16, :278:69, :279:18, :280:{25,34,39}]
assign mshrs_7_io_allocate_bits_tag = _T_63 ? request_bits_tag : _view__T_7_tag; // @[Scheduler.scala:163:21, :233:{72,78}, :280:{34,39,83}, :282:70]
wire _T_67 = _T_76 & mshr_insertOH[8] & ~mshr_uses_directory_assuming_no_bypass; // @[Scheduler.scala:247:75, :258:16, :278:69, :279:18, :280:{25,34,39}]
assign mshrs_8_io_allocate_bits_tag = _T_67 ? request_bits_tag : _view__T_8_tag; // @[Scheduler.scala:163:21, :233:{72,78}, :280:{34,39,83}, :282:70]
wire _T_71 = _T_76 & mshr_insertOH[9] & ~mshr_uses_directory_assuming_no_bypass; // @[Scheduler.scala:247:75, :258:16, :278:69, :279:18, :280:{25,34,39}]
assign mshrs_9_io_allocate_bits_tag = _T_71 ? request_bits_tag : _view__T_9_tag; // @[Scheduler.scala:163:21, :233:{72,78}, :280:{34,39,83}, :282:70]
wire _T_75 = _T_76 & mshr_insertOH[10] & ~mshr_uses_directory_assuming_no_bypass; // @[Scheduler.scala:247:75, :258:16, :278:69, :279:18, :280:{25,34,39}]
assign mshrs_10_io_allocate_bits_tag = _T_75 ? request_bits_tag : _view__T_10_tag; // @[Scheduler.scala:163:21, :233:{72,78}, :280:{34,39,83}, :282:70, :287:131, :289:74]
wire _T_95 = request_valid & nestC & ~_mshrs_11_io_status_valid & ~mshr_uses_directory_assuming_no_bypass; // @[Scheduler.scala:71:46, :163:21, :180:70, :193:33, :247:75, :258:16, :259:87, :295:{32,59}]
wire _GEN_3 = _T_95 | _T_76 & mshr_insertOH[11] & ~mshr_uses_directory_assuming_no_bypass; // @[Scheduler.scala:193:33, :236:25, :247:75, :258:16, :278:69, :279:18, :280:{25,34,39,83}, :281:27, :295:{32,59,103}, :296:30]
assign mshrs_11_io_allocate_bits_tag = _GEN_3 ? request_bits_tag : _view__T_11_tag; // @[Scheduler.scala:163:21, :233:{72,78}, :236:25, :280:83, :281:27, :282:70, :295:103, :296:30, :297:73] |
Generate the Verilog code corresponding to the following Chisel files.
File Buffer.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import org.chipsalliance.diplomacy.lazymodule._
import freechips.rocketchip.diplomacy.BufferParams
class TLBufferNode (
a: BufferParams,
b: BufferParams,
c: BufferParams,
d: BufferParams,
e: BufferParams)(implicit valName: ValName) extends TLAdapterNode(
clientFn = { p => p.v1copy(minLatency = p.minLatency + b.latency + c.latency) },
managerFn = { p => p.v1copy(minLatency = p.minLatency + a.latency + d.latency) }
) {
override lazy val nodedebugstring = s"a:${a.toString}, b:${b.toString}, c:${c.toString}, d:${d.toString}, e:${e.toString}"
override def circuitIdentity = List(a,b,c,d,e).forall(_ == BufferParams.none)
}
class TLBuffer(
a: BufferParams,
b: BufferParams,
c: BufferParams,
d: BufferParams,
e: BufferParams)(implicit p: Parameters) extends LazyModule
{
def this(ace: BufferParams, bd: BufferParams)(implicit p: Parameters) = this(ace, bd, ace, bd, ace)
def this(abcde: BufferParams)(implicit p: Parameters) = this(abcde, abcde)
def this()(implicit p: Parameters) = this(BufferParams.default)
val node = new TLBufferNode(a, b, c, d, e)
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
def headBundle = node.out.head._2.bundle
override def desiredName = (Seq("TLBuffer") ++ node.out.headOption.map(_._2.bundle.shortName)).mkString("_")
(node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
out.a <> a(in .a)
in .d <> d(out.d)
if (edgeOut.manager.anySupportAcquireB && edgeOut.client.anySupportProbe) {
in .b <> b(out.b)
out.c <> c(in .c)
out.e <> e(in .e)
} else {
in.b.valid := false.B
in.c.ready := true.B
in.e.ready := true.B
out.b.ready := true.B
out.c.valid := false.B
out.e.valid := false.B
}
}
}
}
object TLBuffer
{
def apply() (implicit p: Parameters): TLNode = apply(BufferParams.default)
def apply(abcde: BufferParams) (implicit p: Parameters): TLNode = apply(abcde, abcde)
def apply(ace: BufferParams, bd: BufferParams)(implicit p: Parameters): TLNode = apply(ace, bd, ace, bd, ace)
def apply(
a: BufferParams,
b: BufferParams,
c: BufferParams,
d: BufferParams,
e: BufferParams)(implicit p: Parameters): TLNode =
{
val buffer = LazyModule(new TLBuffer(a, b, c, d, e))
buffer.node
}
def chain(depth: Int, name: Option[String] = None)(implicit p: Parameters): Seq[TLNode] = {
val buffers = Seq.fill(depth) { LazyModule(new TLBuffer()) }
name.foreach { n => buffers.zipWithIndex.foreach { case (b, i) => b.suggestName(s"${n}_${i}") } }
buffers.map(_.node)
}
def chainNode(depth: Int, name: Option[String] = None)(implicit p: Parameters): TLNode = {
chain(depth, name)
.reduceLeftOption(_ :*=* _)
.getOrElse(TLNameNode("no_buffer"))
}
}
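// Illustrative sketch only (an editorial assumption, not part of the upstream Buffer.scala): the
// node returned by TLBuffer.apply is normally composed between two existing TileLink nodes with
// `:=`. The class and its identity endpoints are hypothetical and show only the connection idiom;
// a real design would bind them to actual client/manager nodes before elaboration.
class ExampleBufferedPassthrough(implicit p: Parameters) extends LazyModule {
  val inward  = TLIdentityNode()
  val outward = TLIdentityNode()
  // One fully buffered hop on all five channels (a, b, c, d, e):
  outward := TLBuffer(BufferParams.default) := inward
  lazy val module = new Impl
  class Impl extends LazyModuleImp(this) { }
}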
File Nodes.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import org.chipsalliance.diplomacy.nodes._
import freechips.rocketchip.util.{AsyncQueueParams,RationalDirection}
case object TLMonitorBuilder extends Field[TLMonitorArgs => TLMonitorBase](args => new TLMonitor(args))
object TLImp extends NodeImp[TLMasterPortParameters, TLSlavePortParameters, TLEdgeOut, TLEdgeIn, TLBundle]
{
def edgeO(pd: TLMasterPortParameters, pu: TLSlavePortParameters, p: Parameters, sourceInfo: SourceInfo) = new TLEdgeOut(pd, pu, p, sourceInfo)
def edgeI(pd: TLMasterPortParameters, pu: TLSlavePortParameters, p: Parameters, sourceInfo: SourceInfo) = new TLEdgeIn (pd, pu, p, sourceInfo)
def bundleO(eo: TLEdgeOut) = TLBundle(eo.bundle)
def bundleI(ei: TLEdgeIn) = TLBundle(ei.bundle)
def render(ei: TLEdgeIn) = RenderedEdge(colour = "#000000" /* black */, label = (ei.manager.beatBytes * 8).toString)
override def monitor(bundle: TLBundle, edge: TLEdgeIn): Unit = {
val monitor = Module(edge.params(TLMonitorBuilder)(TLMonitorArgs(edge)))
monitor.io.in := bundle
}
override def mixO(pd: TLMasterPortParameters, node: OutwardNode[TLMasterPortParameters, TLSlavePortParameters, TLBundle]): TLMasterPortParameters =
pd.v1copy(clients = pd.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) })
override def mixI(pu: TLSlavePortParameters, node: InwardNode[TLMasterPortParameters, TLSlavePortParameters, TLBundle]): TLSlavePortParameters =
pu.v1copy(managers = pu.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) })
}
trait TLFormatNode extends FormatNode[TLEdgeIn, TLEdgeOut]
case class TLClientNode(portParams: Seq[TLMasterPortParameters])(implicit valName: ValName) extends SourceNode(TLImp)(portParams) with TLFormatNode
case class TLManagerNode(portParams: Seq[TLSlavePortParameters])(implicit valName: ValName) extends SinkNode(TLImp)(portParams) with TLFormatNode
case class TLAdapterNode(
clientFn: TLMasterPortParameters => TLMasterPortParameters = { s => s },
managerFn: TLSlavePortParameters => TLSlavePortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLImp)(clientFn, managerFn) with TLFormatNode
case class TLJunctionNode(
clientFn: Seq[TLMasterPortParameters] => Seq[TLMasterPortParameters],
managerFn: Seq[TLSlavePortParameters] => Seq[TLSlavePortParameters])(
implicit valName: ValName)
extends JunctionNode(TLImp)(clientFn, managerFn) with TLFormatNode
case class TLIdentityNode()(implicit valName: ValName) extends IdentityNode(TLImp)() with TLFormatNode
object TLNameNode {
def apply(name: ValName) = TLIdentityNode()(name)
def apply(name: Option[String]): TLIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLIdentityNode = apply(Some(name))
}
case class TLEphemeralNode()(implicit valName: ValName) extends EphemeralNode(TLImp)()
object TLTempNode {
def apply(): TLEphemeralNode = TLEphemeralNode()(ValName("temp"))
}
case class TLNexusNode(
clientFn: Seq[TLMasterPortParameters] => TLMasterPortParameters,
managerFn: Seq[TLSlavePortParameters] => TLSlavePortParameters)(
implicit valName: ValName)
extends NexusNode(TLImp)(clientFn, managerFn) with TLFormatNode
abstract class TLCustomNode(implicit valName: ValName)
extends CustomNode(TLImp) with TLFormatNode
// Asynchronous crossings
trait TLAsyncFormatNode extends FormatNode[TLAsyncEdgeParameters, TLAsyncEdgeParameters]
object TLAsyncImp extends SimpleNodeImp[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncEdgeParameters, TLAsyncBundle]
{
def edge(pd: TLAsyncClientPortParameters, pu: TLAsyncManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLAsyncEdgeParameters(pd, pu, p, sourceInfo)
def bundle(e: TLAsyncEdgeParameters) = new TLAsyncBundle(e.bundle)
def render(e: TLAsyncEdgeParameters) = RenderedEdge(colour = "#ff0000" /* red */, label = e.manager.async.depth.toString)
override def mixO(pd: TLAsyncClientPortParameters, node: OutwardNode[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncBundle]): TLAsyncClientPortParameters =
pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }))
override def mixI(pu: TLAsyncManagerPortParameters, node: InwardNode[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncBundle]): TLAsyncManagerPortParameters =
pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }))
}
case class TLAsyncAdapterNode(
clientFn: TLAsyncClientPortParameters => TLAsyncClientPortParameters = { s => s },
managerFn: TLAsyncManagerPortParameters => TLAsyncManagerPortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLAsyncImp)(clientFn, managerFn) with TLAsyncFormatNode
case class TLAsyncIdentityNode()(implicit valName: ValName) extends IdentityNode(TLAsyncImp)() with TLAsyncFormatNode
object TLAsyncNameNode {
def apply(name: ValName) = TLAsyncIdentityNode()(name)
def apply(name: Option[String]): TLAsyncIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLAsyncIdentityNode = apply(Some(name))
}
case class TLAsyncSourceNode(sync: Option[Int])(implicit valName: ValName)
extends MixedAdapterNode(TLImp, TLAsyncImp)(
dFn = { p => TLAsyncClientPortParameters(p) },
uFn = { p => p.base.v1copy(minLatency = p.base.minLatency + sync.getOrElse(p.async.sync)) }) with FormatNode[TLEdgeIn, TLAsyncEdgeParameters] // discard cycles in other clock domain
case class TLAsyncSinkNode(async: AsyncQueueParams)(implicit valName: ValName)
extends MixedAdapterNode(TLAsyncImp, TLImp)(
dFn = { p => p.base.v1copy(minLatency = p.base.minLatency + async.sync) },
uFn = { p => TLAsyncManagerPortParameters(async, p) }) with FormatNode[TLAsyncEdgeParameters, TLEdgeOut]
// Rationally related crossings
trait TLRationalFormatNode extends FormatNode[TLRationalEdgeParameters, TLRationalEdgeParameters]
object TLRationalImp extends SimpleNodeImp[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalEdgeParameters, TLRationalBundle]
{
def edge(pd: TLRationalClientPortParameters, pu: TLRationalManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLRationalEdgeParameters(pd, pu, p, sourceInfo)
def bundle(e: TLRationalEdgeParameters) = new TLRationalBundle(e.bundle)
def render(e: TLRationalEdgeParameters) = RenderedEdge(colour = "#00ff00" /* green */)
override def mixO(pd: TLRationalClientPortParameters, node: OutwardNode[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalBundle]): TLRationalClientPortParameters =
pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }))
override def mixI(pu: TLRationalManagerPortParameters, node: InwardNode[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalBundle]): TLRationalManagerPortParameters =
pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }))
}
case class TLRationalAdapterNode(
clientFn: TLRationalClientPortParameters => TLRationalClientPortParameters = { s => s },
managerFn: TLRationalManagerPortParameters => TLRationalManagerPortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLRationalImp)(clientFn, managerFn) with TLRationalFormatNode
case class TLRationalIdentityNode()(implicit valName: ValName) extends IdentityNode(TLRationalImp)() with TLRationalFormatNode
object TLRationalNameNode {
def apply(name: ValName) = TLRationalIdentityNode()(name)
def apply(name: Option[String]): TLRationalIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLRationalIdentityNode = apply(Some(name))
}
case class TLRationalSourceNode()(implicit valName: ValName)
extends MixedAdapterNode(TLImp, TLRationalImp)(
dFn = { p => TLRationalClientPortParameters(p) },
uFn = { p => p.base.v1copy(minLatency = 1) }) with FormatNode[TLEdgeIn, TLRationalEdgeParameters] // discard cycles from other clock domain
case class TLRationalSinkNode(direction: RationalDirection)(implicit valName: ValName)
extends MixedAdapterNode(TLRationalImp, TLImp)(
dFn = { p => p.base.v1copy(minLatency = 1) },
uFn = { p => TLRationalManagerPortParameters(direction, p) }) with FormatNode[TLRationalEdgeParameters, TLEdgeOut]
// Credited version of TileLink channels
trait TLCreditedFormatNode extends FormatNode[TLCreditedEdgeParameters, TLCreditedEdgeParameters]
object TLCreditedImp extends SimpleNodeImp[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedEdgeParameters, TLCreditedBundle]
{
def edge(pd: TLCreditedClientPortParameters, pu: TLCreditedManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLCreditedEdgeParameters(pd, pu, p, sourceInfo)
def bundle(e: TLCreditedEdgeParameters) = new TLCreditedBundle(e.bundle)
def render(e: TLCreditedEdgeParameters) = RenderedEdge(colour = "#ffff00" /* yellow */, e.delay.toString)
override def mixO(pd: TLCreditedClientPortParameters, node: OutwardNode[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedBundle]): TLCreditedClientPortParameters =
pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }))
override def mixI(pu: TLCreditedManagerPortParameters, node: InwardNode[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedBundle]): TLCreditedManagerPortParameters =
pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }))
}
case class TLCreditedAdapterNode(
clientFn: TLCreditedClientPortParameters => TLCreditedClientPortParameters = { s => s },
managerFn: TLCreditedManagerPortParameters => TLCreditedManagerPortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLCreditedImp)(clientFn, managerFn) with TLCreditedFormatNode
case class TLCreditedIdentityNode()(implicit valName: ValName) extends IdentityNode(TLCreditedImp)() with TLCreditedFormatNode
object TLCreditedNameNode {
def apply(name: ValName) = TLCreditedIdentityNode()(name)
def apply(name: Option[String]): TLCreditedIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLCreditedIdentityNode = apply(Some(name))
}
case class TLCreditedSourceNode(delay: TLCreditedDelay)(implicit valName: ValName)
extends MixedAdapterNode(TLImp, TLCreditedImp)(
dFn = { p => TLCreditedClientPortParameters(delay, p) },
uFn = { p => p.base.v1copy(minLatency = 1) }) with FormatNode[TLEdgeIn, TLCreditedEdgeParameters] // discard cycles from other clock domain
case class TLCreditedSinkNode(delay: TLCreditedDelay)(implicit valName: ValName)
extends MixedAdapterNode(TLCreditedImp, TLImp)(
dFn = { p => p.base.v1copy(minLatency = 1) },
uFn = { p => TLCreditedManagerPortParameters(delay, p) }) with FormatNode[TLCreditedEdgeParameters, TLEdgeOut]
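// Illustrative note (an editorial assumption, not part of the upstream Nodes.scala): endpoints are
// typically declared inside a LazyModule by wrapping port parameters in these node types, e.g.
// (names hypothetical):
//
//   val masterNode = TLClientNode(Seq(TLMasterPortParameters.v1(
//     clients = Seq(TLMasterParameters.v1(name = "example-master")))))
//
// while a manager end would use TLManagerNode(Seq(TLSlavePortParameters.v1(...))) describing the
// address ranges and transfer sizes it serves; adapters between them use TLAdapterNode, as
// TLBufferNode does above.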
File LazyModuleImp.scala:
package org.chipsalliance.diplomacy.lazymodule
import chisel3.{withClockAndReset, Module, RawModule, Reset, _}
import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo}
import firrtl.passes.InlineAnnotation
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.nodes.Dangle
import scala.collection.immutable.SortedMap
/** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]].
*
* This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy.
*/
sealed trait LazyModuleImpLike extends RawModule {
/** [[LazyModule]] that contains this instance. */
val wrapper: LazyModule
/** IOs that will be automatically "punched" for this instance. */
val auto: AutoBundle
/** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */
protected[diplomacy] val dangles: Seq[Dangle]
// [[wrapper.module]] had better not be accessed while LazyModules are still being built!
require(
LazyModule.scope.isEmpty,
s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}"
)
/** Set module name. Defaults to the containing LazyModule's desiredName. */
override def desiredName: String = wrapper.desiredName
suggestName(wrapper.suggestedName)
/** [[Parameters]] for chisel [[Module]]s. */
implicit val p: Parameters = wrapper.p
/** Instantiate this [[LazyModule]], returning the [[AutoBundle]] and any unconnected [[Dangle]]s from this module and
* submodules.
*/
protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = {
// 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]],
// 2. return [[Dangle]]s from each module.
val childDangles = wrapper.children.reverse.flatMap { c =>
implicit val sourceInfo: SourceInfo = c.info
c.cloneProto.map { cp =>
// If the child is a clone, then recursively set cloneProto of its children as well
def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = {
require(bases.size == clones.size)
(bases.zip(clones)).map { case (l, r) =>
require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}")
l.cloneProto = Some(r)
assignCloneProtos(l.children, r.children)
}
}
assignCloneProtos(c.children, cp.children)
// Clone the child module as a record, and get its [[AutoBundle]]
val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName)
val clonedAuto = clone("auto").asInstanceOf[AutoBundle]
// Get the empty [[Dangle]]'s of the cloned child
val rawDangles = c.cloneDangles()
require(rawDangles.size == clonedAuto.elements.size)
// Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s
val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) }
dangles
}.getOrElse {
// For non-clones, instantiate the child module
val mod = try {
Module(c.module)
} catch {
case e: ChiselException => {
println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}")
throw e
}
}
mod.dangles
}
}
// Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]].
// This will result in a sequence of [[Dangle]] from these [[BaseNode]]s.
val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate())
// Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]]
val allDangles = nodeDangles ++ childDangles
// Group [[allDangles]] by their [[source]].
val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*)
// For each [[source]] set of [[Dangle]]s of size 2, ensure that these
// can be connected as a source-sink pair (have opposite flipped value).
// Make the connection and mark them as [[done]].
val done = Set() ++ pairing.values.filter(_.size == 2).map {
case Seq(a, b) =>
require(a.flipped != b.flipped)
// @todo <> in chisel3 makes directionless connection.
if (a.flipped) {
a.data <> b.data
} else {
b.data <> a.data
}
a.source
case _ => None
}
// Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module.
val forward = allDangles.filter(d => !done(d.source))
// Generate [[AutoBundle]] IO from [[forward]].
val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*))
// Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]]
val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) =>
if (d.flipped) {
d.data <> io
} else {
io <> d.data
}
d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name)
}
// Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]].
wrapper.inModuleBody.reverse.foreach {
_()
}
if (wrapper.shouldBeInlined) {
chisel3.experimental.annotate(new ChiselAnnotation {
def toFirrtl = InlineAnnotation(toNamed)
})
}
// Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]].
(auto, dangles)
}
}
/** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike {
/** Instantiate hardware of this `Module`. */
val (auto, dangles) = instantiate()
}
/** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike {
// These wires are the default clock+reset for all LazyModule children.
// It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the
// [[LazyRawModuleImp]] children.
// Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly.
/** drive clock explicitly. */
val childClock: Clock = Wire(Clock())
/** drive reset explicitly. */
val childReset: Reset = Wire(Reset())
// the default is that these are disabled
childClock := false.B.asClock
childReset := chisel3.DontCare
def provideImplicitClockToLazyChildren: Boolean = false
val (auto, dangles) =
if (provideImplicitClockToLazyChildren) {
withClockAndReset(childClock, childReset) { instantiate() }
} else {
instantiate()
}
}
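// Illustrative sketch only (an editorial assumption, not part of the upstream LazyModuleImp.scala):
// a LazyRawModuleImp has no implicit clock/reset, so an implementation is expected to drive
// childClock/childReset itself, typically from explicit ports as below (names hypothetical).
class ExampleClockedRawImp(outer: LazyModule) extends LazyRawModuleImp(outer) {
  override def provideImplicitClockToLazyChildren: Boolean = true
  val clockIn = IO(Input(Clock()))
  val resetIn = IO(Input(AsyncReset()))
  // Drive the default clock/reset seen by all lazy children of this wrapper.
  childClock := clockIn
  childReset := resetIn
}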
File MixedNode.scala:
package org.chipsalliance.diplomacy.nodes
import chisel3.{Data, DontCare, Wire}
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.{Field, Parameters}
import org.chipsalliance.diplomacy.ValName
import org.chipsalliance.diplomacy.sourceLine
/** One side metadata of a [[Dangle]].
*
* Describes one side of an edge going into or out of a [[BaseNode]].
*
* @param serial
* the global [[BaseNode.serial]] number of the [[BaseNode]] that this [[HalfEdge]] connects to.
* @param index
* the `index` in the [[BaseNode]]'s input or output port list that this [[HalfEdge]] belongs to.
*/
case class HalfEdge(serial: Int, index: Int) extends Ordered[HalfEdge] {
import scala.math.Ordered.orderingToOrdered
def compare(that: HalfEdge): Int = HalfEdge.unapply(this).compare(HalfEdge.unapply(that))
}
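// Editorial note (not in the upstream source): the ordering is lexicographic on (serial, index),
// so e.g. HalfEdge(3, 0) < HalfEdge(3, 1) < HalfEdge(4, 0); LazyModuleImp relies on it when it
// builds the SortedMap of Dangles grouped by their source HalfEdge.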
/** [[Dangle]] captures the `IO` information of a [[LazyModule]] and which two [[BaseNode]]s the [[Edges]]/[[Bundle]]
* connects.
*
* [[Dangle]]s are generated by [[BaseNode.instantiate]] using [[MixedNode.danglesOut]] and [[MixedNode.danglesIn]] ,
* [[LazyModuleImp.instantiate]] connects those that go to internal or explicit IO connections in a [[LazyModule]].
*
* @param source
* the source [[HalfEdge]] of this [[Dangle]], which captures the source [[BaseNode]] and the port `index` within
* that [[BaseNode]].
* @param sink
* sink [[HalfEdge]] of this [[Dangle]], which captures the sink [[BaseNode]] and the port `index` within that
* [[BaseNode]].
* @param flipped
* flip or not in [[AutoBundle.makeElements]]. If true this corresponds to `danglesOut`, if false it corresponds to
* `danglesIn`.
* @param dataOpt
* actual [[Data]] for the hardware connection. Can be empty if this belongs to a cloned module
*/
case class Dangle(source: HalfEdge, sink: HalfEdge, flipped: Boolean, name: String, dataOpt: Option[Data]) {
def data = dataOpt.get
}
/** [[Edges]] is a collection of parameters describing the functionality and connection for an interface, which is often
* derived from the interconnection protocol and can inform the parameterization of the hardware bundles that actually
* implement the protocol.
*/
case class Edges[EI, EO](in: Seq[EI], out: Seq[EO])
/** A field available in [[Parameters]] used to determine whether [[InwardNodeImp.monitor]] will be called. */
case object MonitorsEnabled extends Field[Boolean](true)
/** When rendering the edge in a graphical format, flip the order in which the edges' source and sink are presented.
*
* For example, when rendering graphML, yEd by default tries to put the source node vertically above the sink node, but
* [[RenderFlipped]] inverts this relationship. When a particular [[LazyModule]] contains both source nodes and sink
* nodes, flipping the rendering of one node's edge will usually produce a more concise visual layout for the
* [[LazyModule]].
*/
case object RenderFlipped extends Field[Boolean](false)
/** The sealed node class in the package, all node are derived from it.
*
* @param inner
* Sink interface implementation.
* @param outer
* Source interface implementation.
* @param valName
* val name of this node.
* @tparam DI
* Downward-flowing parameters received on the inner side of the node. It is usually a bunch of parameters
* describing the protocol parameters from a source. For an [[InwardNode]], it is determined by the connected
* [[OutwardNode]]. Since it can be connected to multiple sources, this parameter is always a Seq of source port
* parameters.
* @tparam UI
* Upward-flowing parameters generated by the inner side of the node. It is usually a bunch of parameters describing
* the protocol parameters of a sink. For an [[InwardNode]], it is determined by the node itself.
* @tparam EI
* Edge Parameters describing a connection on the inner side of the node. It is usually a bunch of transfers
* specified for a sink according to protocol.
* @tparam BI
* Bundle type used when connecting to the inner side of the node. It is a hardware interface of this sink interface.
* It should extend from [[chisel3.Data]], which represents the real hardware.
* @tparam DO
* Downward-flowing parameters generated on the outer side of the node. It is usually a bunch of parameters
* describing the protocol parameters of a source. For an [[OutwardNode]], it is determined by the node itself.
* @tparam UO
* Upward-flowing parameters received by the outer side of the node. It is usually a bunch of parameters describing
* the protocol parameters from a sink. For an [[OutwardNode]], it is determined by the connected [[InwardNode]].
* Since it can be connected to multiple sinks, this parameter is always a Seq of sink port parameters.
* @tparam EO
* Edge Parameters describing a connection on the outer side of the node. It is usually a bunch of transfers
* specified for a source according to protocol.
* @tparam BO
* Bundle type used when connecting to the outer side of the node. It is a hardware interface of this source
* interface. It should extend from [[chisel3.Data]], which represents the real hardware.
*
* @note
* Call Graph of [[MixedNode]]
* - line `─`: the source is processed by a function and the result is passed on to others
* - Arrow `→`: target of arrow is generated by source
*
* {{{
* (from the other node)
* ┌─────────────────────────────────────────────────────────[[InwardNode.uiParams]]─────────────┐
* ↓ │
* (binding node when elaboration) [[OutwardNode.uoParams]]────────────────────────[[MixedNode.mapParamsU]]→──────────┐ │
* [[InwardNode.accPI]] │ │ │
* │ │ (based on protocol) │
* │ │ [[MixedNode.inner.edgeI]] │
* │ │ ↓ │
* ↓ │ │ │
* (immobilize after elaboration) (inward port from [[OutwardNode]]) │ ↓ │
* [[InwardNode.iBindings]]──┐ [[MixedNode.iDirectPorts]]────────────────────→[[MixedNode.iPorts]] [[InwardNode.uiParams]] │
* │ │ ↑ │ │ │
* │ │ │ [[OutwardNode.doParams]] │ │
* │ │ │ (from the other node) │ │
* │ │ │ │ │ │
* │ │ │ │ │ │
* │ │ │ └────────┬──────────────┤ │
* │ │ │ │ │ │
* │ │ │ │ (based on protocol) │
* │ │ │ │ [[MixedNode.inner.edgeI]] │
* │ │ │ │ │ │
* │ │ (from the other node) │ ↓ │
* │ └───[[OutwardNode.oPortMapping]] [[OutwardNode.oStar]] │ [[MixedNode.edgesIn]]───┐ │
* │ ↑ ↑ │ │ ↓ │
* │ │ │ │ │ [[MixedNode.in]] │
* │ │ │ │ ↓ ↑ │
* │ (solve star connection) │ │ │ [[MixedNode.bundleIn]]──┘ │
* ├───[[MixedNode.resolveStar]]→─┼─────────────────────────────┤ └────────────────────────────────────┐ │
* │ │ │ [[MixedNode.bundleOut]]─┐ │ │
* │ │ │ ↑ ↓ │ │
* │ │ │ │ [[MixedNode.out]] │ │
* │ ↓ ↓ │ ↑ │ │
* │ ┌─────[[InwardNode.iPortMapping]] [[InwardNode.iStar]] [[MixedNode.edgesOut]]──┘ │ │
* │ │ (from the other node) ↑ │ │
* │ │ │ │ │ │
* │ │ │ [[MixedNode.outer.edgeO]] │ │
* │ │ │ (based on protocol) │ │
* │ │ │ │ │ │
* │ │ │ ┌────────────────────────────────────────┤ │ │
* │ │ │ │ │ │ │
* │ │ │ │ │ │ │
* │ │ │ │ │ │ │
* (immobilize after elaboration)│ ↓ │ │ │ │
* [[OutwardNode.oBindings]]─┘ [[MixedNode.oDirectPorts]]───→[[MixedNode.oPorts]] [[OutwardNode.doParams]] │ │
* ↑ (inward port from [[OutwardNode]]) │ │ │ │
* │ ┌─────────────────────────────────────────┤ │ │ │
* │ │ │ │ │ │
* │ │ │ │ │ │
* [[OutwardNode.accPO]] │ ↓ │ │ │
* (binding node when elaboration) │ [[InwardNode.diParams]]─────→[[MixedNode.mapParamsD]]────────────────────────────┘ │ │
* │ ↑ │ │
* │ └──────────────────────────────────────────────────────────────────────────────────────────┘ │
* └──────────────────────────────────────────────────────────────────────────────────────────────────────────┘
* }}}
*/
abstract class MixedNode[DI, UI, EI, BI <: Data, DO, UO, EO, BO <: Data](
val inner: InwardNodeImp[DI, UI, EI, BI],
val outer: OutwardNodeImp[DO, UO, EO, BO]
)(
implicit valName: ValName)
extends BaseNode
with NodeHandle[DI, UI, EI, BI, DO, UO, EO, BO]
with InwardNode[DI, UI, BI]
with OutwardNode[DO, UO, BO] {
// Generate a [[NodeHandle]] with inward and outward node are both this node.
val inward = this
val outward = this
/** Debug info of nodes binding. */
def bindingInfo: String = s"""$iBindingInfo
|$oBindingInfo
|""".stripMargin
/** Debug info of ports connecting. */
def connectedPortsInfo: String = s"""${oPorts.size} outward ports connected: [${oPorts.map(_._2.name).mkString(",")}]
|${iPorts.size} inward ports connected: [${iPorts.map(_._2.name).mkString(",")}]
|""".stripMargin
/** Debug info of parameters propagations. */
def parametersInfo: String = s"""${doParams.size} downstream outward parameters: [${doParams.mkString(",")}]
|${uoParams.size} upstream outward parameters: [${uoParams.mkString(",")}]
|${diParams.size} downstream inward parameters: [${diParams.mkString(",")}]
|${uiParams.size} upstream inward parameters: [${uiParams.mkString(",")}]
|""".stripMargin
/** For a given node, converts [[OutwardNode.accPO]] and [[InwardNode.accPI]] to [[MixedNode.oPortMapping]] and
* [[MixedNode.iPortMapping]].
*
* Given counts of known inward and outward binding and inward and outward star bindings, return the resolved inward
* stars and outward stars.
*
* This method will also validate the arguments and throw a runtime error if the values are unsuitable for this type
* of node.
*
* @param iKnown
* Number of known-size ([[BIND_ONCE]]) input bindings.
* @param oKnown
* Number of known-size ([[BIND_ONCE]]) output bindings.
* @param iStar
* Number of unknown size ([[BIND_STAR]]) input bindings.
* @param oStar
* Number of unknown size ([[BIND_STAR]]) output bindings.
* @return
* A Tuple of the resolved number of input and output connections.
*/
protected[diplomacy] def resolveStar(iKnown: Int, oKnown: Int, iStar: Int, oStar: Int): (Int, Int)
/** Function to generate downward-flowing outward params from the downward-flowing input params and the current output
* ports.
*
* @param n
* The size of the output sequence to generate.
* @param p
* Sequence of downward-flowing input parameters of this node.
* @return
* A `n`-sized sequence of downward-flowing output edge parameters.
*/
protected[diplomacy] def mapParamsD(n: Int, p: Seq[DI]): Seq[DO]
/** Function to generate upward-flowing input parameters from the upward-flowing output parameters [[uiParams]].
*
* @param n
* Size of the output sequence.
* @param p
* Upward-flowing output edge parameters.
* @return
* A n-sized sequence of upward-flowing input edge parameters.
*/
protected[diplomacy] def mapParamsU(n: Int, p: Seq[UO]): Seq[UI]
/** @return
* The sink cardinality of the node, the number of outputs bound with [[BIND_QUERY]] summed with inputs bound with
* [[BIND_STAR]].
*/
protected[diplomacy] lazy val sinkCard: Int = oBindings.count(_._3 == BIND_QUERY) + iBindings.count(_._3 == BIND_STAR)
/** @return
* The source cardinality of this node, the number of inputs bound with [[BIND_QUERY]] summed with the number of
* output bindings bound with [[BIND_STAR]].
*/
protected[diplomacy] lazy val sourceCard: Int =
iBindings.count(_._3 == BIND_QUERY) + oBindings.count(_._3 == BIND_STAR)
/** @return list of nodes involved in flex bindings with this node. */
protected[diplomacy] lazy val flexes: Seq[BaseNode] =
oBindings.filter(_._3 == BIND_FLEX).map(_._2) ++ iBindings.filter(_._3 == BIND_FLEX).map(_._2)
/** Resolves the flex to be either source or sink and returns the offset where the [[BIND_STAR]] operators begin
* greedily taking up the remaining connections.
*
* @return
* A value >= 0 if it is sink cardinality, a negative value for source cardinality. The magnitude of the return
* value is not relevant.
*/
protected[diplomacy] lazy val flexOffset: Int = {
/** Recursively performs a depth-first search of the [[flexes]], [[BaseNode]]s connected to this node with flex
* operators. The algorithm bottoms out when we either get to a node we have already visited or when we get to a
* connection that is not a flex and can set the direction for us. Otherwise, recurse by visiting the `flexes` of
* each node in the current set and decide whether they should be added to the set or not.
*
* @return
* the mapping of [[BaseNode]]s indexed by their serial numbers.
*/
def DFS(v: BaseNode, visited: Map[Int, BaseNode]): Map[Int, BaseNode] = {
if (visited.contains(v.serial) || !v.flexibleArityDirection) {
visited
} else {
v.flexes.foldLeft(visited + (v.serial -> v))((sum, n) => DFS(n, sum))
}
}
/** Determine which [[BaseNode]]s are involved in resolving the flex connections to/from this node.
*
* @example
* {{{
* a :*=* b :*=* c
* d :*=* b
* e :*=* f
* }}}
*
* `flexSet` for `a`, `b`, `c`, or `d` will be `Set(a, b, c, d)`; `flexSet` for `e` or `f` will be `Set(e, f)`.
*/
val flexSet = DFS(this, Map()).values
/** The total number of :*= operators attached to nodes in this flex set. */
val allSink = flexSet.map(_.sinkCard).sum
/** The total number of :=* operators attached to nodes in this flex set. */
val allSource = flexSet.map(_.sourceCard).sum
require(
allSink == 0 || allSource == 0,
s"The nodes ${flexSet.map(_.name)} which are inter-connected by :*=* have ${allSink} :*= operators and ${allSource} :=* operators connected to them, making it impossible to determine cardinality inference direction."
)
allSink - allSource
}
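// Worked example (illustrative): given `a :*=* b` where some other node also does `x :*= a`,
// the DFS collects flexSet = {a, b}; allSink = 1 (the single :*=) and allSource = 0, so
// flexOffset is positive and the `a :*=* b` edge resolves with sink cardinality, i.e. as if it
// had been written `a :*= b`.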
/** @return A value >= 0 if it is sink cardinality, a negative value for source cardinality. */
protected[diplomacy] def edgeArityDirection(n: BaseNode): Int = {
if (flexibleArityDirection) flexOffset
else if (n.flexibleArityDirection) n.flexOffset
else 0
}
/** For a node which is connected between two nodes, select the one that will influence the direction of the flex
* resolution.
*/
protected[diplomacy] def edgeAritySelect(n: BaseNode, l: => Int, r: => Int): Int = {
val dir = edgeArityDirection(n)
if (dir < 0) l
else if (dir > 0) r
else 1
}
/** Ensure that the same node is not visited twice while resolving `:*=`-style operators. */
private var starCycleGuard = false
/** Resolve all the star operators into concrete indices. As connections are being made, some may be "star"
* connections which need to be resolved in some way to determine how many actual edges they correspond to. We also
* need to build up the ranges of edges which correspond to each binding operator, so that we can apply the correct
* edge parameters and later build up correct bundle connections.
*
* [[oPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that oPort (binding operator).
* [[iPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that iPort (binding operator).
* [[iStar]]: `Int` the value to return for this node `N` for any `N :*= foo` or `N :*=* foo :*= bar`.
* [[oStar]]: `Int` the value to return for this node `N` for any `foo :=* N` or `bar :=* foo :*=* N`.
*/
protected[diplomacy] lazy val (
oPortMapping: Seq[(Int, Int)],
iPortMapping: Seq[(Int, Int)],
oStar: Int,
iStar: Int
) = {
try {
if (starCycleGuard) throw StarCycleException()
starCycleGuard = true
// For a given node N...
// Number of foo :=* N
// + Number of bar :=* foo :*=* N
val oStars = oBindings.count { case (_, n, b, _, _) =>
b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) < 0)
}
// Number of N :*= foo
// + Number of N :*=* foo :*= bar
val iStars = iBindings.count { case (_, n, b, _, _) =>
b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) > 0)
}
// 1 for foo := N
// + bar.iStar for bar :*= foo :*=* N
// + foo.iStar for foo :*= N
// + 0 for foo :=* N
val oKnown = oBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, 0, n.iStar)
case BIND_QUERY => n.iStar
case BIND_STAR => 0
}
}.sum
// 1 for N := foo
// + bar.oStar for N :*=* foo :=* bar
// + foo.oStar for N :=* foo
// + 0 for N :*= foo
val iKnown = iBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, n.oStar, 0)
case BIND_QUERY => n.oStar
case BIND_STAR => 0
}
}.sum
// Resolving the stars is delegated to the node subclass, which implements the algorithm for this.
val (iStar, oStar) = resolveStar(iKnown, oKnown, iStars, oStars)
// Cumulative list of resolved outward binding range starting points
val oSum = oBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, oStar, n.iStar)
case BIND_QUERY => n.iStar
case BIND_STAR => oStar
}
}.scanLeft(0)(_ + _)
// Cumulative list of resolved inward binding range starting points
val iSum = iBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, n.oStar, iStar)
case BIND_QUERY => n.oStar
case BIND_STAR => iStar
}
}.scanLeft(0)(_ + _)
// Create ranges for each binding based on the running sums and return
// those along with resolved values for the star operations.
(oSum.init.zip(oSum.tail), iSum.init.zip(iSum.tail), oStar, iStar)
} catch {
case c: StarCycleException => throw c.copy(loop = context +: c.loop)
}
}
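// Worked example (illustrative, with assumed bindings): suppose this node N has two outward
// bindings, recorded in order as BIND_ONCE (from `foo := N`) and BIND_STAR (from `bar :=* N`).
// Then oKnown = 1 and oStars = 1. If resolveStar decides oStar = 2, the running sum over the
// bindings is Seq(1, 2).scanLeft(0)(_ + _) = Seq(0, 1, 3), so oPortMapping = Seq((0, 1), (1, 3)):
// the := binding owns edge 0 and the :=* binding owns edges 1 and 2.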
/** Sequence of outward ports, before ephemeral-node forwarding is applied.
*
* This should be called after all star bindings are resolved.
*
* Each element is: `j` Port index of this binding within the connected node's [[MixedNode.iPortMapping]]. `n` Instance
* of the inward node on the other side of the binding. `p` View of [[Parameters]] where this connection was made. `s`
* Source info where this connection was made in the source code.
*/
protected[diplomacy] lazy val oDirectPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] =
oBindings.flatMap { case (i, n, _, p, s) =>
// for each binding operator in this node, look at what it connects to
val (start, end) = n.iPortMapping(i)
(start until end).map { j => (j, n, p, s) }
}
/** Sequence of inward ports, before ephemeral-node forwarding is applied.
*
* This should be called after all star bindings are resolved.
*
* Each element is: `j` Port index of this binding within the connected node's [[MixedNode.oPortMapping]]. `n` Instance
* of the outward node on the other side of the binding. `p` View of [[Parameters]] where this connection was made. `s`
* [[SourceInfo]] where this connection was made in the source code.
*/
protected[diplomacy] lazy val iDirectPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] =
iBindings.flatMap { case (i, n, _, p, s) =>
// query this port index range of this node in the other side of node.
val (start, end) = n.oPortMapping(i)
(start until end).map { j => (j, n, p, s) }
}
// Ephemeral nodes (which have non-None iForward/oForward) have in_degree = out_degree
// Thus, there must exist an Eulerian path and the below algorithms terminate
@scala.annotation.tailrec
private def oTrace(
tuple: (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)
): (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo) = tuple match {
case (i, n, p, s) => n.iForward(i) match {
case None => (i, n, p, s)
case Some((j, m)) => oTrace((j, m, p, s))
}
}
@scala.annotation.tailrec
private def iTrace(
tuple: (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)
): (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo) = tuple match {
case (i, n, p, s) => n.oForward(i) match {
case None => (i, n, p, s)
case Some((j, m)) => iTrace((j, m, p, s))
}
}
/** Final output ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved.
*
* Each Port is a tuple of:
* - Numeric index of this binding in the [[InwardNode]] on the other end.
* - [[InwardNode]] on the other end of this binding.
* - A view of [[Parameters]] where the binding occurred.
* - [[SourceInfo]] for source-level error reporting.
*/
lazy val oPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] = oDirectPorts.map(oTrace)
/** Final input ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved.
*
* Each Port is a tuple of:
* - numeric index of this binding in [[OutwardNode]] on the other end.
* - [[OutwardNode]] on the other end of this binding.
* - a view of [[Parameters]] where the binding occurred.
* - [[SourceInfo]] for source-level error reporting.
*/
lazy val iPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] = iDirectPorts.map(iTrace)
private var oParamsCycleGuard = false
protected[diplomacy] lazy val diParams: Seq[DI] = iPorts.map { case (i, n, _, _) => n.doParams(i) }
protected[diplomacy] lazy val doParams: Seq[DO] = {
try {
if (oParamsCycleGuard) throw DownwardCycleException()
oParamsCycleGuard = true
val o = mapParamsD(oPorts.size, diParams)
require(
o.size == oPorts.size,
s"""Diplomacy has detected a problem with your graph:
|At the following node, the number of outward ports should equal the number of produced outward parameters.
|$context
|$connectedPortsInfo
|Downstreamed inward parameters: [${diParams.mkString(",")}]
|Produced outward parameters: [${o.mkString(",")}]
|""".stripMargin
)
o.map(outer.mixO(_, this))
} catch {
case c: DownwardCycleException => throw c.copy(loop = context +: c.loop)
}
}
private var iParamsCycleGuard = false
protected[diplomacy] lazy val uoParams: Seq[UO] = oPorts.map { case (o, n, _, _) => n.uiParams(o) }
protected[diplomacy] lazy val uiParams: Seq[UI] = {
try {
if (iParamsCycleGuard) throw UpwardCycleException()
iParamsCycleGuard = true
val i = mapParamsU(iPorts.size, uoParams)
require(
i.size == iPorts.size,
s"""Diplomacy has detected a problem with your graph:
|At the following node, the number of inward ports should equal the number of produced inward parameters.
|$context
|$connectedPortsInfo
|Upstreamed outward parameters: [${uoParams.mkString(",")}]
|Produced inward parameters: [${i.mkString(",")}]
|""".stripMargin
)
i.map(inner.mixI(_, this))
} catch {
case c: UpwardCycleException => throw c.copy(loop = context +: c.loop)
}
}
/** Outward edge parameters. */
protected[diplomacy] lazy val edgesOut: Seq[EO] =
(oPorts.zip(doParams)).map { case ((i, n, p, s), o) => outer.edgeO(o, n.uiParams(i), p, s) }
/** Inward edge parameters. */
protected[diplomacy] lazy val edgesIn: Seq[EI] =
(iPorts.zip(uiParams)).map { case ((o, n, p, s), i) => inner.edgeI(n.doParams(o), i, p, s) }
/** A tuple of the input edge parameters and output edge parameters for the edges bound to this node.
*
* If you need to access the edges of a foreign Node, use this method (in/out create bundles).
*/
lazy val edges: Edges[EI, EO] = Edges(edgesIn, edgesOut)
/** Create actual Wires corresponding to the Bundles parameterized by the outward edges of this node. */
protected[diplomacy] lazy val bundleOut: Seq[BO] = edgesOut.map { e =>
val x = Wire(outer.bundleO(e)).suggestName(s"${valName.value}Out")
// TODO: DontCare unconnected forwarded diplomatic signals for compatibility reasons.
// In the future, we should add an option to decide whether to allow unconnected signals in the LazyModule.
x := DontCare
x
}
/** Create actual Wires corresponding to the Bundles parameterized by the inward edges of this node. */
protected[diplomacy] lazy val bundleIn: Seq[BI] = edgesIn.map { e =>
val x = Wire(inner.bundleI(e)).suggestName(s"${valName.value}In")
// TODO: DontCare unconnected forwarded diplomatic signals for compatibility reasons.
// In the future, we should add an option to decide whether to allow unconnected signals in the LazyModule.
x := DontCare
x
}
private def emptyDanglesOut: Seq[Dangle] = oPorts.zipWithIndex.map { case ((j, n, _, _), i) =>
Dangle(
source = HalfEdge(serial, i),
sink = HalfEdge(n.serial, j),
flipped = false,
name = wirePrefix + "out",
dataOpt = None
)
}
private def emptyDanglesIn: Seq[Dangle] = iPorts.zipWithIndex.map { case ((j, n, _, _), i) =>
Dangle(
source = HalfEdge(n.serial, j),
sink = HalfEdge(serial, i),
flipped = true,
name = wirePrefix + "in",
dataOpt = None
)
}
/** Create the [[Dangle]]s which describe the connections from this node output to other nodes inputs. */
protected[diplomacy] def danglesOut: Seq[Dangle] = emptyDanglesOut.zipWithIndex.map { case (d, i) =>
d.copy(dataOpt = Some(bundleOut(i)))
}
/** Create the [[Dangle]]s which describe the connections from this node input from other nodes outputs. */
protected[diplomacy] def danglesIn: Seq[Dangle] = emptyDanglesIn.zipWithIndex.map { case (d, i) =>
d.copy(dataOpt = Some(bundleIn(i)))
}
private[diplomacy] var instantiated = false
/** Gather Bundle and edge parameters of outward ports.
*
* Accessors to the result of negotiation to be used within [[LazyModuleImp]] code. Should only be used within
* [[LazyModuleImp]] code or after its instantiation has completed.
*/
def out: Seq[(BO, EO)] = {
require(
instantiated,
s"$name.out should not be called until after instantiation of its parent LazyModule.module has begun"
)
bundleOut.zip(edgesOut)
}
/** Gather Bundle and edge parameters of inward ports.
*
* Accessors to the result of negotiation to be used within [[LazyModuleImp]] code. Should only be used within
* [[LazyModuleImp]] code or after its instantiation has completed.
*/
def in: Seq[(BI, EI)] = {
require(
instantiated,
s"$name.in should not be called until after instantiation of its parent LazyModule.module has begun"
)
bundleIn.zip(edgesIn)
}
/** Actually instantiate this node during [[LazyModuleImp]] evaluation. Mark that it's safe to use the Bundle wires,
* instantiate monitors on all input ports if appropriate, and return all the dangles of this node.
*/
protected[diplomacy] def instantiate(): Seq[Dangle] = {
instantiated = true
if (!circuitIdentity) {
(iPorts.zip(in)).foreach { case ((_, _, p, _), (b, e)) => if (p(MonitorsEnabled)) inner.monitor(b, e) }
}
danglesOut ++ danglesIn
}
protected[diplomacy] def cloneDangles(): Seq[Dangle] = emptyDanglesOut ++ emptyDanglesIn
/** Connects the outward part of a node with the inward part of this node. */
protected[diplomacy] def bind(
h: OutwardNode[DI, UI, BI],
binding: NodeBinding
)(
implicit p: Parameters,
sourceInfo: SourceInfo
): Unit = {
val x = this // x := y
val y = h
sourceLine(sourceInfo, " at ", "")
val i = x.iPushed
val o = y.oPushed
y.oPush(
i,
x,
binding match {
case BIND_ONCE => BIND_ONCE
case BIND_FLEX => BIND_FLEX
case BIND_STAR => BIND_QUERY
case BIND_QUERY => BIND_STAR
}
)
x.iPush(o, y, binding)
}
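// Orientation note (illustrative comment, not part of the original file): the public binding
// operators on the inward side map onto these NodeBinding values roughly as := -> BIND_ONCE,
// :*= -> BIND_STAR, :=* -> BIND_QUERY and :*=* -> BIND_FLEX; the outward node records the
// mirrored value computed in the match above.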
/** Metadata for printing the node graph. */
def inputs: Seq[(OutwardNode[DI, UI, BI], RenderedEdge)] = (iPorts.zip(edgesIn)).map { case ((_, n, p, _), e) =>
val re = inner.render(e)
(n, re.copy(flipped = re.flipped != p(RenderFlipped)))
}
/** Metadata for printing the node graph. */
def outputs: Seq[(InwardNode[DO, UO, BO], RenderedEdge)] = oPorts.map { case (i, n, _, _) => (n, n.inputs(i)._2) }
}
| module TLBuffer_a32d64s1k1z4u( // @[Buffer.scala:40:9]
input clock, // @[Buffer.scala:40:9]
input reset, // @[Buffer.scala:40:9]
output auto_in_a_ready, // @[LazyModuleImp.scala:107:25]
input auto_in_a_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_in_a_bits_size, // @[LazyModuleImp.scala:107:25]
input [31:0] auto_in_a_bits_address, // @[LazyModuleImp.scala:107:25]
input [7:0] auto_in_a_bits_mask, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_in_a_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_in_d_ready, // @[LazyModuleImp.scala:107:25]
output auto_in_d_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_in_d_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_in_d_bits_param, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_in_d_bits_size, // @[LazyModuleImp.scala:107:25]
output auto_in_d_bits_source, // @[LazyModuleImp.scala:107:25]
output auto_in_d_bits_sink, // @[LazyModuleImp.scala:107:25]
output auto_in_d_bits_denied, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_in_d_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_in_d_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input auto_out_a_ready, // @[LazyModuleImp.scala:107:25]
output auto_out_a_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_a_bits_param, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_out_a_bits_size, // @[LazyModuleImp.scala:107:25]
output auto_out_a_bits_source, // @[LazyModuleImp.scala:107:25]
output [31:0] auto_out_a_bits_address, // @[LazyModuleImp.scala:107:25]
output [7:0] auto_out_a_bits_mask, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_out_a_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_out_a_bits_corrupt, // @[LazyModuleImp.scala:107:25]
output auto_out_d_ready, // @[LazyModuleImp.scala:107:25]
input auto_out_d_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_out_d_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_out_d_bits_param, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_out_d_bits_size, // @[LazyModuleImp.scala:107:25]
input auto_out_d_bits_source, // @[LazyModuleImp.scala:107:25]
input auto_out_d_bits_sink, // @[LazyModuleImp.scala:107:25]
input auto_out_d_bits_denied, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_out_d_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_out_d_bits_corrupt // @[LazyModuleImp.scala:107:25]
);
wire auto_in_a_valid_0 = auto_in_a_valid; // @[Buffer.scala:40:9]
wire [2:0] auto_in_a_bits_opcode_0 = auto_in_a_bits_opcode; // @[Buffer.scala:40:9]
wire [3:0] auto_in_a_bits_size_0 = auto_in_a_bits_size; // @[Buffer.scala:40:9]
wire [31:0] auto_in_a_bits_address_0 = auto_in_a_bits_address; // @[Buffer.scala:40:9]
wire [7:0] auto_in_a_bits_mask_0 = auto_in_a_bits_mask; // @[Buffer.scala:40:9]
wire [63:0] auto_in_a_bits_data_0 = auto_in_a_bits_data; // @[Buffer.scala:40:9]
wire auto_in_d_ready_0 = auto_in_d_ready; // @[Buffer.scala:40:9]
wire auto_out_a_ready_0 = auto_out_a_ready; // @[Buffer.scala:40:9]
wire auto_out_d_valid_0 = auto_out_d_valid; // @[Buffer.scala:40:9]
wire [2:0] auto_out_d_bits_opcode_0 = auto_out_d_bits_opcode; // @[Buffer.scala:40:9]
wire [1:0] auto_out_d_bits_param_0 = auto_out_d_bits_param; // @[Buffer.scala:40:9]
wire [3:0] auto_out_d_bits_size_0 = auto_out_d_bits_size; // @[Buffer.scala:40:9]
wire auto_out_d_bits_source_0 = auto_out_d_bits_source; // @[Buffer.scala:40:9]
wire auto_out_d_bits_sink_0 = auto_out_d_bits_sink; // @[Buffer.scala:40:9]
wire auto_out_d_bits_denied_0 = auto_out_d_bits_denied; // @[Buffer.scala:40:9]
wire [63:0] auto_out_d_bits_data_0 = auto_out_d_bits_data; // @[Buffer.scala:40:9]
wire auto_out_d_bits_corrupt_0 = auto_out_d_bits_corrupt; // @[Buffer.scala:40:9]
wire auto_in_a_bits_source = 1'h0; // @[Decoupled.scala:362:21]
wire auto_in_a_bits_corrupt = 1'h0; // @[Decoupled.scala:362:21]
wire nodeIn_a_bits_source = 1'h0; // @[Decoupled.scala:362:21]
wire nodeIn_a_bits_corrupt = 1'h0; // @[Decoupled.scala:362:21]
wire [2:0] auto_in_a_bits_param = 3'h0; // @[Decoupled.scala:362:21]
wire nodeIn_a_ready; // @[MixedNode.scala:551:17]
wire [2:0] nodeIn_a_bits_param = 3'h0; // @[Decoupled.scala:362:21]
wire nodeIn_a_valid = auto_in_a_valid_0; // @[Buffer.scala:40:9]
wire [2:0] nodeIn_a_bits_opcode = auto_in_a_bits_opcode_0; // @[Buffer.scala:40:9]
wire [3:0] nodeIn_a_bits_size = auto_in_a_bits_size_0; // @[Buffer.scala:40:9]
wire [31:0] nodeIn_a_bits_address = auto_in_a_bits_address_0; // @[Buffer.scala:40:9]
wire [7:0] nodeIn_a_bits_mask = auto_in_a_bits_mask_0; // @[Buffer.scala:40:9]
wire [63:0] nodeIn_a_bits_data = auto_in_a_bits_data_0; // @[Buffer.scala:40:9]
wire nodeIn_d_ready = auto_in_d_ready_0; // @[Buffer.scala:40:9]
wire nodeIn_d_valid; // @[MixedNode.scala:551:17]
wire [2:0] nodeIn_d_bits_opcode; // @[MixedNode.scala:551:17]
wire [1:0] nodeIn_d_bits_param; // @[MixedNode.scala:551:17]
wire [3:0] nodeIn_d_bits_size; // @[MixedNode.scala:551:17]
wire nodeIn_d_bits_source; // @[MixedNode.scala:551:17]
wire nodeIn_d_bits_sink; // @[MixedNode.scala:551:17]
wire nodeIn_d_bits_denied; // @[MixedNode.scala:551:17]
wire [63:0] nodeIn_d_bits_data; // @[MixedNode.scala:551:17]
wire nodeIn_d_bits_corrupt; // @[MixedNode.scala:551:17]
wire nodeOut_a_ready = auto_out_a_ready_0; // @[Buffer.scala:40:9]
wire nodeOut_a_valid; // @[MixedNode.scala:542:17]
wire [2:0] nodeOut_a_bits_opcode; // @[MixedNode.scala:542:17]
wire [2:0] nodeOut_a_bits_param; // @[MixedNode.scala:542:17]
wire [3:0] nodeOut_a_bits_size; // @[MixedNode.scala:542:17]
wire nodeOut_a_bits_source; // @[MixedNode.scala:542:17]
wire [31:0] nodeOut_a_bits_address; // @[MixedNode.scala:542:17]
wire [7:0] nodeOut_a_bits_mask; // @[MixedNode.scala:542:17]
wire [63:0] nodeOut_a_bits_data; // @[MixedNode.scala:542:17]
wire nodeOut_a_bits_corrupt; // @[MixedNode.scala:542:17]
wire nodeOut_d_ready; // @[MixedNode.scala:542:17]
wire nodeOut_d_valid = auto_out_d_valid_0; // @[Buffer.scala:40:9]
wire [2:0] nodeOut_d_bits_opcode = auto_out_d_bits_opcode_0; // @[Buffer.scala:40:9]
wire [1:0] nodeOut_d_bits_param = auto_out_d_bits_param_0; // @[Buffer.scala:40:9]
wire [3:0] nodeOut_d_bits_size = auto_out_d_bits_size_0; // @[Buffer.scala:40:9]
wire nodeOut_d_bits_source = auto_out_d_bits_source_0; // @[Buffer.scala:40:9]
wire nodeOut_d_bits_sink = auto_out_d_bits_sink_0; // @[Buffer.scala:40:9]
wire nodeOut_d_bits_denied = auto_out_d_bits_denied_0; // @[Buffer.scala:40:9]
wire [63:0] nodeOut_d_bits_data = auto_out_d_bits_data_0; // @[Buffer.scala:40:9]
wire nodeOut_d_bits_corrupt = auto_out_d_bits_corrupt_0; // @[Buffer.scala:40:9]
wire auto_in_a_ready_0; // @[Buffer.scala:40:9]
wire [2:0] auto_in_d_bits_opcode_0; // @[Buffer.scala:40:9]
wire [1:0] auto_in_d_bits_param_0; // @[Buffer.scala:40:9]
wire [3:0] auto_in_d_bits_size_0; // @[Buffer.scala:40:9]
wire auto_in_d_bits_source_0; // @[Buffer.scala:40:9]
wire auto_in_d_bits_sink_0; // @[Buffer.scala:40:9]
wire auto_in_d_bits_denied_0; // @[Buffer.scala:40:9]
wire [63:0] auto_in_d_bits_data_0; // @[Buffer.scala:40:9]
wire auto_in_d_bits_corrupt_0; // @[Buffer.scala:40:9]
wire auto_in_d_valid_0; // @[Buffer.scala:40:9]
wire [2:0] auto_out_a_bits_opcode_0; // @[Buffer.scala:40:9]
wire [2:0] auto_out_a_bits_param_0; // @[Buffer.scala:40:9]
wire [3:0] auto_out_a_bits_size_0; // @[Buffer.scala:40:9]
wire auto_out_a_bits_source_0; // @[Buffer.scala:40:9]
wire [31:0] auto_out_a_bits_address_0; // @[Buffer.scala:40:9]
wire [7:0] auto_out_a_bits_mask_0; // @[Buffer.scala:40:9]
wire [63:0] auto_out_a_bits_data_0; // @[Buffer.scala:40:9]
wire auto_out_a_bits_corrupt_0; // @[Buffer.scala:40:9]
wire auto_out_a_valid_0; // @[Buffer.scala:40:9]
wire auto_out_d_ready_0; // @[Buffer.scala:40:9]
assign auto_in_a_ready_0 = nodeIn_a_ready; // @[Buffer.scala:40:9]
assign auto_in_d_valid_0 = nodeIn_d_valid; // @[Buffer.scala:40:9]
assign auto_in_d_bits_opcode_0 = nodeIn_d_bits_opcode; // @[Buffer.scala:40:9]
assign auto_in_d_bits_param_0 = nodeIn_d_bits_param; // @[Buffer.scala:40:9]
assign auto_in_d_bits_size_0 = nodeIn_d_bits_size; // @[Buffer.scala:40:9]
assign auto_in_d_bits_source_0 = nodeIn_d_bits_source; // @[Buffer.scala:40:9]
assign auto_in_d_bits_sink_0 = nodeIn_d_bits_sink; // @[Buffer.scala:40:9]
assign auto_in_d_bits_denied_0 = nodeIn_d_bits_denied; // @[Buffer.scala:40:9]
assign auto_in_d_bits_data_0 = nodeIn_d_bits_data; // @[Buffer.scala:40:9]
assign auto_in_d_bits_corrupt_0 = nodeIn_d_bits_corrupt; // @[Buffer.scala:40:9]
assign auto_out_a_valid_0 = nodeOut_a_valid; // @[Buffer.scala:40:9]
assign auto_out_a_bits_opcode_0 = nodeOut_a_bits_opcode; // @[Buffer.scala:40:9]
assign auto_out_a_bits_param_0 = nodeOut_a_bits_param; // @[Buffer.scala:40:9]
assign auto_out_a_bits_size_0 = nodeOut_a_bits_size; // @[Buffer.scala:40:9]
assign auto_out_a_bits_source_0 = nodeOut_a_bits_source; // @[Buffer.scala:40:9]
assign auto_out_a_bits_address_0 = nodeOut_a_bits_address; // @[Buffer.scala:40:9]
assign auto_out_a_bits_mask_0 = nodeOut_a_bits_mask; // @[Buffer.scala:40:9]
assign auto_out_a_bits_data_0 = nodeOut_a_bits_data; // @[Buffer.scala:40:9]
assign auto_out_a_bits_corrupt_0 = nodeOut_a_bits_corrupt; // @[Buffer.scala:40:9]
assign auto_out_d_ready_0 = nodeOut_d_ready; // @[Buffer.scala:40:9]
TLMonitor_47 monitor ( // @[Nodes.scala:27:25]
.clock (clock),
.reset (reset),
.io_in_a_ready (nodeIn_a_ready), // @[MixedNode.scala:551:17]
.io_in_a_valid (nodeIn_a_valid), // @[MixedNode.scala:551:17]
.io_in_a_bits_opcode (nodeIn_a_bits_opcode), // @[MixedNode.scala:551:17]
.io_in_a_bits_size (nodeIn_a_bits_size), // @[MixedNode.scala:551:17]
.io_in_a_bits_address (nodeIn_a_bits_address), // @[MixedNode.scala:551:17]
.io_in_a_bits_mask (nodeIn_a_bits_mask), // @[MixedNode.scala:551:17]
.io_in_a_bits_data (nodeIn_a_bits_data), // @[MixedNode.scala:551:17]
.io_in_d_ready (nodeIn_d_ready), // @[MixedNode.scala:551:17]
.io_in_d_valid (nodeIn_d_valid), // @[MixedNode.scala:551:17]
.io_in_d_bits_opcode (nodeIn_d_bits_opcode), // @[MixedNode.scala:551:17]
.io_in_d_bits_param (nodeIn_d_bits_param), // @[MixedNode.scala:551:17]
.io_in_d_bits_size (nodeIn_d_bits_size), // @[MixedNode.scala:551:17]
.io_in_d_bits_source (nodeIn_d_bits_source), // @[MixedNode.scala:551:17]
.io_in_d_bits_sink (nodeIn_d_bits_sink), // @[MixedNode.scala:551:17]
.io_in_d_bits_denied (nodeIn_d_bits_denied), // @[MixedNode.scala:551:17]
.io_in_d_bits_data (nodeIn_d_bits_data), // @[MixedNode.scala:551:17]
.io_in_d_bits_corrupt (nodeIn_d_bits_corrupt) // @[MixedNode.scala:551:17]
); // @[Nodes.scala:27:25]
Queue2_TLBundleA_a32d64s1k1z4u nodeOut_a_q ( // @[Decoupled.scala:362:21]
.clock (clock),
.reset (reset),
.io_enq_ready (nodeIn_a_ready),
.io_enq_valid (nodeIn_a_valid), // @[MixedNode.scala:551:17]
.io_enq_bits_opcode (nodeIn_a_bits_opcode), // @[MixedNode.scala:551:17]
.io_enq_bits_size (nodeIn_a_bits_size), // @[MixedNode.scala:551:17]
.io_enq_bits_address (nodeIn_a_bits_address), // @[MixedNode.scala:551:17]
.io_enq_bits_mask (nodeIn_a_bits_mask), // @[MixedNode.scala:551:17]
.io_enq_bits_data (nodeIn_a_bits_data), // @[MixedNode.scala:551:17]
.io_deq_ready (nodeOut_a_ready), // @[MixedNode.scala:542:17]
.io_deq_valid (nodeOut_a_valid),
.io_deq_bits_opcode (nodeOut_a_bits_opcode),
.io_deq_bits_param (nodeOut_a_bits_param),
.io_deq_bits_size (nodeOut_a_bits_size),
.io_deq_bits_source (nodeOut_a_bits_source),
.io_deq_bits_address (nodeOut_a_bits_address),
.io_deq_bits_mask (nodeOut_a_bits_mask),
.io_deq_bits_data (nodeOut_a_bits_data),
.io_deq_bits_corrupt (nodeOut_a_bits_corrupt)
); // @[Decoupled.scala:362:21]
Queue2_TLBundleD_a32d64s1k1z4u nodeIn_d_q ( // @[Decoupled.scala:362:21]
.clock (clock),
.reset (reset),
.io_enq_ready (nodeOut_d_ready),
.io_enq_valid (nodeOut_d_valid), // @[MixedNode.scala:542:17]
.io_enq_bits_opcode (nodeOut_d_bits_opcode), // @[MixedNode.scala:542:17]
.io_enq_bits_param (nodeOut_d_bits_param), // @[MixedNode.scala:542:17]
.io_enq_bits_size (nodeOut_d_bits_size), // @[MixedNode.scala:542:17]
.io_enq_bits_source (nodeOut_d_bits_source), // @[MixedNode.scala:542:17]
.io_enq_bits_sink (nodeOut_d_bits_sink), // @[MixedNode.scala:542:17]
.io_enq_bits_denied (nodeOut_d_bits_denied), // @[MixedNode.scala:542:17]
.io_enq_bits_data (nodeOut_d_bits_data), // @[MixedNode.scala:542:17]
.io_enq_bits_corrupt (nodeOut_d_bits_corrupt), // @[MixedNode.scala:542:17]
.io_deq_ready (nodeIn_d_ready), // @[MixedNode.scala:551:17]
.io_deq_valid (nodeIn_d_valid),
.io_deq_bits_opcode (nodeIn_d_bits_opcode),
.io_deq_bits_param (nodeIn_d_bits_param),
.io_deq_bits_size (nodeIn_d_bits_size),
.io_deq_bits_source (nodeIn_d_bits_source),
.io_deq_bits_sink (nodeIn_d_bits_sink),
.io_deq_bits_denied (nodeIn_d_bits_denied),
.io_deq_bits_data (nodeIn_d_bits_data),
.io_deq_bits_corrupt (nodeIn_d_bits_corrupt)
); // @[Decoupled.scala:362:21]
assign auto_in_a_ready = auto_in_a_ready_0; // @[Buffer.scala:40:9]
assign auto_in_d_valid = auto_in_d_valid_0; // @[Buffer.scala:40:9]
assign auto_in_d_bits_opcode = auto_in_d_bits_opcode_0; // @[Buffer.scala:40:9]
assign auto_in_d_bits_param = auto_in_d_bits_param_0; // @[Buffer.scala:40:9]
assign auto_in_d_bits_size = auto_in_d_bits_size_0; // @[Buffer.scala:40:9]
assign auto_in_d_bits_source = auto_in_d_bits_source_0; // @[Buffer.scala:40:9]
assign auto_in_d_bits_sink = auto_in_d_bits_sink_0; // @[Buffer.scala:40:9]
assign auto_in_d_bits_denied = auto_in_d_bits_denied_0; // @[Buffer.scala:40:9]
assign auto_in_d_bits_data = auto_in_d_bits_data_0; // @[Buffer.scala:40:9]
assign auto_in_d_bits_corrupt = auto_in_d_bits_corrupt_0; // @[Buffer.scala:40:9]
assign auto_out_a_valid = auto_out_a_valid_0; // @[Buffer.scala:40:9]
assign auto_out_a_bits_opcode = auto_out_a_bits_opcode_0; // @[Buffer.scala:40:9]
assign auto_out_a_bits_param = auto_out_a_bits_param_0; // @[Buffer.scala:40:9]
assign auto_out_a_bits_size = auto_out_a_bits_size_0; // @[Buffer.scala:40:9]
assign auto_out_a_bits_source = auto_out_a_bits_source_0; // @[Buffer.scala:40:9]
assign auto_out_a_bits_address = auto_out_a_bits_address_0; // @[Buffer.scala:40:9]
assign auto_out_a_bits_mask = auto_out_a_bits_mask_0; // @[Buffer.scala:40:9]
assign auto_out_a_bits_data = auto_out_a_bits_data_0; // @[Buffer.scala:40:9]
assign auto_out_a_bits_corrupt = auto_out_a_bits_corrupt_0; // @[Buffer.scala:40:9]
assign auto_out_d_ready = auto_out_d_ready_0; // @[Buffer.scala:40:9]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File Buffer.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import org.chipsalliance.diplomacy.lazymodule._
import freechips.rocketchip.diplomacy.BufferParams
class TLBufferNode (
a: BufferParams,
b: BufferParams,
c: BufferParams,
d: BufferParams,
e: BufferParams)(implicit valName: ValName) extends TLAdapterNode(
clientFn = { p => p.v1copy(minLatency = p.minLatency + b.latency + c.latency) },
managerFn = { p => p.v1copy(minLatency = p.minLatency + a.latency + d.latency) }
) {
override lazy val nodedebugstring = s"a:${a.toString}, b:${b.toString}, c:${c.toString}, d:${d.toString}, e:${e.toString}"
override def circuitIdentity = List(a,b,c,d,e).forall(_ == BufferParams.none)
}
class TLBuffer(
a: BufferParams,
b: BufferParams,
c: BufferParams,
d: BufferParams,
e: BufferParams)(implicit p: Parameters) extends LazyModule
{
def this(ace: BufferParams, bd: BufferParams)(implicit p: Parameters) = this(ace, bd, ace, bd, ace)
def this(abcde: BufferParams)(implicit p: Parameters) = this(abcde, abcde)
def this()(implicit p: Parameters) = this(BufferParams.default)
val node = new TLBufferNode(a, b, c, d, e)
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
def headBundle = node.out.head._2.bundle
override def desiredName = (Seq("TLBuffer") ++ node.out.headOption.map(_._2.bundle.shortName)).mkString("_")
(node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
out.a <> a(in .a)
in .d <> d(out.d)
if (edgeOut.manager.anySupportAcquireB && edgeOut.client.anySupportProbe) {
in .b <> b(out.b)
out.c <> c(in .c)
out.e <> e(in .e)
} else {
in.b.valid := false.B
in.c.ready := true.B
in.e.ready := true.B
out.b.ready := true.B
out.c.valid := false.B
out.e.valid := false.B
}
}
}
}
object TLBuffer
{
def apply() (implicit p: Parameters): TLNode = apply(BufferParams.default)
def apply(abcde: BufferParams) (implicit p: Parameters): TLNode = apply(abcde, abcde)
def apply(ace: BufferParams, bd: BufferParams)(implicit p: Parameters): TLNode = apply(ace, bd, ace, bd, ace)
def apply(
a: BufferParams,
b: BufferParams,
c: BufferParams,
d: BufferParams,
e: BufferParams)(implicit p: Parameters): TLNode =
{
val buffer = LazyModule(new TLBuffer(a, b, c, d, e))
buffer.node
}
def chain(depth: Int, name: Option[String] = None)(implicit p: Parameters): Seq[TLNode] = {
val buffers = Seq.fill(depth) { LazyModule(new TLBuffer()) }
name.foreach { n => buffers.zipWithIndex.foreach { case (b, i) => b.suggestName(s"${n}_${i}") } }
buffers.map(_.node)
}
def chainNode(depth: Int, name: Option[String] = None)(implicit p: Parameters): TLNode = {
chain(depth, name)
.reduceLeftOption(_ :*=* _)
.getOrElse(TLNameNode("no_buffer"))
}
}
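// Usage sketch (illustrative comment, not part of the original file): a TLBuffer is normally
// dropped between two diplomatic nodes to register-slice the link, e.g. for hypothetical
// `client` and `manager` LazyModules:
//   manager.node := TLBuffer(BufferParams.default) := client.node
// or, to insert several stages at once, TLBuffer.chainNode(2, Some("mem_buffers")).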
File MemoryBus.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.subsystem
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy.lazymodule._
import freechips.rocketchip.devices.tilelink.{BuiltInDevices, HasBuiltInDeviceParams, BuiltInErrorDeviceParams, BuiltInZeroDeviceParams}
import freechips.rocketchip.tilelink.{
ReplicatedRegion, HasTLBusParams, HasRegionReplicatorParams, TLBusWrapper,
TLBusWrapperInstantiationLike, RegionReplicator, TLXbar, TLInwardNode,
TLOutwardNode, ProbePicker, TLEdge, TLFIFOFixer
}
import freechips.rocketchip.util.Location
/** Parameterization of the memory-side bus created for each memory channel */
case class MemoryBusParams(
beatBytes: Int,
blockBytes: Int,
dtsFrequency: Option[BigInt] = None,
zeroDevice: Option[BuiltInZeroDeviceParams] = None,
errorDevice: Option[BuiltInErrorDeviceParams] = None,
replication: Option[ReplicatedRegion] = None)
extends HasTLBusParams
with HasBuiltInDeviceParams
with HasRegionReplicatorParams
with TLBusWrapperInstantiationLike
{
def instantiate(context: HasTileLinkLocations, loc: Location[TLBusWrapper])(implicit p: Parameters): MemoryBus = {
val mbus = LazyModule(new MemoryBus(this, loc.name))
mbus.suggestName(loc.name)
context.tlBusWrapperLocationMap += (loc -> mbus)
mbus
}
}
/** Wrapper for creating TL nodes from a bus connected to the back of each mem channel */
class MemoryBus(params: MemoryBusParams, name: String = "memory_bus")(implicit p: Parameters)
extends TLBusWrapper(params, name)(p)
{
private val replicator = params.replication.map(r => LazyModule(new RegionReplicator(r)))
val prefixNode = replicator.map { r =>
r.prefix := addressPrefixNexusNode
addressPrefixNexusNode
}
private val xbar = LazyModule(new TLXbar(nameSuffix = Some(name))).suggestName(busName + "_xbar")
val inwardNode: TLInwardNode =
replicator.map(xbar.node :*=* TLFIFOFixer(TLFIFOFixer.all) :*=* _.node)
.getOrElse(xbar.node :*=* TLFIFOFixer(TLFIFOFixer.all))
val outwardNode: TLOutwardNode = ProbePicker() :*= xbar.node
def busView: TLEdge = xbar.node.edges.in.head
val builtInDevices: BuiltInDevices = BuiltInDevices.attach(params, outwardNode)
}
File ClockDomain.scala:
package freechips.rocketchip.prci
import chisel3._
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy.lazymodule._
abstract class Domain(implicit p: Parameters) extends LazyModule with HasDomainCrossing
{
def clockBundle: ClockBundle
lazy val module = new Impl
class Impl extends LazyRawModuleImp(this) {
childClock := clockBundle.clock
childReset := clockBundle.reset
override def provideImplicitClockToLazyChildren = true
// these are just for backwards compatibility with external devices
// that were manually wiring themselves to the domain's clock/reset input:
val clock = IO(Output(chiselTypeOf(clockBundle.clock)))
val reset = IO(Output(chiselTypeOf(clockBundle.reset)))
clock := clockBundle.clock
reset := clockBundle.reset
}
}
abstract class ClockDomain(implicit p: Parameters) extends Domain with HasClockDomainCrossing
class ClockSinkDomain(val clockSinkParams: ClockSinkParameters)(implicit p: Parameters) extends ClockDomain
{
def this(take: Option[ClockParameters] = None, name: Option[String] = None)(implicit p: Parameters) = this(ClockSinkParameters(take = take, name = name))
val clockNode = ClockSinkNode(Seq(clockSinkParams))
def clockBundle = clockNode.in.head._1
override lazy val desiredName = (clockSinkParams.name.toSeq :+ "ClockSinkDomain").mkString
}
class ClockSourceDomain(val clockSourceParams: ClockSourceParameters)(implicit p: Parameters) extends ClockDomain
{
def this(give: Option[ClockParameters] = None, name: Option[String] = None)(implicit p: Parameters) = this(ClockSourceParameters(give = give, name = name))
val clockNode = ClockSourceNode(Seq(clockSourceParams))
def clockBundle = clockNode.out.head._1
override lazy val desiredName = (clockSourceParams.name.toSeq :+ "ClockSourceDomain").mkString
}
abstract class ResetDomain(implicit p: Parameters) extends Domain with HasResetDomainCrossing
File ClockGroup.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.prci
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import org.chipsalliance.diplomacy.lazymodule._
import org.chipsalliance.diplomacy.nodes._
import freechips.rocketchip.resources.FixedClockResource
case class ClockGroupingNode(groupName: String)(implicit valName: ValName)
extends MixedNexusNode(ClockGroupImp, ClockImp)(
dFn = { _ => ClockSourceParameters() },
uFn = { seq => ClockGroupSinkParameters(name = groupName, members = seq) })
{
override def circuitIdentity = outputs.size == 1
}
class ClockGroup(groupName: String)(implicit p: Parameters) extends LazyModule
{
val node = ClockGroupingNode(groupName)
lazy val module = new Impl
class Impl extends LazyRawModuleImp(this) {
val (in, _) = node.in(0)
val (out, _) = node.out.unzip
require (node.in.size == 1)
require (in.member.size == out.size)
(in.member.data zip out) foreach { case (i, o) => o := i }
}
}
object ClockGroup
{
def apply()(implicit p: Parameters, valName: ValName) = LazyModule(new ClockGroup(valName.name)).node
}
case class ClockGroupAggregateNode(groupName: String)(implicit valName: ValName)
extends NexusNode(ClockGroupImp)(
dFn = { _ => ClockGroupSourceParameters() },
uFn = { seq => ClockGroupSinkParameters(name = groupName, members = seq.flatMap(_.members))})
{
override def circuitIdentity = outputs.size == 1
}
class ClockGroupAggregator(groupName: String)(implicit p: Parameters) extends LazyModule
{
val node = ClockGroupAggregateNode(groupName)
override lazy val desiredName = s"ClockGroupAggregator_$groupName"
lazy val module = new Impl
class Impl extends LazyRawModuleImp(this) {
val (in, _) = node.in.unzip
val (out, _) = node.out.unzip
val outputs = out.flatMap(_.member.data)
require (node.in.size == 1, s"Aggregator for groupName: ${groupName} had ${node.in.size} inward edges instead of 1")
require (in.head.member.size == outputs.size)
in.head.member.data.zip(outputs).foreach { case (i, o) => o := i }
}
}
object ClockGroupAggregator
{
def apply()(implicit p: Parameters, valName: ValName) = LazyModule(new ClockGroupAggregator(valName.name)).node
}
class SimpleClockGroupSource(numSources: Int = 1)(implicit p: Parameters) extends LazyModule
{
val node = ClockGroupSourceNode(List.fill(numSources) { ClockGroupSourceParameters() })
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
val (out, _) = node.out.unzip
out.map { out: ClockGroupBundle =>
out.member.data.foreach { o =>
o.clock := clock; o.reset := reset }
}
}
}
object SimpleClockGroupSource
{
def apply(num: Int = 1)(implicit p: Parameters, valName: ValName) = LazyModule(new SimpleClockGroupSource(num)).node
}
case class FixedClockBroadcastNode(fixedClockOpt: Option[ClockParameters])(implicit valName: ValName)
extends NexusNode(ClockImp)(
dFn = { seq => fixedClockOpt.map(_ => ClockSourceParameters(give = fixedClockOpt)).orElse(seq.headOption).getOrElse(ClockSourceParameters()) },
uFn = { seq => fixedClockOpt.map(_ => ClockSinkParameters(take = fixedClockOpt)).orElse(seq.headOption).getOrElse(ClockSinkParameters()) },
inputRequiresOutput = false) {
def fixedClockResources(name: String, prefix: String = "soc/"): Seq[Option[FixedClockResource]] = Seq(fixedClockOpt.map(t => new FixedClockResource(name, t.freqMHz, prefix)))
}
class FixedClockBroadcast(fixedClockOpt: Option[ClockParameters])(implicit p: Parameters) extends LazyModule
{
val node = new FixedClockBroadcastNode(fixedClockOpt) {
override def circuitIdentity = outputs.size == 1
}
lazy val module = new Impl
class Impl extends LazyRawModuleImp(this) {
val (in, _) = node.in(0)
val (out, _) = node.out.unzip
override def desiredName = s"FixedClockBroadcast_${out.size}"
require (node.in.size == 1, "FixedClockBroadcast can only broadcast a single clock")
out.foreach { _ := in }
}
}
object FixedClockBroadcast
{
def apply(fixedClockOpt: Option[ClockParameters] = None)(implicit p: Parameters, valName: ValName) = LazyModule(new FixedClockBroadcast(fixedClockOpt)).node
}
case class PRCIClockGroupNode()(implicit valName: ValName)
extends NexusNode(ClockGroupImp)(
dFn = { _ => ClockGroupSourceParameters() },
uFn = { _ => ClockGroupSinkParameters("prci", Nil) },
outputRequiresInput = false)
File LazyModuleImp.scala:
package org.chipsalliance.diplomacy.lazymodule
import chisel3.{withClockAndReset, Module, RawModule, Reset, _}
import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo}
import firrtl.passes.InlineAnnotation
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.nodes.Dangle
import scala.collection.immutable.SortedMap
/** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]].
*
* This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy.
*/
sealed trait LazyModuleImpLike extends RawModule {
/** [[LazyModule]] that contains this instance. */
val wrapper: LazyModule
/** IOs that will be automatically "punched" for this instance. */
val auto: AutoBundle
/** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */
protected[diplomacy] val dangles: Seq[Dangle]
// [[wrapper.module]] had better not be accessed while LazyModules are still being built!
require(
LazyModule.scope.isEmpty,
s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}"
)
/** Set module name. Defaults to the containing LazyModule's desiredName. */
override def desiredName: String = wrapper.desiredName
suggestName(wrapper.suggestedName)
/** [[Parameters]] for chisel [[Module]]s. */
implicit val p: Parameters = wrapper.p
/** Instantiate this [[LazyModule]], returning its [[AutoBundle]] and any unconnected [[Dangle]]s from this module and
* its submodules.
*/
protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = {
// 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]],
// 2. return [[Dangle]]s from each module.
val childDangles = wrapper.children.reverse.flatMap { c =>
implicit val sourceInfo: SourceInfo = c.info
c.cloneProto.map { cp =>
// If the child is a clone, then recursively set cloneProto of its children as well
def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = {
require(bases.size == clones.size)
(bases.zip(clones)).map { case (l, r) =>
require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}")
l.cloneProto = Some(r)
assignCloneProtos(l.children, r.children)
}
}
assignCloneProtos(c.children, cp.children)
// Clone the child module as a record, and get its [[AutoBundle]]
val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName)
val clonedAuto = clone("auto").asInstanceOf[AutoBundle]
// Get the empty [[Dangle]]s of the cloned child
val rawDangles = c.cloneDangles()
require(rawDangles.size == clonedAuto.elements.size)
// Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]s
val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) }
dangles
}.getOrElse {
// For non-clones, instantiate the child module
val mod = try {
Module(c.module)
} catch {
case e: ChiselException => {
println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}")
throw e
}
}
mod.dangles
}
}
// Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]].
// This will result in a sequence of [[Dangle]] from these [[BaseNode]]s.
val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate())
// Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]]
val allDangles = nodeDangles ++ childDangles
// Group [[allDangles]] by their [[source]].
val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*)
// For each [[source]] set of [[Dangle]]s of size 2, ensure that these
// can be connected as a source-sink pair (have opposite flipped value).
// Make the connection and mark them as [[done]].
val done = Set() ++ pairing.values.filter(_.size == 2).map {
case Seq(a, b) =>
require(a.flipped != b.flipped)
// @todo <> in chisel3 makes a directionless connection.
if (a.flipped) {
a.data <> b.data
} else {
b.data <> a.data
}
a.source
case _ => None
}
// Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module.
val forward = allDangles.filter(d => !done(d.source))
// Generate [[AutoBundle]] IO from [[forward]].
val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*))
// Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]]
val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) =>
if (d.flipped) {
d.data <> io
} else {
io <> d.data
}
d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name)
}
// Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]].
wrapper.inModuleBody.reverse.foreach {
_()
}
if (wrapper.shouldBeInlined) {
chisel3.experimental.annotate(new ChiselAnnotation {
def toFirrtl = InlineAnnotation(toNamed)
})
}
// Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]].
(auto, dangles)
}
}
/** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike {
/** Instantiate hardware of this `Module`. */
val (auto, dangles) = instantiate()
}
/** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike {
// These wires are the default clock+reset for all LazyModule children.
// It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the
// [[LazyRawModuleImp]] children.
// Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly.
/** drive clock explicitly. */
val childClock: Clock = Wire(Clock())
/** drive reset explicitly. */
val childReset: Reset = Wire(Reset())
// the default is that these are disabled
childClock := false.B.asClock
childReset := chisel3.DontCare
def provideImplicitClockToLazyChildren: Boolean = false
val (auto, dangles) =
if (provideImplicitClockToLazyChildren) {
withClockAndReset(childClock, childReset) { instantiate() }
} else {
instantiate()
}
}
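// Illustrative sketch (an assumption, not part of this file): a typical LazyModule pairs its
// diplomatic nodes with a lazily constructed implementation, e.g.
//   class MyPassthrough(implicit p: Parameters) extends LazyModule {
//     val node = TLAdapterNode()
//     lazy val module = new LazyModuleImp(this) {
//       (node.in zip node.out).foreach { case ((in, _), (out, _)) => out <> in }
//     }
//   }
// so the hardware in `module` is only elaborated after diplomacy has negotiated parameters for `node`.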
File ProbePicker.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy.lazymodule._
import freechips.rocketchip.diplomacy.{AddressSet, IdRange}
/* A ProbePicker is used to unify multiple cache banks into one logical cache */
class ProbePicker(implicit p: Parameters) extends LazyModule
{
val node = TLAdapterNode(
clientFn = { p =>
// The ProbePicker assembles multiple clients based on the assumption that they are contiguous in the clients list.
// This should be true for clusters of xbar :=* BankBinder connections.
def combine(next: TLMasterParameters, pair: (TLMasterParameters, Seq[TLMasterParameters])) = {
val (head, output) = pair
if (head.visibility.exists(x => next.visibility.exists(_.overlaps(x)))) {
(next, head +: output) // pair is not banked, push head without merging
} else {
def redact(x: TLMasterParameters) = x.v1copy(sourceId = IdRange(0,1), nodePath = Nil, visibility = Seq(AddressSet(0, ~0)))
require (redact(next) == redact(head), s"${redact(next)} != ${redact(head)}")
val merge = head.v1copy(
sourceId = IdRange(
head.sourceId.start min next.sourceId.start,
head.sourceId.end max next.sourceId.end),
visibility = AddressSet.unify(head.visibility ++ next.visibility))
(merge, output)
}
}
val myNil: Seq[TLMasterParameters] = Nil
val (head, output) = p.clients.init.foldRight((p.clients.last, myNil))(combine)
p.v1copy(clients = head +: output)
},
managerFn = { p => p })
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
(node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
out <> in
// Based on address, adjust source to route to the correct bank
if (edgeIn.client.clients.size != edgeOut.client.clients.size) {
in.b.bits.source := Mux1H(
edgeOut.client.clients.map(_.sourceId contains out.b.bits.source),
edgeOut.client.clients.map { c =>
val banks = edgeIn.client.clients.filter(c.sourceId contains _.sourceId)
if (banks.size == 1) {
out.b.bits.source // allow sharing the value between single-bank cases
} else {
Mux1H(
banks.map(_.visibility.map(_ contains out.b.bits.address).reduce(_ || _)),
banks.map(_.sourceId.start.U))
}
}
)
}
}
}
}
object ProbePicker
{
def apply()(implicit p: Parameters): TLNode = {
val picker = LazyModule(new ProbePicker)
picker.node
}
}
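// Usage note (illustrative comment): MemoryBus.scala above places this adapter on its outward
// side via `ProbePicker() :*= xbar.node`, so that Probes arriving on the B channel are steered
// to the source id of whichever client bank's address range actually contains the probed address.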
File LazyScope.scala:
package org.chipsalliance.diplomacy.lazymodule
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.ValName
/** Allows dynamic creation of [[Module]] hierarchy and "shoving" logic into a [[LazyModule]]. */
trait LazyScope {
this: LazyModule =>
override def toString: String = s"LazyScope named $name"
/** Evaluate `body` in the current [[LazyModule.scope]] */
def apply[T](body: => T): T = {
// Preserve the previous value of [[LazyModule.scope]], because calling the [[apply]] function
// will alter [[LazyModule.scope]].
val saved = LazyModule.scope
// [[LazyModule.scope]] stack push.
LazyModule.scope = Some(this)
// Evaluate [[body]] in the current `scope`, saving the result to [[out]].
val out = body
// Check that the `scope` after evaluating `body` is the same as when we started.
require(LazyModule.scope.isDefined, s"LazyScope $name tried to exit, but scope was empty!")
require(
LazyModule.scope.get eq this,
s"LazyScope $name exited before LazyModule ${LazyModule.scope.get.name} was closed"
)
// [[LazyModule.scope]] stack pop.
LazyModule.scope = saved
out
}
}
/** Used to automatically create a level of module hierarchy (a [[SimpleLazyModule]]) within which [[LazyModule]]s can
* be instantiated and connected.
*
* It will instantiate a [[SimpleLazyModule]] to manage evaluation of `body` and evaluate `body` code snippets in this
* scope.
*/
object LazyScope {
/** Create a [[LazyScope]] with an implicit instance name.
*
* @param body
* code executed within the generated [[SimpleLazyModule]].
* @param valName
* instance name of generated [[SimpleLazyModule]].
* @param p
* [[Parameters]] propagated to [[SimpleLazyModule]].
*/
def apply[T](
body: => T
)(
implicit valName: ValName,
p: Parameters
): T = {
apply(valName.value, "SimpleLazyModule", None)(body)(p)
}
/** Create a [[LazyScope]] with an explicitly defined instance name.
*
* @param name
* instance name of generated [[SimpleLazyModule]].
* @param body
* code executed within the generated `SimpleLazyModule`
* @param p
* [[Parameters]] propagated to [[SimpleLazyModule]].
*/
def apply[T](
name: String
)(body: => T
)(
implicit p: Parameters
): T = {
apply(name, "SimpleLazyModule", None)(body)(p)
}
/** Create a [[LazyScope]] with an explicit instance and class name, and control inlining.
*
* @param name
* instance name of generated [[SimpleLazyModule]].
* @param desiredModuleName
* class name of generated [[SimpleLazyModule]].
* @param overrideInlining
* tell FIRRTL that this [[SimpleLazyModule]]'s module should be inlined.
* @param body
* code executed within the generated `SimpleLazyModule`
* @param p
* [[Parameters]] propagated to [[SimpleLazyModule]].
*/
def apply[T](
name: String,
desiredModuleName: String,
overrideInlining: Option[Boolean] = None
)(body: => T
)(
implicit p: Parameters
): T = {
val scope = LazyModule(new SimpleLazyModule with LazyScope {
override lazy val desiredName = desiredModuleName
override def shouldBeInlined = overrideInlining.getOrElse(super.shouldBeInlined)
}).suggestName(name)
scope {
body
}
}
/** Create a [[LazyScope]] to temporarily group children for some reason, but tell Firrtl to inline it.
*
* For example, we might want to control a set of children's clocks but then not keep the parent wrapper.
*
* @param body
* code executed within the generated `SimpleLazyModule`
* @param p
* [[Parameters]] propagated to [[SimpleLazyModule]].
*/
def inline[T](
body: => T
)(
implicit p: Parameters
): T = {
apply("noname", "ShouldBeInlined", Some(false))(body)(p)
}
}
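// Usage sketch (illustrative comment, not part of the original file; all names are hypothetical):
// LazyScope wraps a group of LazyModules in one SimpleLazyModule level of hierarchy, e.g.
//   LazyScope("mem_wrapper") {
//     val bufA = LazyModule(new TLBuffer())
//     val bufB = LazyModule(new TLBuffer())
//     bufA.node :*= bufB.node
//   }
// The body is evaluated with LazyModule.scope pointing at the generated SimpleLazyModule,
// exactly as in apply() above.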
| module MemoryBus( // @[ClockDomain.scala:14:9]
input auto_buffer_out_a_ready, // @[LazyModuleImp.scala:107:25]
output auto_buffer_out_a_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_buffer_out_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_buffer_out_a_bits_param, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_buffer_out_a_bits_size, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_buffer_out_a_bits_source, // @[LazyModuleImp.scala:107:25]
output [27:0] auto_buffer_out_a_bits_address, // @[LazyModuleImp.scala:107:25]
output [7:0] auto_buffer_out_a_bits_mask, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_buffer_out_a_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_buffer_out_a_bits_corrupt, // @[LazyModuleImp.scala:107:25]
output auto_buffer_out_d_ready, // @[LazyModuleImp.scala:107:25]
input auto_buffer_out_d_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_buffer_out_d_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_buffer_out_d_bits_param, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_buffer_out_d_bits_size, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_buffer_out_d_bits_source, // @[LazyModuleImp.scala:107:25]
input auto_buffer_out_d_bits_sink, // @[LazyModuleImp.scala:107:25]
input auto_buffer_out_d_bits_denied, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_buffer_out_d_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_buffer_out_d_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_aw_ready, // @[LazyModuleImp.scala:107:25]
output auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_aw_valid, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_aw_bits_id, // @[LazyModuleImp.scala:107:25]
output [31:0] auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_aw_bits_addr, // @[LazyModuleImp.scala:107:25]
output [7:0] auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_aw_bits_len, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_aw_bits_size, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_aw_bits_burst, // @[LazyModuleImp.scala:107:25]
output auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_aw_bits_lock, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_aw_bits_cache, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_aw_bits_prot, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_aw_bits_qos, // @[LazyModuleImp.scala:107:25]
input auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_w_ready, // @[LazyModuleImp.scala:107:25]
output auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_w_valid, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_w_bits_data, // @[LazyModuleImp.scala:107:25]
output [7:0] auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_w_bits_strb, // @[LazyModuleImp.scala:107:25]
output auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_w_bits_last, // @[LazyModuleImp.scala:107:25]
output auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_b_ready, // @[LazyModuleImp.scala:107:25]
input auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_b_valid, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_b_bits_id, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_b_bits_resp, // @[LazyModuleImp.scala:107:25]
input auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_ar_ready, // @[LazyModuleImp.scala:107:25]
output auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_ar_valid, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_ar_bits_id, // @[LazyModuleImp.scala:107:25]
output [31:0] auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_ar_bits_addr, // @[LazyModuleImp.scala:107:25]
output [7:0] auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_ar_bits_len, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_ar_bits_size, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_ar_bits_burst, // @[LazyModuleImp.scala:107:25]
output auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_ar_bits_lock, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_ar_bits_cache, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_ar_bits_prot, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_ar_bits_qos, // @[LazyModuleImp.scala:107:25]
output auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_r_ready, // @[LazyModuleImp.scala:107:25]
input auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_r_valid, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_r_bits_id, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_r_bits_data, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_r_bits_resp, // @[LazyModuleImp.scala:107:25]
input auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_r_bits_last, // @[LazyModuleImp.scala:107:25]
output auto_fixedClockNode_anon_out_1_clock, // @[LazyModuleImp.scala:107:25]
output auto_fixedClockNode_anon_out_0_clock, // @[LazyModuleImp.scala:107:25]
output auto_fixedClockNode_anon_out_0_reset, // @[LazyModuleImp.scala:107:25]
input auto_mbus_clock_groups_in_member_mbus_0_clock, // @[LazyModuleImp.scala:107:25]
input auto_mbus_clock_groups_in_member_mbus_0_reset, // @[LazyModuleImp.scala:107:25]
output auto_bus_xing_in_a_ready, // @[LazyModuleImp.scala:107:25]
input auto_bus_xing_in_a_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_bus_xing_in_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_bus_xing_in_a_bits_param, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_bus_xing_in_a_bits_size, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_bus_xing_in_a_bits_source, // @[LazyModuleImp.scala:107:25]
input [31:0] auto_bus_xing_in_a_bits_address, // @[LazyModuleImp.scala:107:25]
input [7:0] auto_bus_xing_in_a_bits_mask, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_bus_xing_in_a_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_bus_xing_in_a_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input auto_bus_xing_in_d_ready, // @[LazyModuleImp.scala:107:25]
output auto_bus_xing_in_d_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_bus_xing_in_d_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_bus_xing_in_d_bits_param, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_bus_xing_in_d_bits_size, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_bus_xing_in_d_bits_source, // @[LazyModuleImp.scala:107:25]
output auto_bus_xing_in_d_bits_sink, // @[LazyModuleImp.scala:107:25]
output auto_bus_xing_in_d_bits_denied, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_bus_xing_in_d_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_bus_xing_in_d_bits_corrupt // @[LazyModuleImp.scala:107:25]
);
wire _buffer_1_auto_in_a_ready; // @[Buffer.scala:75:28]
wire _buffer_1_auto_in_d_valid; // @[Buffer.scala:75:28]
wire [2:0] _buffer_1_auto_in_d_bits_opcode; // @[Buffer.scala:75:28]
wire [1:0] _buffer_1_auto_in_d_bits_param; // @[Buffer.scala:75:28]
wire [2:0] _buffer_1_auto_in_d_bits_size; // @[Buffer.scala:75:28]
wire [3:0] _buffer_1_auto_in_d_bits_source; // @[Buffer.scala:75:28]
wire _buffer_1_auto_in_d_bits_sink; // @[Buffer.scala:75:28]
wire _buffer_1_auto_in_d_bits_denied; // @[Buffer.scala:75:28]
wire [63:0] _buffer_1_auto_in_d_bits_data; // @[Buffer.scala:75:28]
wire _buffer_1_auto_in_d_bits_corrupt; // @[Buffer.scala:75:28]
wire _coupler_to_memory_controller_port_named_axi4_auto_widget_anon_in_a_ready; // @[LazyScope.scala:98:27]
wire _coupler_to_memory_controller_port_named_axi4_auto_widget_anon_in_d_valid; // @[LazyScope.scala:98:27]
wire [2:0] _coupler_to_memory_controller_port_named_axi4_auto_widget_anon_in_d_bits_opcode; // @[LazyScope.scala:98:27]
wire [2:0] _coupler_to_memory_controller_port_named_axi4_auto_widget_anon_in_d_bits_size; // @[LazyScope.scala:98:27]
wire [3:0] _coupler_to_memory_controller_port_named_axi4_auto_widget_anon_in_d_bits_source; // @[LazyScope.scala:98:27]
wire _coupler_to_memory_controller_port_named_axi4_auto_widget_anon_in_d_bits_denied; // @[LazyScope.scala:98:27]
wire [63:0] _coupler_to_memory_controller_port_named_axi4_auto_widget_anon_in_d_bits_data; // @[LazyScope.scala:98:27]
wire _coupler_to_memory_controller_port_named_axi4_auto_widget_anon_in_d_bits_corrupt; // @[LazyScope.scala:98:27]
wire _coupler_to_memory_controller_port_named_axi4_auto_tl_in_a_ready; // @[LazyScope.scala:98:27]
wire _coupler_to_memory_controller_port_named_axi4_auto_tl_in_d_valid; // @[LazyScope.scala:98:27]
wire [2:0] _coupler_to_memory_controller_port_named_axi4_auto_tl_in_d_bits_opcode; // @[LazyScope.scala:98:27]
wire [2:0] _coupler_to_memory_controller_port_named_axi4_auto_tl_in_d_bits_size; // @[LazyScope.scala:98:27]
wire [3:0] _coupler_to_memory_controller_port_named_axi4_auto_tl_in_d_bits_source; // @[LazyScope.scala:98:27]
wire _coupler_to_memory_controller_port_named_axi4_auto_tl_in_d_bits_denied; // @[LazyScope.scala:98:27]
wire [63:0] _coupler_to_memory_controller_port_named_axi4_auto_tl_in_d_bits_data; // @[LazyScope.scala:98:27]
wire _coupler_to_memory_controller_port_named_axi4_auto_tl_in_d_bits_corrupt; // @[LazyScope.scala:98:27]
wire _coupler_to_memory_controller_port_named_axi4_auto_tl_out_a_valid; // @[LazyScope.scala:98:27]
wire [2:0] _coupler_to_memory_controller_port_named_axi4_auto_tl_out_a_bits_opcode; // @[LazyScope.scala:98:27]
wire [2:0] _coupler_to_memory_controller_port_named_axi4_auto_tl_out_a_bits_param; // @[LazyScope.scala:98:27]
wire [2:0] _coupler_to_memory_controller_port_named_axi4_auto_tl_out_a_bits_size; // @[LazyScope.scala:98:27]
wire [3:0] _coupler_to_memory_controller_port_named_axi4_auto_tl_out_a_bits_source; // @[LazyScope.scala:98:27]
wire [31:0] _coupler_to_memory_controller_port_named_axi4_auto_tl_out_a_bits_address; // @[LazyScope.scala:98:27]
wire [7:0] _coupler_to_memory_controller_port_named_axi4_auto_tl_out_a_bits_mask; // @[LazyScope.scala:98:27]
wire [63:0] _coupler_to_memory_controller_port_named_axi4_auto_tl_out_a_bits_data; // @[LazyScope.scala:98:27]
wire _coupler_to_memory_controller_port_named_axi4_auto_tl_out_a_bits_corrupt; // @[LazyScope.scala:98:27]
wire _coupler_to_memory_controller_port_named_axi4_auto_tl_out_d_ready; // @[LazyScope.scala:98:27]
wire _picker_auto_in_1_a_ready; // @[ProbePicker.scala:69:28]
wire _picker_auto_in_1_d_valid; // @[ProbePicker.scala:69:28]
wire [2:0] _picker_auto_in_1_d_bits_opcode; // @[ProbePicker.scala:69:28]
wire [1:0] _picker_auto_in_1_d_bits_param; // @[ProbePicker.scala:69:28]
wire [2:0] _picker_auto_in_1_d_bits_size; // @[ProbePicker.scala:69:28]
wire [3:0] _picker_auto_in_1_d_bits_source; // @[ProbePicker.scala:69:28]
wire _picker_auto_in_1_d_bits_sink; // @[ProbePicker.scala:69:28]
wire _picker_auto_in_1_d_bits_denied; // @[ProbePicker.scala:69:28]
wire [63:0] _picker_auto_in_1_d_bits_data; // @[ProbePicker.scala:69:28]
wire _picker_auto_in_1_d_bits_corrupt; // @[ProbePicker.scala:69:28]
wire _picker_auto_in_0_a_ready; // @[ProbePicker.scala:69:28]
wire _picker_auto_in_0_d_valid; // @[ProbePicker.scala:69:28]
wire [2:0] _picker_auto_in_0_d_bits_opcode; // @[ProbePicker.scala:69:28]
wire [2:0] _picker_auto_in_0_d_bits_size; // @[ProbePicker.scala:69:28]
wire [3:0] _picker_auto_in_0_d_bits_source; // @[ProbePicker.scala:69:28]
wire _picker_auto_in_0_d_bits_denied; // @[ProbePicker.scala:69:28]
wire [63:0] _picker_auto_in_0_d_bits_data; // @[ProbePicker.scala:69:28]
wire _picker_auto_in_0_d_bits_corrupt; // @[ProbePicker.scala:69:28]
wire _picker_auto_out_1_a_valid; // @[ProbePicker.scala:69:28]
wire [2:0] _picker_auto_out_1_a_bits_opcode; // @[ProbePicker.scala:69:28]
wire [2:0] _picker_auto_out_1_a_bits_param; // @[ProbePicker.scala:69:28]
wire [2:0] _picker_auto_out_1_a_bits_size; // @[ProbePicker.scala:69:28]
wire [3:0] _picker_auto_out_1_a_bits_source; // @[ProbePicker.scala:69:28]
wire [27:0] _picker_auto_out_1_a_bits_address; // @[ProbePicker.scala:69:28]
wire [7:0] _picker_auto_out_1_a_bits_mask; // @[ProbePicker.scala:69:28]
wire [63:0] _picker_auto_out_1_a_bits_data; // @[ProbePicker.scala:69:28]
wire _picker_auto_out_1_a_bits_corrupt; // @[ProbePicker.scala:69:28]
wire _picker_auto_out_1_d_ready; // @[ProbePicker.scala:69:28]
wire _picker_auto_out_0_a_valid; // @[ProbePicker.scala:69:28]
wire [2:0] _picker_auto_out_0_a_bits_opcode; // @[ProbePicker.scala:69:28]
wire [2:0] _picker_auto_out_0_a_bits_param; // @[ProbePicker.scala:69:28]
wire [2:0] _picker_auto_out_0_a_bits_size; // @[ProbePicker.scala:69:28]
wire [3:0] _picker_auto_out_0_a_bits_source; // @[ProbePicker.scala:69:28]
wire [31:0] _picker_auto_out_0_a_bits_address; // @[ProbePicker.scala:69:28]
wire [7:0] _picker_auto_out_0_a_bits_mask; // @[ProbePicker.scala:69:28]
wire [63:0] _picker_auto_out_0_a_bits_data; // @[ProbePicker.scala:69:28]
wire _picker_auto_out_0_a_bits_corrupt; // @[ProbePicker.scala:69:28]
wire _picker_auto_out_0_d_ready; // @[ProbePicker.scala:69:28]
wire _mbus_xbar_auto_anon_out_1_a_valid; // @[MemoryBus.scala:47:32]
wire [2:0] _mbus_xbar_auto_anon_out_1_a_bits_opcode; // @[MemoryBus.scala:47:32]
wire [2:0] _mbus_xbar_auto_anon_out_1_a_bits_param; // @[MemoryBus.scala:47:32]
wire [2:0] _mbus_xbar_auto_anon_out_1_a_bits_size; // @[MemoryBus.scala:47:32]
wire [3:0] _mbus_xbar_auto_anon_out_1_a_bits_source; // @[MemoryBus.scala:47:32]
wire [27:0] _mbus_xbar_auto_anon_out_1_a_bits_address; // @[MemoryBus.scala:47:32]
wire [7:0] _mbus_xbar_auto_anon_out_1_a_bits_mask; // @[MemoryBus.scala:47:32]
wire [63:0] _mbus_xbar_auto_anon_out_1_a_bits_data; // @[MemoryBus.scala:47:32]
wire _mbus_xbar_auto_anon_out_1_a_bits_corrupt; // @[MemoryBus.scala:47:32]
wire _mbus_xbar_auto_anon_out_1_d_ready; // @[MemoryBus.scala:47:32]
wire _mbus_xbar_auto_anon_out_0_a_valid; // @[MemoryBus.scala:47:32]
wire [2:0] _mbus_xbar_auto_anon_out_0_a_bits_opcode; // @[MemoryBus.scala:47:32]
wire [2:0] _mbus_xbar_auto_anon_out_0_a_bits_param; // @[MemoryBus.scala:47:32]
wire [2:0] _mbus_xbar_auto_anon_out_0_a_bits_size; // @[MemoryBus.scala:47:32]
wire [3:0] _mbus_xbar_auto_anon_out_0_a_bits_source; // @[MemoryBus.scala:47:32]
wire [31:0] _mbus_xbar_auto_anon_out_0_a_bits_address; // @[MemoryBus.scala:47:32]
wire [7:0] _mbus_xbar_auto_anon_out_0_a_bits_mask; // @[MemoryBus.scala:47:32]
wire [63:0] _mbus_xbar_auto_anon_out_0_a_bits_data; // @[MemoryBus.scala:47:32]
wire _mbus_xbar_auto_anon_out_0_a_bits_corrupt; // @[MemoryBus.scala:47:32]
wire _mbus_xbar_auto_anon_out_0_d_ready; // @[MemoryBus.scala:47:32]
wire _fixedClockNode_auto_anon_out_0_clock; // @[ClockGroup.scala:115:114]
wire _fixedClockNode_auto_anon_out_0_reset; // @[ClockGroup.scala:115:114]
FixedClockBroadcast_3 fixedClockNode ( // @[ClockGroup.scala:115:114]
.auto_anon_in_clock (auto_mbus_clock_groups_in_member_mbus_0_clock),
.auto_anon_in_reset (auto_mbus_clock_groups_in_member_mbus_0_reset),
.auto_anon_out_2_clock (auto_fixedClockNode_anon_out_1_clock),
.auto_anon_out_1_clock (auto_fixedClockNode_anon_out_0_clock),
.auto_anon_out_1_reset (auto_fixedClockNode_anon_out_0_reset),
.auto_anon_out_0_clock (_fixedClockNode_auto_anon_out_0_clock),
.auto_anon_out_0_reset (_fixedClockNode_auto_anon_out_0_reset)
); // @[ClockGroup.scala:115:114]
TLXbar_mbus_i1_o2_a32d64s4k1z3u mbus_xbar ( // @[MemoryBus.scala:47:32]
.clock (_fixedClockNode_auto_anon_out_0_clock), // @[ClockGroup.scala:115:114]
.reset (_fixedClockNode_auto_anon_out_0_reset), // @[ClockGroup.scala:115:114]
.auto_anon_in_a_ready (auto_bus_xing_in_a_ready),
.auto_anon_in_a_valid (auto_bus_xing_in_a_valid),
.auto_anon_in_a_bits_opcode (auto_bus_xing_in_a_bits_opcode),
.auto_anon_in_a_bits_param (auto_bus_xing_in_a_bits_param),
.auto_anon_in_a_bits_size (auto_bus_xing_in_a_bits_size),
.auto_anon_in_a_bits_source (auto_bus_xing_in_a_bits_source),
.auto_anon_in_a_bits_address (auto_bus_xing_in_a_bits_address),
.auto_anon_in_a_bits_mask (auto_bus_xing_in_a_bits_mask),
.auto_anon_in_a_bits_data (auto_bus_xing_in_a_bits_data),
.auto_anon_in_a_bits_corrupt (auto_bus_xing_in_a_bits_corrupt),
.auto_anon_in_d_ready (auto_bus_xing_in_d_ready),
.auto_anon_in_d_valid (auto_bus_xing_in_d_valid),
.auto_anon_in_d_bits_opcode (auto_bus_xing_in_d_bits_opcode),
.auto_anon_in_d_bits_param (auto_bus_xing_in_d_bits_param),
.auto_anon_in_d_bits_size (auto_bus_xing_in_d_bits_size),
.auto_anon_in_d_bits_source (auto_bus_xing_in_d_bits_source),
.auto_anon_in_d_bits_sink (auto_bus_xing_in_d_bits_sink),
.auto_anon_in_d_bits_denied (auto_bus_xing_in_d_bits_denied),
.auto_anon_in_d_bits_data (auto_bus_xing_in_d_bits_data),
.auto_anon_in_d_bits_corrupt (auto_bus_xing_in_d_bits_corrupt),
.auto_anon_out_1_a_ready (_picker_auto_in_1_a_ready), // @[ProbePicker.scala:69:28]
.auto_anon_out_1_a_valid (_mbus_xbar_auto_anon_out_1_a_valid),
.auto_anon_out_1_a_bits_opcode (_mbus_xbar_auto_anon_out_1_a_bits_opcode),
.auto_anon_out_1_a_bits_param (_mbus_xbar_auto_anon_out_1_a_bits_param),
.auto_anon_out_1_a_bits_size (_mbus_xbar_auto_anon_out_1_a_bits_size),
.auto_anon_out_1_a_bits_source (_mbus_xbar_auto_anon_out_1_a_bits_source),
.auto_anon_out_1_a_bits_address (_mbus_xbar_auto_anon_out_1_a_bits_address),
.auto_anon_out_1_a_bits_mask (_mbus_xbar_auto_anon_out_1_a_bits_mask),
.auto_anon_out_1_a_bits_data (_mbus_xbar_auto_anon_out_1_a_bits_data),
.auto_anon_out_1_a_bits_corrupt (_mbus_xbar_auto_anon_out_1_a_bits_corrupt),
.auto_anon_out_1_d_ready (_mbus_xbar_auto_anon_out_1_d_ready),
.auto_anon_out_1_d_valid (_picker_auto_in_1_d_valid), // @[ProbePicker.scala:69:28]
.auto_anon_out_1_d_bits_opcode (_picker_auto_in_1_d_bits_opcode), // @[ProbePicker.scala:69:28]
.auto_anon_out_1_d_bits_param (_picker_auto_in_1_d_bits_param), // @[ProbePicker.scala:69:28]
.auto_anon_out_1_d_bits_size (_picker_auto_in_1_d_bits_size), // @[ProbePicker.scala:69:28]
.auto_anon_out_1_d_bits_source (_picker_auto_in_1_d_bits_source), // @[ProbePicker.scala:69:28]
.auto_anon_out_1_d_bits_sink (_picker_auto_in_1_d_bits_sink), // @[ProbePicker.scala:69:28]
.auto_anon_out_1_d_bits_denied (_picker_auto_in_1_d_bits_denied), // @[ProbePicker.scala:69:28]
.auto_anon_out_1_d_bits_data (_picker_auto_in_1_d_bits_data), // @[ProbePicker.scala:69:28]
.auto_anon_out_1_d_bits_corrupt (_picker_auto_in_1_d_bits_corrupt), // @[ProbePicker.scala:69:28]
.auto_anon_out_0_a_ready (_picker_auto_in_0_a_ready), // @[ProbePicker.scala:69:28]
.auto_anon_out_0_a_valid (_mbus_xbar_auto_anon_out_0_a_valid),
.auto_anon_out_0_a_bits_opcode (_mbus_xbar_auto_anon_out_0_a_bits_opcode),
.auto_anon_out_0_a_bits_param (_mbus_xbar_auto_anon_out_0_a_bits_param),
.auto_anon_out_0_a_bits_size (_mbus_xbar_auto_anon_out_0_a_bits_size),
.auto_anon_out_0_a_bits_source (_mbus_xbar_auto_anon_out_0_a_bits_source),
.auto_anon_out_0_a_bits_address (_mbus_xbar_auto_anon_out_0_a_bits_address),
.auto_anon_out_0_a_bits_mask (_mbus_xbar_auto_anon_out_0_a_bits_mask),
.auto_anon_out_0_a_bits_data (_mbus_xbar_auto_anon_out_0_a_bits_data),
.auto_anon_out_0_a_bits_corrupt (_mbus_xbar_auto_anon_out_0_a_bits_corrupt),
.auto_anon_out_0_d_ready (_mbus_xbar_auto_anon_out_0_d_ready),
.auto_anon_out_0_d_valid (_picker_auto_in_0_d_valid), // @[ProbePicker.scala:69:28]
.auto_anon_out_0_d_bits_opcode (_picker_auto_in_0_d_bits_opcode), // @[ProbePicker.scala:69:28]
.auto_anon_out_0_d_bits_size (_picker_auto_in_0_d_bits_size), // @[ProbePicker.scala:69:28]
.auto_anon_out_0_d_bits_source (_picker_auto_in_0_d_bits_source), // @[ProbePicker.scala:69:28]
.auto_anon_out_0_d_bits_denied (_picker_auto_in_0_d_bits_denied), // @[ProbePicker.scala:69:28]
.auto_anon_out_0_d_bits_data (_picker_auto_in_0_d_bits_data), // @[ProbePicker.scala:69:28]
.auto_anon_out_0_d_bits_corrupt (_picker_auto_in_0_d_bits_corrupt) // @[ProbePicker.scala:69:28]
); // @[MemoryBus.scala:47:32]
ProbePicker picker ( // @[ProbePicker.scala:69:28]
.clock (_fixedClockNode_auto_anon_out_0_clock), // @[ClockGroup.scala:115:114]
.reset (_fixedClockNode_auto_anon_out_0_reset), // @[ClockGroup.scala:115:114]
.auto_in_1_a_ready (_picker_auto_in_1_a_ready),
.auto_in_1_a_valid (_mbus_xbar_auto_anon_out_1_a_valid), // @[MemoryBus.scala:47:32]
.auto_in_1_a_bits_opcode (_mbus_xbar_auto_anon_out_1_a_bits_opcode), // @[MemoryBus.scala:47:32]
.auto_in_1_a_bits_param (_mbus_xbar_auto_anon_out_1_a_bits_param), // @[MemoryBus.scala:47:32]
.auto_in_1_a_bits_size (_mbus_xbar_auto_anon_out_1_a_bits_size), // @[MemoryBus.scala:47:32]
.auto_in_1_a_bits_source (_mbus_xbar_auto_anon_out_1_a_bits_source), // @[MemoryBus.scala:47:32]
.auto_in_1_a_bits_address (_mbus_xbar_auto_anon_out_1_a_bits_address), // @[MemoryBus.scala:47:32]
.auto_in_1_a_bits_mask (_mbus_xbar_auto_anon_out_1_a_bits_mask), // @[MemoryBus.scala:47:32]
.auto_in_1_a_bits_data (_mbus_xbar_auto_anon_out_1_a_bits_data), // @[MemoryBus.scala:47:32]
.auto_in_1_a_bits_corrupt (_mbus_xbar_auto_anon_out_1_a_bits_corrupt), // @[MemoryBus.scala:47:32]
.auto_in_1_d_ready (_mbus_xbar_auto_anon_out_1_d_ready), // @[MemoryBus.scala:47:32]
.auto_in_1_d_valid (_picker_auto_in_1_d_valid),
.auto_in_1_d_bits_opcode (_picker_auto_in_1_d_bits_opcode),
.auto_in_1_d_bits_param (_picker_auto_in_1_d_bits_param),
.auto_in_1_d_bits_size (_picker_auto_in_1_d_bits_size),
.auto_in_1_d_bits_source (_picker_auto_in_1_d_bits_source),
.auto_in_1_d_bits_sink (_picker_auto_in_1_d_bits_sink),
.auto_in_1_d_bits_denied (_picker_auto_in_1_d_bits_denied),
.auto_in_1_d_bits_data (_picker_auto_in_1_d_bits_data),
.auto_in_1_d_bits_corrupt (_picker_auto_in_1_d_bits_corrupt),
.auto_in_0_a_ready (_picker_auto_in_0_a_ready),
.auto_in_0_a_valid (_mbus_xbar_auto_anon_out_0_a_valid), // @[MemoryBus.scala:47:32]
.auto_in_0_a_bits_opcode (_mbus_xbar_auto_anon_out_0_a_bits_opcode), // @[MemoryBus.scala:47:32]
.auto_in_0_a_bits_param (_mbus_xbar_auto_anon_out_0_a_bits_param), // @[MemoryBus.scala:47:32]
.auto_in_0_a_bits_size (_mbus_xbar_auto_anon_out_0_a_bits_size), // @[MemoryBus.scala:47:32]
.auto_in_0_a_bits_source (_mbus_xbar_auto_anon_out_0_a_bits_source), // @[MemoryBus.scala:47:32]
.auto_in_0_a_bits_address (_mbus_xbar_auto_anon_out_0_a_bits_address), // @[MemoryBus.scala:47:32]
.auto_in_0_a_bits_mask (_mbus_xbar_auto_anon_out_0_a_bits_mask), // @[MemoryBus.scala:47:32]
.auto_in_0_a_bits_data (_mbus_xbar_auto_anon_out_0_a_bits_data), // @[MemoryBus.scala:47:32]
.auto_in_0_a_bits_corrupt (_mbus_xbar_auto_anon_out_0_a_bits_corrupt), // @[MemoryBus.scala:47:32]
.auto_in_0_d_ready (_mbus_xbar_auto_anon_out_0_d_ready), // @[MemoryBus.scala:47:32]
.auto_in_0_d_valid (_picker_auto_in_0_d_valid),
.auto_in_0_d_bits_opcode (_picker_auto_in_0_d_bits_opcode),
.auto_in_0_d_bits_size (_picker_auto_in_0_d_bits_size),
.auto_in_0_d_bits_source (_picker_auto_in_0_d_bits_source),
.auto_in_0_d_bits_denied (_picker_auto_in_0_d_bits_denied),
.auto_in_0_d_bits_data (_picker_auto_in_0_d_bits_data),
.auto_in_0_d_bits_corrupt (_picker_auto_in_0_d_bits_corrupt),
.auto_out_1_a_ready (_buffer_1_auto_in_a_ready), // @[Buffer.scala:75:28]
.auto_out_1_a_valid (_picker_auto_out_1_a_valid),
.auto_out_1_a_bits_opcode (_picker_auto_out_1_a_bits_opcode),
.auto_out_1_a_bits_param (_picker_auto_out_1_a_bits_param),
.auto_out_1_a_bits_size (_picker_auto_out_1_a_bits_size),
.auto_out_1_a_bits_source (_picker_auto_out_1_a_bits_source),
.auto_out_1_a_bits_address (_picker_auto_out_1_a_bits_address),
.auto_out_1_a_bits_mask (_picker_auto_out_1_a_bits_mask),
.auto_out_1_a_bits_data (_picker_auto_out_1_a_bits_data),
.auto_out_1_a_bits_corrupt (_picker_auto_out_1_a_bits_corrupt),
.auto_out_1_d_ready (_picker_auto_out_1_d_ready),
.auto_out_1_d_valid (_buffer_1_auto_in_d_valid), // @[Buffer.scala:75:28]
.auto_out_1_d_bits_opcode (_buffer_1_auto_in_d_bits_opcode), // @[Buffer.scala:75:28]
.auto_out_1_d_bits_param (_buffer_1_auto_in_d_bits_param), // @[Buffer.scala:75:28]
.auto_out_1_d_bits_size (_buffer_1_auto_in_d_bits_size), // @[Buffer.scala:75:28]
.auto_out_1_d_bits_source (_buffer_1_auto_in_d_bits_source), // @[Buffer.scala:75:28]
.auto_out_1_d_bits_sink (_buffer_1_auto_in_d_bits_sink), // @[Buffer.scala:75:28]
.auto_out_1_d_bits_denied (_buffer_1_auto_in_d_bits_denied), // @[Buffer.scala:75:28]
.auto_out_1_d_bits_data (_buffer_1_auto_in_d_bits_data), // @[Buffer.scala:75:28]
.auto_out_1_d_bits_corrupt (_buffer_1_auto_in_d_bits_corrupt), // @[Buffer.scala:75:28]
.auto_out_0_a_ready (_coupler_to_memory_controller_port_named_axi4_auto_tl_in_a_ready), // @[LazyScope.scala:98:27]
.auto_out_0_a_valid (_picker_auto_out_0_a_valid),
.auto_out_0_a_bits_opcode (_picker_auto_out_0_a_bits_opcode),
.auto_out_0_a_bits_param (_picker_auto_out_0_a_bits_param),
.auto_out_0_a_bits_size (_picker_auto_out_0_a_bits_size),
.auto_out_0_a_bits_source (_picker_auto_out_0_a_bits_source),
.auto_out_0_a_bits_address (_picker_auto_out_0_a_bits_address),
.auto_out_0_a_bits_mask (_picker_auto_out_0_a_bits_mask),
.auto_out_0_a_bits_data (_picker_auto_out_0_a_bits_data),
.auto_out_0_a_bits_corrupt (_picker_auto_out_0_a_bits_corrupt),
.auto_out_0_d_ready (_picker_auto_out_0_d_ready),
.auto_out_0_d_valid (_coupler_to_memory_controller_port_named_axi4_auto_tl_in_d_valid), // @[LazyScope.scala:98:27]
.auto_out_0_d_bits_opcode (_coupler_to_memory_controller_port_named_axi4_auto_tl_in_d_bits_opcode), // @[LazyScope.scala:98:27]
.auto_out_0_d_bits_size (_coupler_to_memory_controller_port_named_axi4_auto_tl_in_d_bits_size), // @[LazyScope.scala:98:27]
.auto_out_0_d_bits_source (_coupler_to_memory_controller_port_named_axi4_auto_tl_in_d_bits_source), // @[LazyScope.scala:98:27]
.auto_out_0_d_bits_denied (_coupler_to_memory_controller_port_named_axi4_auto_tl_in_d_bits_denied), // @[LazyScope.scala:98:27]
.auto_out_0_d_bits_data (_coupler_to_memory_controller_port_named_axi4_auto_tl_in_d_bits_data), // @[LazyScope.scala:98:27]
.auto_out_0_d_bits_corrupt (_coupler_to_memory_controller_port_named_axi4_auto_tl_in_d_bits_corrupt) // @[LazyScope.scala:98:27]
); // @[ProbePicker.scala:69:28]
TLInterconnectCoupler_mbus_to_memory_controller_port_named_axi4 coupler_to_memory_controller_port_named_axi4 ( // @[LazyScope.scala:98:27]
.clock (_fixedClockNode_auto_anon_out_0_clock), // @[ClockGroup.scala:115:114]
.reset (_fixedClockNode_auto_anon_out_0_reset), // @[ClockGroup.scala:115:114]
.auto_widget_anon_in_a_ready (_coupler_to_memory_controller_port_named_axi4_auto_widget_anon_in_a_ready),
.auto_widget_anon_in_a_valid (_coupler_to_memory_controller_port_named_axi4_auto_tl_out_a_valid), // @[LazyScope.scala:98:27]
.auto_widget_anon_in_a_bits_opcode (_coupler_to_memory_controller_port_named_axi4_auto_tl_out_a_bits_opcode), // @[LazyScope.scala:98:27]
.auto_widget_anon_in_a_bits_param (_coupler_to_memory_controller_port_named_axi4_auto_tl_out_a_bits_param), // @[LazyScope.scala:98:27]
.auto_widget_anon_in_a_bits_size (_coupler_to_memory_controller_port_named_axi4_auto_tl_out_a_bits_size), // @[LazyScope.scala:98:27]
.auto_widget_anon_in_a_bits_source (_coupler_to_memory_controller_port_named_axi4_auto_tl_out_a_bits_source), // @[LazyScope.scala:98:27]
.auto_widget_anon_in_a_bits_address (_coupler_to_memory_controller_port_named_axi4_auto_tl_out_a_bits_address), // @[LazyScope.scala:98:27]
.auto_widget_anon_in_a_bits_mask (_coupler_to_memory_controller_port_named_axi4_auto_tl_out_a_bits_mask), // @[LazyScope.scala:98:27]
.auto_widget_anon_in_a_bits_data (_coupler_to_memory_controller_port_named_axi4_auto_tl_out_a_bits_data), // @[LazyScope.scala:98:27]
.auto_widget_anon_in_a_bits_corrupt (_coupler_to_memory_controller_port_named_axi4_auto_tl_out_a_bits_corrupt), // @[LazyScope.scala:98:27]
.auto_widget_anon_in_d_ready (_coupler_to_memory_controller_port_named_axi4_auto_tl_out_d_ready), // @[LazyScope.scala:98:27]
.auto_widget_anon_in_d_valid (_coupler_to_memory_controller_port_named_axi4_auto_widget_anon_in_d_valid),
.auto_widget_anon_in_d_bits_opcode (_coupler_to_memory_controller_port_named_axi4_auto_widget_anon_in_d_bits_opcode),
.auto_widget_anon_in_d_bits_size (_coupler_to_memory_controller_port_named_axi4_auto_widget_anon_in_d_bits_size),
.auto_widget_anon_in_d_bits_source (_coupler_to_memory_controller_port_named_axi4_auto_widget_anon_in_d_bits_source),
.auto_widget_anon_in_d_bits_denied (_coupler_to_memory_controller_port_named_axi4_auto_widget_anon_in_d_bits_denied),
.auto_widget_anon_in_d_bits_data (_coupler_to_memory_controller_port_named_axi4_auto_widget_anon_in_d_bits_data),
.auto_widget_anon_in_d_bits_corrupt (_coupler_to_memory_controller_port_named_axi4_auto_widget_anon_in_d_bits_corrupt),
.auto_axi4yank_out_aw_ready (auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_aw_ready),
.auto_axi4yank_out_aw_valid (auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_aw_valid),
.auto_axi4yank_out_aw_bits_id (auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_aw_bits_id),
.auto_axi4yank_out_aw_bits_addr (auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_aw_bits_addr),
.auto_axi4yank_out_aw_bits_len (auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_aw_bits_len),
.auto_axi4yank_out_aw_bits_size (auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_aw_bits_size),
.auto_axi4yank_out_aw_bits_burst (auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_aw_bits_burst),
.auto_axi4yank_out_aw_bits_lock (auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_aw_bits_lock),
.auto_axi4yank_out_aw_bits_cache (auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_aw_bits_cache),
.auto_axi4yank_out_aw_bits_prot (auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_aw_bits_prot),
.auto_axi4yank_out_aw_bits_qos (auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_aw_bits_qos),
.auto_axi4yank_out_w_ready (auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_w_ready),
.auto_axi4yank_out_w_valid (auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_w_valid),
.auto_axi4yank_out_w_bits_data (auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_w_bits_data),
.auto_axi4yank_out_w_bits_strb (auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_w_bits_strb),
.auto_axi4yank_out_w_bits_last (auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_w_bits_last),
.auto_axi4yank_out_b_ready (auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_b_ready),
.auto_axi4yank_out_b_valid (auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_b_valid),
.auto_axi4yank_out_b_bits_id (auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_b_bits_id),
.auto_axi4yank_out_b_bits_resp (auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_b_bits_resp),
.auto_axi4yank_out_ar_ready (auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_ar_ready),
.auto_axi4yank_out_ar_valid (auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_ar_valid),
.auto_axi4yank_out_ar_bits_id (auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_ar_bits_id),
.auto_axi4yank_out_ar_bits_addr (auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_ar_bits_addr),
.auto_axi4yank_out_ar_bits_len (auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_ar_bits_len),
.auto_axi4yank_out_ar_bits_size (auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_ar_bits_size),
.auto_axi4yank_out_ar_bits_burst (auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_ar_bits_burst),
.auto_axi4yank_out_ar_bits_lock (auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_ar_bits_lock),
.auto_axi4yank_out_ar_bits_cache (auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_ar_bits_cache),
.auto_axi4yank_out_ar_bits_prot (auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_ar_bits_prot),
.auto_axi4yank_out_ar_bits_qos (auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_ar_bits_qos),
.auto_axi4yank_out_r_ready (auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_r_ready),
.auto_axi4yank_out_r_valid (auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_r_valid),
.auto_axi4yank_out_r_bits_id (auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_r_bits_id),
.auto_axi4yank_out_r_bits_data (auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_r_bits_data),
.auto_axi4yank_out_r_bits_resp (auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_r_bits_resp),
.auto_axi4yank_out_r_bits_last (auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_r_bits_last),
.auto_tl_in_a_ready (_coupler_to_memory_controller_port_named_axi4_auto_tl_in_a_ready),
.auto_tl_in_a_valid (_picker_auto_out_0_a_valid), // @[ProbePicker.scala:69:28]
.auto_tl_in_a_bits_opcode (_picker_auto_out_0_a_bits_opcode), // @[ProbePicker.scala:69:28]
.auto_tl_in_a_bits_param (_picker_auto_out_0_a_bits_param), // @[ProbePicker.scala:69:28]
.auto_tl_in_a_bits_size (_picker_auto_out_0_a_bits_size), // @[ProbePicker.scala:69:28]
.auto_tl_in_a_bits_source (_picker_auto_out_0_a_bits_source), // @[ProbePicker.scala:69:28]
.auto_tl_in_a_bits_address (_picker_auto_out_0_a_bits_address), // @[ProbePicker.scala:69:28]
.auto_tl_in_a_bits_mask (_picker_auto_out_0_a_bits_mask), // @[ProbePicker.scala:69:28]
.auto_tl_in_a_bits_data (_picker_auto_out_0_a_bits_data), // @[ProbePicker.scala:69:28]
.auto_tl_in_a_bits_corrupt (_picker_auto_out_0_a_bits_corrupt), // @[ProbePicker.scala:69:28]
.auto_tl_in_d_ready (_picker_auto_out_0_d_ready), // @[ProbePicker.scala:69:28]
.auto_tl_in_d_valid (_coupler_to_memory_controller_port_named_axi4_auto_tl_in_d_valid),
.auto_tl_in_d_bits_opcode (_coupler_to_memory_controller_port_named_axi4_auto_tl_in_d_bits_opcode),
.auto_tl_in_d_bits_size (_coupler_to_memory_controller_port_named_axi4_auto_tl_in_d_bits_size),
.auto_tl_in_d_bits_source (_coupler_to_memory_controller_port_named_axi4_auto_tl_in_d_bits_source),
.auto_tl_in_d_bits_denied (_coupler_to_memory_controller_port_named_axi4_auto_tl_in_d_bits_denied),
.auto_tl_in_d_bits_data (_coupler_to_memory_controller_port_named_axi4_auto_tl_in_d_bits_data),
.auto_tl_in_d_bits_corrupt (_coupler_to_memory_controller_port_named_axi4_auto_tl_in_d_bits_corrupt),
.auto_tl_out_a_ready (_coupler_to_memory_controller_port_named_axi4_auto_widget_anon_in_a_ready), // @[LazyScope.scala:98:27]
.auto_tl_out_a_valid (_coupler_to_memory_controller_port_named_axi4_auto_tl_out_a_valid),
.auto_tl_out_a_bits_opcode (_coupler_to_memory_controller_port_named_axi4_auto_tl_out_a_bits_opcode),
.auto_tl_out_a_bits_param (_coupler_to_memory_controller_port_named_axi4_auto_tl_out_a_bits_param),
.auto_tl_out_a_bits_size (_coupler_to_memory_controller_port_named_axi4_auto_tl_out_a_bits_size),
.auto_tl_out_a_bits_source (_coupler_to_memory_controller_port_named_axi4_auto_tl_out_a_bits_source),
.auto_tl_out_a_bits_address (_coupler_to_memory_controller_port_named_axi4_auto_tl_out_a_bits_address),
.auto_tl_out_a_bits_mask (_coupler_to_memory_controller_port_named_axi4_auto_tl_out_a_bits_mask),
.auto_tl_out_a_bits_data (_coupler_to_memory_controller_port_named_axi4_auto_tl_out_a_bits_data),
.auto_tl_out_a_bits_corrupt (_coupler_to_memory_controller_port_named_axi4_auto_tl_out_a_bits_corrupt),
.auto_tl_out_d_ready (_coupler_to_memory_controller_port_named_axi4_auto_tl_out_d_ready),
.auto_tl_out_d_valid (_coupler_to_memory_controller_port_named_axi4_auto_widget_anon_in_d_valid), // @[LazyScope.scala:98:27]
.auto_tl_out_d_bits_opcode (_coupler_to_memory_controller_port_named_axi4_auto_widget_anon_in_d_bits_opcode), // @[LazyScope.scala:98:27]
.auto_tl_out_d_bits_size (_coupler_to_memory_controller_port_named_axi4_auto_widget_anon_in_d_bits_size), // @[LazyScope.scala:98:27]
.auto_tl_out_d_bits_source (_coupler_to_memory_controller_port_named_axi4_auto_widget_anon_in_d_bits_source), // @[LazyScope.scala:98:27]
.auto_tl_out_d_bits_denied (_coupler_to_memory_controller_port_named_axi4_auto_widget_anon_in_d_bits_denied), // @[LazyScope.scala:98:27]
.auto_tl_out_d_bits_data (_coupler_to_memory_controller_port_named_axi4_auto_widget_anon_in_d_bits_data), // @[LazyScope.scala:98:27]
.auto_tl_out_d_bits_corrupt (_coupler_to_memory_controller_port_named_axi4_auto_widget_anon_in_d_bits_corrupt) // @[LazyScope.scala:98:27]
); // @[LazyScope.scala:98:27]
TLBuffer_a28d64s4k1z3u buffer_1 ( // @[Buffer.scala:75:28]
.clock (_fixedClockNode_auto_anon_out_0_clock), // @[ClockGroup.scala:115:114]
.reset (_fixedClockNode_auto_anon_out_0_reset), // @[ClockGroup.scala:115:114]
.auto_in_a_ready (_buffer_1_auto_in_a_ready),
.auto_in_a_valid (_picker_auto_out_1_a_valid), // @[ProbePicker.scala:69:28]
.auto_in_a_bits_opcode (_picker_auto_out_1_a_bits_opcode), // @[ProbePicker.scala:69:28]
.auto_in_a_bits_param (_picker_auto_out_1_a_bits_param), // @[ProbePicker.scala:69:28]
.auto_in_a_bits_size (_picker_auto_out_1_a_bits_size), // @[ProbePicker.scala:69:28]
.auto_in_a_bits_source (_picker_auto_out_1_a_bits_source), // @[ProbePicker.scala:69:28]
.auto_in_a_bits_address (_picker_auto_out_1_a_bits_address), // @[ProbePicker.scala:69:28]
.auto_in_a_bits_mask (_picker_auto_out_1_a_bits_mask), // @[ProbePicker.scala:69:28]
.auto_in_a_bits_data (_picker_auto_out_1_a_bits_data), // @[ProbePicker.scala:69:28]
.auto_in_a_bits_corrupt (_picker_auto_out_1_a_bits_corrupt), // @[ProbePicker.scala:69:28]
.auto_in_d_ready (_picker_auto_out_1_d_ready), // @[ProbePicker.scala:69:28]
.auto_in_d_valid (_buffer_1_auto_in_d_valid),
.auto_in_d_bits_opcode (_buffer_1_auto_in_d_bits_opcode),
.auto_in_d_bits_param (_buffer_1_auto_in_d_bits_param),
.auto_in_d_bits_size (_buffer_1_auto_in_d_bits_size),
.auto_in_d_bits_source (_buffer_1_auto_in_d_bits_source),
.auto_in_d_bits_sink (_buffer_1_auto_in_d_bits_sink),
.auto_in_d_bits_denied (_buffer_1_auto_in_d_bits_denied),
.auto_in_d_bits_data (_buffer_1_auto_in_d_bits_data),
.auto_in_d_bits_corrupt (_buffer_1_auto_in_d_bits_corrupt),
.auto_out_a_ready (auto_buffer_out_a_ready),
.auto_out_a_valid (auto_buffer_out_a_valid),
.auto_out_a_bits_opcode (auto_buffer_out_a_bits_opcode),
.auto_out_a_bits_param (auto_buffer_out_a_bits_param),
.auto_out_a_bits_size (auto_buffer_out_a_bits_size),
.auto_out_a_bits_source (auto_buffer_out_a_bits_source),
.auto_out_a_bits_address (auto_buffer_out_a_bits_address),
.auto_out_a_bits_mask (auto_buffer_out_a_bits_mask),
.auto_out_a_bits_data (auto_buffer_out_a_bits_data),
.auto_out_a_bits_corrupt (auto_buffer_out_a_bits_corrupt),
.auto_out_d_ready (auto_buffer_out_d_ready),
.auto_out_d_valid (auto_buffer_out_d_valid),
.auto_out_d_bits_opcode (auto_buffer_out_d_bits_opcode),
.auto_out_d_bits_param (auto_buffer_out_d_bits_param),
.auto_out_d_bits_size (auto_buffer_out_d_bits_size),
.auto_out_d_bits_source (auto_buffer_out_d_bits_source),
.auto_out_d_bits_sink (auto_buffer_out_d_bits_sink),
.auto_out_d_bits_denied (auto_buffer_out_d_bits_denied),
.auto_out_d_bits_data (auto_buffer_out_d_bits_data),
.auto_out_d_bits_corrupt (auto_buffer_out_d_bits_corrupt)
); // @[Buffer.scala:75:28]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File util.scala:
//******************************************************************************
// Copyright (c) 2015 - 2019, The Regents of the University of California (Regents).
// All Rights Reserved. See LICENSE and LICENSE.SiFive for license details.
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
// Utility Functions
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
package boom.v4.util
import chisel3._
import chisel3.util._
import freechips.rocketchip.rocket.Instructions._
import freechips.rocketchip.rocket._
import freechips.rocketchip.util.{Str}
import org.chipsalliance.cde.config.{Parameters}
import freechips.rocketchip.tile.{TileKey}
import boom.v4.common.{MicroOp}
import boom.v4.exu.{BrUpdateInfo}
/**
 * Object to XOR fold an input register of fullLength into a compressedLength.
*/
object Fold
{
def apply(input: UInt, compressedLength: Int, fullLength: Int): UInt = {
val clen = compressedLength
val hlen = fullLength
if (hlen <= clen) {
input
} else {
var res = 0.U(clen.W)
var remaining = input.asUInt
for (i <- 0 to hlen-1 by clen) {
val len = if (i + clen > hlen ) (hlen - i) else clen
require(len > 0)
res = res(clen-1,0) ^ remaining(len-1,0)
remaining = remaining >> len.U
}
res
}
}
}
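// Illustrative note (added for clarity; not part of the original BOOM util.scala):
// Fold XORs the input together clen bits at a time, so folding a 10-bit value to
// 4 bits reduces to x(3,0) ^ x(7,4) ^ x(9,8). For example:
//   Fold("b10_1101_0110".U, 4, 10)  // 0b0110 ^ 0b1101 ^ 0b0010 == "b1001".U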
/**
* Object to check if MicroOp was killed due to a branch mispredict.
* Uses "Fast" branch masks
*/
object IsKilledByBranch
{
def apply(brupdate: BrUpdateInfo, flush: Bool, uop: MicroOp): Bool = {
return apply(brupdate, flush, uop.br_mask)
}
def apply(brupdate: BrUpdateInfo, flush: Bool, uop_mask: UInt): Bool = {
return maskMatch(brupdate.b1.mispredict_mask, uop_mask) || flush
}
def apply[T <: boom.v4.common.HasBoomUOP](brupdate: BrUpdateInfo, flush: Bool, bundle: T): Bool = {
return apply(brupdate, flush, bundle.uop)
}
def apply[T <: boom.v4.common.HasBoomUOP](brupdate: BrUpdateInfo, flush: Bool, bundle: Valid[T]): Bool = {
return apply(brupdate, flush, bundle.bits)
}
}
/**
* Object to return new MicroOp with a new BR mask given a MicroOp mask
* and old BR mask.
*/
object GetNewUopAndBrMask
{
def apply(uop: MicroOp, brupdate: BrUpdateInfo)
(implicit p: Parameters): MicroOp = {
val newuop = WireInit(uop)
newuop.br_mask := uop.br_mask & ~brupdate.b1.resolve_mask
newuop
}
}
/**
* Object to return a BR mask given a MicroOp mask and old BR mask.
*/
object GetNewBrMask
{
def apply(brupdate: BrUpdateInfo, uop: MicroOp): UInt = {
return uop.br_mask & ~brupdate.b1.resolve_mask
}
def apply(brupdate: BrUpdateInfo, br_mask: UInt): UInt = {
return br_mask & ~brupdate.b1.resolve_mask
}
}
object UpdateBrMask
{
def apply(brupdate: BrUpdateInfo, uop: MicroOp): MicroOp = {
val out = WireInit(uop)
out.br_mask := GetNewBrMask(brupdate, uop)
out
}
def apply[T <: boom.v4.common.HasBoomUOP](brupdate: BrUpdateInfo, bundle: T): T = {
val out = WireInit(bundle)
out.uop.br_mask := GetNewBrMask(brupdate, bundle.uop.br_mask)
out
}
def apply[T <: boom.v4.common.HasBoomUOP](brupdate: BrUpdateInfo, flush: Bool, bundle: Valid[T]): Valid[T] = {
val out = WireInit(bundle)
out.bits.uop.br_mask := GetNewBrMask(brupdate, bundle.bits.uop.br_mask)
out.valid := bundle.valid && !IsKilledByBranch(brupdate, flush, bundle.bits.uop.br_mask)
out
}
}
/**
* Object to check if at least 1 bit matches in two masks
*/
object maskMatch
{
def apply(msk1: UInt, msk2: UInt): Bool = (msk1 & msk2) =/= 0.U
}
/**
* Object to clear one bit in a mask given an index
*/
object clearMaskBit
{
def apply(msk: UInt, idx: UInt): UInt = (msk & ~(1.U << idx))(msk.getWidth-1, 0)
}
/**
* Object to shift a register over by one bit and concat a new one
*/
object PerformShiftRegister
{
def apply(reg_val: UInt, new_bit: Bool): UInt = {
reg_val := Cat(reg_val(reg_val.getWidth-1, 0).asUInt, new_bit.asUInt).asUInt
reg_val
}
}
/**
* Object to shift a register over by one bit, wrapping the top bit around to the bottom
* (XOR'ed with a new-bit), and evicting a bit at index HLEN.
* This is used to simulate a longer HLEN-width shift register that is folded
* down to a compressed CLEN.
*/
object PerformCircularShiftRegister
{
def apply(csr: UInt, new_bit: Bool, evict_bit: Bool, hlen: Int, clen: Int): UInt = {
val carry = csr(clen-1)
val newval = Cat(csr, new_bit ^ carry) ^ (evict_bit << (hlen % clen).U)
newval
}
}
/**
* Object to increment an input value, wrapping it if
* necessary.
*/
object WrapAdd
{
// "n" is the number of increments, so we wrap at n-1.
def apply(value: UInt, amt: UInt, n: Int): UInt = {
if (isPow2(n)) {
(value + amt)(log2Ceil(n)-1,0)
} else {
val sum = Cat(0.U(1.W), value) + Cat(0.U(1.W), amt)
Mux(sum >= n.U,
sum - n.U,
sum)
}
}
}
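// Illustrative note (added; not in the original source): WrapAdd behaviour.
// For a power-of-two n the truncated sum wraps for free; otherwise a single
// conditional subtraction of n is used, which is sufficient whenever
// value + amt < 2*n. For example:
//   WrapAdd(7.U, 5.U, 8)   // (7 + 5) truncated to 3 bits == 4
//   WrapAdd(7.U, 5.U, 10)  // 12 >= 10, so the result is 12 - 10 == 2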
/**
* Object to decrement an input value, wrapping it if
* necessary.
*/
object WrapSub
{
// "n" is the number of increments, so we wrap to n-1.
def apply(value: UInt, amt: Int, n: Int): UInt = {
if (isPow2(n)) {
(value - amt.U)(log2Ceil(n)-1,0)
} else {
val v = Cat(0.U(1.W), value)
val b = Cat(0.U(1.W), amt.U)
Mux(value >= amt.U,
value - amt.U,
n.U - amt.U + value)
}
}
}
/**
* Object to increment an input value, wrapping it if
* necessary.
*/
object WrapInc
{
// "n" is the number of increments, so we wrap at n-1.
def apply(value: UInt, n: Int): UInt = {
if (isPow2(n)) {
(value + 1.U)(log2Ceil(n)-1,0)
} else {
val wrap = (value === (n-1).U)
Mux(wrap, 0.U, value + 1.U)
}
}
}
/**
* Object to decrement an input value, wrapping it if
* necessary.
*/
object WrapDec
{
// "n" is the number of increments, so we wrap at n-1.
def apply(value: UInt, n: Int): UInt = {
if (isPow2(n)) {
(value - 1.U)(log2Ceil(n)-1,0)
} else {
val wrap = (value === 0.U)
Mux(wrap, (n-1).U, value - 1.U)
}
}
}
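// Illustrative note (added; not in the original source): with a non-power-of-two
// depth such as n == 10, WrapInc(9.U, 10) returns 0.U and WrapDec(0.U, 10)
// returns 9.U; for power-of-two depths the truncated add/subtract wraps
// implicitly without the explicit comparison.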
/**
* Object to mask off lower bits of a PC to align to a "b"
* Byte boundary.
*/
object AlignPCToBoundary
{
def apply(pc: UInt, b: Int): UInt = {
    // Invert for the scenario where pc is longer than b
// (which would clear all bits above size(b)).
~(~pc | (b-1).U)
}
}
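// Illustrative note (added; not in the original source): the double inversion in
// AlignPCToBoundary clears only the low log2(b) bits while keeping the upper PC
// bits intact, e.g. AlignPCToBoundary("h1237".U(40.W), 8) == "h1230".U.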
/**
* Object to rotate a signal left by one
*/
object RotateL1
{
def apply(signal: UInt): UInt = {
val w = signal.getWidth
val out = Cat(signal(w-2,0), signal(w-1))
return out
}
}
/**
* Object to sext a value to a particular length.
*/
object Sext
{
def apply(x: UInt, length: Int): UInt = {
if (x.getWidth == length) return x
else return Cat(Fill(length-x.getWidth, x(x.getWidth-1)), x)
}
}
/**
* Object to translate from BOOM's special "packed immediate" to a 32b signed immediate
* Asking for U-type gives it shifted up 12 bits.
*/
object ImmGen
{
import boom.v4.common.{LONGEST_IMM_SZ, IS_B, IS_I, IS_J, IS_S, IS_U, IS_N}
def apply(i: UInt, isel: UInt): UInt = {
val ip = Mux(isel === IS_N, 0.U(LONGEST_IMM_SZ.W), i)
val sign = ip(LONGEST_IMM_SZ-1).asSInt
val i30_20 = Mux(isel === IS_U, ip(18,8).asSInt, sign)
val i19_12 = Mux(isel === IS_U || isel === IS_J, ip(7,0).asSInt, sign)
val i11 = Mux(isel === IS_U, 0.S,
Mux(isel === IS_J || isel === IS_B, ip(8).asSInt, sign))
val i10_5 = Mux(isel === IS_U, 0.S, ip(18,14).asSInt)
val i4_1 = Mux(isel === IS_U, 0.S, ip(13,9).asSInt)
val i0 = Mux(isel === IS_S || isel === IS_I, ip(8).asSInt, 0.S)
return Cat(sign, i30_20, i19_12, i11, i10_5, i4_1, i0)
}
}
/**
* Object to see if an instruction is a JALR.
*/
object DebugIsJALR
{
def apply(inst: UInt): Bool = {
// TODO Chisel not sure why this won't compile
// val is_jalr = rocket.DecodeLogic(inst, List(Bool(false)),
// Array(
// JALR -> Bool(true)))
inst(6,0) === "b1100111".U
}
}
/**
* Object to take an instruction and output its branch or jal target. Only used
 * for a debug assert (nowhere else would we jump straight from instruction
* bits to a target).
*/
object DebugGetBJImm
{
def apply(inst: UInt): UInt = {
// TODO Chisel not sure why this won't compile
//val csignals =
//rocket.DecodeLogic(inst,
// List(Bool(false), Bool(false)),
// Array(
// BEQ -> List(Bool(true ), Bool(false)),
// BNE -> List(Bool(true ), Bool(false)),
// BGE -> List(Bool(true ), Bool(false)),
// BGEU -> List(Bool(true ), Bool(false)),
// BLT -> List(Bool(true ), Bool(false)),
// BLTU -> List(Bool(true ), Bool(false))
// ))
//val is_br :: nothing :: Nil = csignals
val is_br = (inst(6,0) === "b1100011".U)
val br_targ = Cat(Fill(12, inst(31)), Fill(8,inst(31)), inst(7), inst(30,25), inst(11,8), 0.U(1.W))
val jal_targ= Cat(Fill(12, inst(31)), inst(19,12), inst(20), inst(30,25), inst(24,21), 0.U(1.W))
Mux(is_br, br_targ, jal_targ)
}
}
/**
* Object to return the lowest bit position after the head.
*/
object AgePriorityEncoder
{
def apply(in: Seq[Bool], head: UInt): UInt = {
val n = in.size
val width = log2Ceil(in.size)
val n_padded = 1 << width
val temp_vec = (0 until n_padded).map(i => if (i < n) in(i) && i.U >= head else false.B) ++ in
val idx = PriorityEncoder(temp_vec)
idx(width-1, 0) //discard msb
}
}
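// Illustrative note (added; not in the original source): AgePriorityEncoder picks
// the first asserted entry at or after head, wrapping around when nothing is set
// there. With in = Seq(true.B, false.B, false.B, true.B) and head = 2.U, index 3 is
// the first hit at-or-after head, so the result is 3.U. With only in(0) set and
// head = 2.U, the hit comes from the appended copy at position 4, and truncating
// to log2Ceil(4) = 2 bits returns 0.U.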
/**
* Object to determine whether queue
* index i0 is older than index i1.
*/
object IsOlder
{
def apply(i0: UInt, i1: UInt, head: UInt) = ((i0 < i1) ^ (i0 < head) ^ (i1 < head))
}
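// Illustrative note (added; not in the original source): IsOlder is a circular
// comparison relative to head. With head = 5.U, index 6 was allocated before
// index 2 (which wrapped past the end), and indeed
//   IsOlder(6.U, 2.U, 5.U)  // (6<2) ^ (6<5) ^ (2<5) = 0 ^ 0 ^ 1 = true
// The IsYoungerMask object below builds the matching per-entry mask of indices
// strictly younger than i.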
object IsYoungerMask
{
def apply(i: UInt, head: UInt, n: Integer): UInt = {
val hi_mask = ~MaskLower(UIntToOH(i)(n-1,0))
val lo_mask = ~MaskUpper(UIntToOH(head)(n-1,0))
Mux(i < head, hi_mask & lo_mask, hi_mask | lo_mask)(n-1,0)
}
}
/**
* Set all bits at or below the highest order '1'.
*/
object MaskLower
{
def apply(in: UInt) = {
val n = in.getWidth
(0 until n).map(i => in >> i.U).reduce(_|_)
}
}
/**
* Set all bits at or above the lowest order '1'.
*/
object MaskUpper
{
def apply(in: UInt) = {
val n = in.getWidth
(0 until n).map(i => (in << i.U)(n-1,0)).reduce(_|_)
}
}
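// Illustrative note (added; not in the original source): for a 4-bit input,
//   MaskLower("b0100".U(4.W))  // == "b0111".U, highest set bit and all bits below
//   MaskUpper("b0100".U(4.W))  // == "b1100".U, lowest set bit and all bits above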
/**
* Transpose a matrix of Chisel Vecs.
*/
object Transpose
{
def apply[T <: chisel3.Data](in: Vec[Vec[T]]) = {
val n = in(0).size
VecInit((0 until n).map(i => VecInit(in.map(row => row(i)))))
}
}
/**
* N-wide one-hot priority encoder.
*/
object SelectFirstN
{
def apply(in: UInt, n: Int) = {
val sels = Wire(Vec(n, UInt(in.getWidth.W)))
var mask = in
for (i <- 0 until n) {
sels(i) := PriorityEncoderOH(mask)
mask = mask & ~sels(i)
}
sels
}
}
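// Illustrative note (added; not in the original source): SelectFirstN peels off the
// n lowest set bits one at a time as one-hot selects. For in = "b01101".U(5.W) and
// n = 2, sels(0) == "b00001".U and sels(1) == "b00100".U.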
/**
* Connect the first k of n valid input interfaces to k output interfaces.
*/
class Compactor[T <: chisel3.Data](n: Int, k: Int, gen: T) extends Module
{
require(n >= k)
val io = IO(new Bundle {
val in = Vec(n, Flipped(DecoupledIO(gen)))
val out = Vec(k, DecoupledIO(gen))
})
if (n == k) {
io.out <> io.in
} else {
val counts = io.in.map(_.valid).scanLeft(1.U(k.W)) ((c,e) => Mux(e, (c<<1)(k-1,0), c))
val sels = Transpose(VecInit(counts map (c => VecInit(c.asBools)))) map (col =>
(col zip io.in.map(_.valid)) map {case (c,v) => c && v})
val in_readys = counts map (row => (row.asBools zip io.out.map(_.ready)) map {case (c,r) => c && r} reduce (_||_))
val out_valids = sels map (col => col.reduce(_||_))
val out_data = sels map (s => Mux1H(s, io.in.map(_.bits)))
in_readys zip io.in foreach {case (r,i) => i.ready := r}
out_valids zip out_data zip io.out foreach {case ((v,d),o) => o.valid := v; o.bits := d}
}
}
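// Illustrative note (added; not in the original source): in the general n > k case
// of Compactor, counts(i) is the one-hot output slot that input i would occupy
// given the valids below it, so an input only sees ready when that particular
// output is ready, and inputs beyond the first k valid ones are left unselected
// for the cycle.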
/**
* Create a queue that can be killed with a branch kill signal.
* Assumption: enq.valid only high if not killed by branch (so don't check IsKilled on io.enq).
*/
class BranchKillableQueue[T <: boom.v4.common.HasBoomUOP](gen: T, entries: Int, flush_fn: boom.v4.common.MicroOp => Bool = u => true.B, fastDeq: Boolean = false)
(implicit p: org.chipsalliance.cde.config.Parameters)
extends boom.v4.common.BoomModule()(p)
with boom.v4.common.HasBoomCoreParameters
{
val io = IO(new Bundle {
val enq = Flipped(Decoupled(gen))
val deq = Decoupled(gen)
val brupdate = Input(new BrUpdateInfo())
val flush = Input(Bool())
val empty = Output(Bool())
val count = Output(UInt(log2Ceil(entries).W))
})
if (fastDeq && entries > 1) {
// Pipeline dequeue selection so the mux gets an entire cycle
val main = Module(new BranchKillableQueue(gen, entries-1, flush_fn, false))
val out_reg = Reg(gen)
val out_valid = RegInit(false.B)
val out_uop = Reg(new MicroOp)
main.io.enq <> io.enq
main.io.brupdate := io.brupdate
main.io.flush := io.flush
io.empty := main.io.empty && !out_valid
io.count := main.io.count + out_valid
io.deq.valid := out_valid
io.deq.bits := out_reg
io.deq.bits.uop := out_uop
out_uop := UpdateBrMask(io.brupdate, out_uop)
out_valid := out_valid && !IsKilledByBranch(io.brupdate, false.B, out_uop) && !(io.flush && flush_fn(out_uop))
main.io.deq.ready := false.B
when (io.deq.fire || !out_valid) {
out_valid := main.io.deq.valid && !IsKilledByBranch(io.brupdate, false.B, main.io.deq.bits.uop) && !(io.flush && flush_fn(main.io.deq.bits.uop))
out_reg := main.io.deq.bits
out_uop := UpdateBrMask(io.brupdate, main.io.deq.bits.uop)
main.io.deq.ready := true.B
}
} else {
val ram = Mem(entries, gen)
val valids = RegInit(VecInit(Seq.fill(entries) {false.B}))
val uops = Reg(Vec(entries, new MicroOp))
val enq_ptr = Counter(entries)
val deq_ptr = Counter(entries)
val maybe_full = RegInit(false.B)
val ptr_match = enq_ptr.value === deq_ptr.value
io.empty := ptr_match && !maybe_full
val full = ptr_match && maybe_full
val do_enq = WireInit(io.enq.fire && !IsKilledByBranch(io.brupdate, false.B, io.enq.bits.uop) && !(io.flush && flush_fn(io.enq.bits.uop)))
val do_deq = WireInit((io.deq.ready || !valids(deq_ptr.value)) && !io.empty)
for (i <- 0 until entries) {
val mask = uops(i).br_mask
val uop = uops(i)
valids(i) := valids(i) && !IsKilledByBranch(io.brupdate, false.B, mask) && !(io.flush && flush_fn(uop))
when (valids(i)) {
uops(i).br_mask := GetNewBrMask(io.brupdate, mask)
}
}
when (do_enq) {
ram(enq_ptr.value) := io.enq.bits
valids(enq_ptr.value) := true.B
uops(enq_ptr.value) := io.enq.bits.uop
uops(enq_ptr.value).br_mask := GetNewBrMask(io.brupdate, io.enq.bits.uop)
enq_ptr.inc()
}
when (do_deq) {
valids(deq_ptr.value) := false.B
deq_ptr.inc()
}
when (do_enq =/= do_deq) {
maybe_full := do_enq
}
io.enq.ready := !full
val out = Wire(gen)
out := ram(deq_ptr.value)
out.uop := uops(deq_ptr.value)
io.deq.valid := !io.empty && valids(deq_ptr.value)
io.deq.bits := out
val ptr_diff = enq_ptr.value - deq_ptr.value
if (isPow2(entries)) {
io.count := Cat(maybe_full && ptr_match, ptr_diff)
}
else {
io.count := Mux(ptr_match,
Mux(maybe_full,
entries.asUInt, 0.U),
Mux(deq_ptr.value > enq_ptr.value,
entries.asUInt + ptr_diff, ptr_diff))
}
}
}
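// Illustrative note (added; not in the original source): every resident entry of
// BranchKillableQueue re-checks its branch mask each cycle, so an entry disappears
// as soon as brupdate.b1.mispredict_mask overlaps its br_mask, or when io.flush is
// high and flush_fn(entry.uop) is true; surviving entries simply clear the resolved
// mask bits via GetNewBrMask.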
// ------------------------------------------
// Printf helper functions
// ------------------------------------------
object BoolToChar
{
/**
* Take in a Chisel Bool and convert it into a Str
* based on the Chars given
*
* @param c_bool Chisel Bool
* @param trueChar Scala Char if bool is true
* @param falseChar Scala Char if bool is false
* @return UInt ASCII Char for "trueChar" or "falseChar"
*/
def apply(c_bool: Bool, trueChar: Char, falseChar: Char = '-'): UInt = {
Mux(c_bool, Str(trueChar), Str(falseChar))
}
}
object CfiTypeToChars
{
/**
* Get a Vec of Strs that can be used for printing
*
* @param cfi_type specific cfi type
* @return Vec of Strs (must be indexed to get specific char)
*/
def apply(cfi_type: UInt) = {
val strings = Seq("----", "BR ", "JAL ", "JALR")
val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) })
multiVec(cfi_type)
}
}
object BpdTypeToChars
{
/**
* Get a Vec of Strs that can be used for printing
*
* @param bpd_type specific bpd type
* @return Vec of Strs (must be indexed to get specific char)
*/
def apply(bpd_type: UInt) = {
val strings = Seq("BR ", "JUMP", "----", "RET ", "----", "CALL", "----", "----")
val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) })
multiVec(bpd_type)
}
}
object RobTypeToChars
{
/**
* Get a Vec of Strs that can be used for printing
*
* @param rob_type specific rob type
* @return Vec of Strs (must be indexed to get specific char)
*/
def apply(rob_type: UInt) = {
val strings = Seq("RST", "NML", "RBK", " WT")
val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) })
multiVec(rob_type)
}
}
object XRegToChars
{
/**
* Get a Vec of Strs that can be used for printing
*
* @param xreg specific register number
* @return Vec of Strs (must be indexed to get specific char)
*/
def apply(xreg: UInt) = {
val strings = Seq(" x0", " ra", " sp", " gp",
" tp", " t0", " t1", " t2",
" s0", " s1", " a0", " a1",
" a2", " a3", " a4", " a5",
" a6", " a7", " s2", " s3",
" s4", " s5", " s6", " s7",
" s8", " s9", "s10", "s11",
" t3", " t4", " t5", " t6")
val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) })
multiVec(xreg)
}
}
object FPRegToChars
{
/**
* Get a Vec of Strs that can be used for printing
*
* @param fpreg specific register number
* @return Vec of Strs (must be indexed to get specific char)
*/
def apply(fpreg: UInt) = {
val strings = Seq(" ft0", " ft1", " ft2", " ft3",
" ft4", " ft5", " ft6", " ft7",
" fs0", " fs1", " fa0", " fa1",
" fa2", " fa3", " fa4", " fa5",
" fa6", " fa7", " fs2", " fs3",
" fs4", " fs5", " fs6", " fs7",
" fs8", " fs9", "fs10", "fs11",
" ft8", " ft9", "ft10", "ft11")
val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) })
multiVec(fpreg)
}
}
object BoomCoreStringPrefix
{
/**
* Add prefix to BOOM strings (currently only adds the hartId)
*
* @param strs list of strings
* @return String combining the list with the prefix per line
*/
def apply(strs: String*)(implicit p: Parameters) = {
val prefix = "[C" + s"${p(TileKey).tileId}" + "] "
strs.map(str => prefix + str + "\n").mkString("")
}
}
class BranchKillablePipeline[T <: boom.v4.common.HasBoomUOP](gen: T, stages: Int)
(implicit p: org.chipsalliance.cde.config.Parameters)
extends boom.v4.common.BoomModule()(p)
with boom.v4.common.HasBoomCoreParameters
{
val io = IO(new Bundle {
val req = Input(Valid(gen))
val flush = Input(Bool())
val brupdate = Input(new BrUpdateInfo)
val resp = Output(Vec(stages, Valid(gen)))
})
require(stages > 0)
val uops = Reg(Vec(stages, Valid(gen)))
uops(0).valid := io.req.valid && !IsKilledByBranch(io.brupdate, io.flush, io.req.bits)
uops(0).bits := UpdateBrMask(io.brupdate, io.req.bits)
for (i <- 1 until stages) {
uops(i).valid := uops(i-1).valid && !IsKilledByBranch(io.brupdate, io.flush, uops(i-1).bits)
uops(i).bits := UpdateBrMask(io.brupdate, uops(i-1).bits)
}
for (i <- 0 until stages) { when (reset.asBool) { uops(i).valid := false.B } }
io.resp := uops
}
| module BranchKillableQueue_9( // @[util.scala:458:7]
input clock, // @[util.scala:458:7]
input reset, // @[util.scala:458:7]
output io_enq_ready, // @[util.scala:463:14]
input io_enq_valid, // @[util.scala:463:14]
input [31:0] io_enq_bits_uop_inst, // @[util.scala:463:14]
input [31:0] io_enq_bits_uop_debug_inst, // @[util.scala:463:14]
input io_enq_bits_uop_is_rvc, // @[util.scala:463:14]
input [39:0] io_enq_bits_uop_debug_pc, // @[util.scala:463:14]
input io_enq_bits_uop_iq_type_0, // @[util.scala:463:14]
input io_enq_bits_uop_iq_type_1, // @[util.scala:463:14]
input io_enq_bits_uop_iq_type_2, // @[util.scala:463:14]
input io_enq_bits_uop_iq_type_3, // @[util.scala:463:14]
input io_enq_bits_uop_fu_code_0, // @[util.scala:463:14]
input io_enq_bits_uop_fu_code_1, // @[util.scala:463:14]
input io_enq_bits_uop_fu_code_2, // @[util.scala:463:14]
input io_enq_bits_uop_fu_code_3, // @[util.scala:463:14]
input io_enq_bits_uop_fu_code_4, // @[util.scala:463:14]
input io_enq_bits_uop_fu_code_5, // @[util.scala:463:14]
input io_enq_bits_uop_fu_code_6, // @[util.scala:463:14]
input io_enq_bits_uop_fu_code_7, // @[util.scala:463:14]
input io_enq_bits_uop_fu_code_8, // @[util.scala:463:14]
input io_enq_bits_uop_fu_code_9, // @[util.scala:463:14]
input io_enq_bits_uop_iw_issued, // @[util.scala:463:14]
input io_enq_bits_uop_iw_issued_partial_agen, // @[util.scala:463:14]
input io_enq_bits_uop_iw_issued_partial_dgen, // @[util.scala:463:14]
input [2:0] io_enq_bits_uop_iw_p1_speculative_child, // @[util.scala:463:14]
input [2:0] io_enq_bits_uop_iw_p2_speculative_child, // @[util.scala:463:14]
input io_enq_bits_uop_iw_p1_bypass_hint, // @[util.scala:463:14]
input io_enq_bits_uop_iw_p2_bypass_hint, // @[util.scala:463:14]
input io_enq_bits_uop_iw_p3_bypass_hint, // @[util.scala:463:14]
input [2:0] io_enq_bits_uop_dis_col_sel, // @[util.scala:463:14]
input [15:0] io_enq_bits_uop_br_mask, // @[util.scala:463:14]
input [3:0] io_enq_bits_uop_br_tag, // @[util.scala:463:14]
input [3:0] io_enq_bits_uop_br_type, // @[util.scala:463:14]
input io_enq_bits_uop_is_sfb, // @[util.scala:463:14]
input io_enq_bits_uop_is_fence, // @[util.scala:463:14]
input io_enq_bits_uop_is_fencei, // @[util.scala:463:14]
input io_enq_bits_uop_is_sfence, // @[util.scala:463:14]
input io_enq_bits_uop_is_amo, // @[util.scala:463:14]
input io_enq_bits_uop_is_eret, // @[util.scala:463:14]
input io_enq_bits_uop_is_sys_pc2epc, // @[util.scala:463:14]
input io_enq_bits_uop_is_rocc, // @[util.scala:463:14]
input io_enq_bits_uop_is_mov, // @[util.scala:463:14]
input [4:0] io_enq_bits_uop_ftq_idx, // @[util.scala:463:14]
input io_enq_bits_uop_edge_inst, // @[util.scala:463:14]
input [5:0] io_enq_bits_uop_pc_lob, // @[util.scala:463:14]
input io_enq_bits_uop_taken, // @[util.scala:463:14]
input io_enq_bits_uop_imm_rename, // @[util.scala:463:14]
input [2:0] io_enq_bits_uop_imm_sel, // @[util.scala:463:14]
input [4:0] io_enq_bits_uop_pimm, // @[util.scala:463:14]
input [19:0] io_enq_bits_uop_imm_packed, // @[util.scala:463:14]
input [1:0] io_enq_bits_uop_op1_sel, // @[util.scala:463:14]
input [2:0] io_enq_bits_uop_op2_sel, // @[util.scala:463:14]
input io_enq_bits_uop_fp_ctrl_ldst, // @[util.scala:463:14]
input io_enq_bits_uop_fp_ctrl_wen, // @[util.scala:463:14]
input io_enq_bits_uop_fp_ctrl_ren1, // @[util.scala:463:14]
input io_enq_bits_uop_fp_ctrl_ren2, // @[util.scala:463:14]
input io_enq_bits_uop_fp_ctrl_ren3, // @[util.scala:463:14]
input io_enq_bits_uop_fp_ctrl_swap12, // @[util.scala:463:14]
input io_enq_bits_uop_fp_ctrl_swap23, // @[util.scala:463:14]
input [1:0] io_enq_bits_uop_fp_ctrl_typeTagIn, // @[util.scala:463:14]
input [1:0] io_enq_bits_uop_fp_ctrl_typeTagOut, // @[util.scala:463:14]
input io_enq_bits_uop_fp_ctrl_fromint, // @[util.scala:463:14]
input io_enq_bits_uop_fp_ctrl_toint, // @[util.scala:463:14]
input io_enq_bits_uop_fp_ctrl_fastpipe, // @[util.scala:463:14]
input io_enq_bits_uop_fp_ctrl_fma, // @[util.scala:463:14]
input io_enq_bits_uop_fp_ctrl_div, // @[util.scala:463:14]
input io_enq_bits_uop_fp_ctrl_sqrt, // @[util.scala:463:14]
input io_enq_bits_uop_fp_ctrl_wflags, // @[util.scala:463:14]
input io_enq_bits_uop_fp_ctrl_vec, // @[util.scala:463:14]
input [6:0] io_enq_bits_uop_rob_idx, // @[util.scala:463:14]
input [4:0] io_enq_bits_uop_ldq_idx, // @[util.scala:463:14]
input [4:0] io_enq_bits_uop_stq_idx, // @[util.scala:463:14]
input [1:0] io_enq_bits_uop_rxq_idx, // @[util.scala:463:14]
input [6:0] io_enq_bits_uop_pdst, // @[util.scala:463:14]
input [6:0] io_enq_bits_uop_prs1, // @[util.scala:463:14]
input [6:0] io_enq_bits_uop_prs2, // @[util.scala:463:14]
input [6:0] io_enq_bits_uop_prs3, // @[util.scala:463:14]
input [4:0] io_enq_bits_uop_ppred, // @[util.scala:463:14]
input io_enq_bits_uop_prs1_busy, // @[util.scala:463:14]
input io_enq_bits_uop_prs2_busy, // @[util.scala:463:14]
input io_enq_bits_uop_prs3_busy, // @[util.scala:463:14]
input io_enq_bits_uop_ppred_busy, // @[util.scala:463:14]
input [6:0] io_enq_bits_uop_stale_pdst, // @[util.scala:463:14]
input io_enq_bits_uop_exception, // @[util.scala:463:14]
input [63:0] io_enq_bits_uop_exc_cause, // @[util.scala:463:14]
input [4:0] io_enq_bits_uop_mem_cmd, // @[util.scala:463:14]
input [1:0] io_enq_bits_uop_mem_size, // @[util.scala:463:14]
input io_enq_bits_uop_mem_signed, // @[util.scala:463:14]
input io_enq_bits_uop_uses_ldq, // @[util.scala:463:14]
input io_enq_bits_uop_uses_stq, // @[util.scala:463:14]
input io_enq_bits_uop_is_unique, // @[util.scala:463:14]
input io_enq_bits_uop_flush_on_commit, // @[util.scala:463:14]
input [2:0] io_enq_bits_uop_csr_cmd, // @[util.scala:463:14]
input io_enq_bits_uop_ldst_is_rs1, // @[util.scala:463:14]
input [5:0] io_enq_bits_uop_ldst, // @[util.scala:463:14]
input [5:0] io_enq_bits_uop_lrs1, // @[util.scala:463:14]
input [5:0] io_enq_bits_uop_lrs2, // @[util.scala:463:14]
input [5:0] io_enq_bits_uop_lrs3, // @[util.scala:463:14]
input [1:0] io_enq_bits_uop_dst_rtype, // @[util.scala:463:14]
input [1:0] io_enq_bits_uop_lrs1_rtype, // @[util.scala:463:14]
input [1:0] io_enq_bits_uop_lrs2_rtype, // @[util.scala:463:14]
input io_enq_bits_uop_frs3_en, // @[util.scala:463:14]
input io_enq_bits_uop_fcn_dw, // @[util.scala:463:14]
input [4:0] io_enq_bits_uop_fcn_op, // @[util.scala:463:14]
input io_enq_bits_uop_fp_val, // @[util.scala:463:14]
input [2:0] io_enq_bits_uop_fp_rm, // @[util.scala:463:14]
input [1:0] io_enq_bits_uop_fp_typ, // @[util.scala:463:14]
input io_enq_bits_uop_xcpt_pf_if, // @[util.scala:463:14]
input io_enq_bits_uop_xcpt_ae_if, // @[util.scala:463:14]
input io_enq_bits_uop_xcpt_ma_if, // @[util.scala:463:14]
input io_enq_bits_uop_bp_debug_if, // @[util.scala:463:14]
input io_enq_bits_uop_bp_xcpt_if, // @[util.scala:463:14]
input [2:0] io_enq_bits_uop_debug_fsrc, // @[util.scala:463:14]
input [2:0] io_enq_bits_uop_debug_tsrc, // @[util.scala:463:14]
input [64:0] io_enq_bits_data, // @[util.scala:463:14]
input io_enq_bits_fflags_valid, // @[util.scala:463:14]
input [4:0] io_enq_bits_fflags_bits, // @[util.scala:463:14]
input io_deq_ready, // @[util.scala:463:14]
output io_deq_valid, // @[util.scala:463:14]
output [31:0] io_deq_bits_uop_inst, // @[util.scala:463:14]
output [31:0] io_deq_bits_uop_debug_inst, // @[util.scala:463:14]
output io_deq_bits_uop_is_rvc, // @[util.scala:463:14]
output [39:0] io_deq_bits_uop_debug_pc, // @[util.scala:463:14]
output io_deq_bits_uop_iq_type_0, // @[util.scala:463:14]
output io_deq_bits_uop_iq_type_1, // @[util.scala:463:14]
output io_deq_bits_uop_iq_type_2, // @[util.scala:463:14]
output io_deq_bits_uop_iq_type_3, // @[util.scala:463:14]
output io_deq_bits_uop_fu_code_0, // @[util.scala:463:14]
output io_deq_bits_uop_fu_code_1, // @[util.scala:463:14]
output io_deq_bits_uop_fu_code_2, // @[util.scala:463:14]
output io_deq_bits_uop_fu_code_3, // @[util.scala:463:14]
output io_deq_bits_uop_fu_code_4, // @[util.scala:463:14]
output io_deq_bits_uop_fu_code_5, // @[util.scala:463:14]
output io_deq_bits_uop_fu_code_6, // @[util.scala:463:14]
output io_deq_bits_uop_fu_code_7, // @[util.scala:463:14]
output io_deq_bits_uop_fu_code_8, // @[util.scala:463:14]
output io_deq_bits_uop_fu_code_9, // @[util.scala:463:14]
output io_deq_bits_uop_iw_issued, // @[util.scala:463:14]
output io_deq_bits_uop_iw_issued_partial_agen, // @[util.scala:463:14]
output io_deq_bits_uop_iw_issued_partial_dgen, // @[util.scala:463:14]
output [2:0] io_deq_bits_uop_iw_p1_speculative_child, // @[util.scala:463:14]
output [2:0] io_deq_bits_uop_iw_p2_speculative_child, // @[util.scala:463:14]
output io_deq_bits_uop_iw_p1_bypass_hint, // @[util.scala:463:14]
output io_deq_bits_uop_iw_p2_bypass_hint, // @[util.scala:463:14]
output io_deq_bits_uop_iw_p3_bypass_hint, // @[util.scala:463:14]
output [2:0] io_deq_bits_uop_dis_col_sel, // @[util.scala:463:14]
output [15:0] io_deq_bits_uop_br_mask, // @[util.scala:463:14]
output [3:0] io_deq_bits_uop_br_tag, // @[util.scala:463:14]
output [3:0] io_deq_bits_uop_br_type, // @[util.scala:463:14]
output io_deq_bits_uop_is_sfb, // @[util.scala:463:14]
output io_deq_bits_uop_is_fence, // @[util.scala:463:14]
output io_deq_bits_uop_is_fencei, // @[util.scala:463:14]
output io_deq_bits_uop_is_sfence, // @[util.scala:463:14]
output io_deq_bits_uop_is_amo, // @[util.scala:463:14]
output io_deq_bits_uop_is_eret, // @[util.scala:463:14]
output io_deq_bits_uop_is_sys_pc2epc, // @[util.scala:463:14]
output io_deq_bits_uop_is_rocc, // @[util.scala:463:14]
output io_deq_bits_uop_is_mov, // @[util.scala:463:14]
output [4:0] io_deq_bits_uop_ftq_idx, // @[util.scala:463:14]
output io_deq_bits_uop_edge_inst, // @[util.scala:463:14]
output [5:0] io_deq_bits_uop_pc_lob, // @[util.scala:463:14]
output io_deq_bits_uop_taken, // @[util.scala:463:14]
output io_deq_bits_uop_imm_rename, // @[util.scala:463:14]
output [2:0] io_deq_bits_uop_imm_sel, // @[util.scala:463:14]
output [4:0] io_deq_bits_uop_pimm, // @[util.scala:463:14]
output [19:0] io_deq_bits_uop_imm_packed, // @[util.scala:463:14]
output [1:0] io_deq_bits_uop_op1_sel, // @[util.scala:463:14]
output [2:0] io_deq_bits_uop_op2_sel, // @[util.scala:463:14]
output io_deq_bits_uop_fp_ctrl_ldst, // @[util.scala:463:14]
output io_deq_bits_uop_fp_ctrl_wen, // @[util.scala:463:14]
output io_deq_bits_uop_fp_ctrl_ren1, // @[util.scala:463:14]
output io_deq_bits_uop_fp_ctrl_ren2, // @[util.scala:463:14]
output io_deq_bits_uop_fp_ctrl_ren3, // @[util.scala:463:14]
output io_deq_bits_uop_fp_ctrl_swap12, // @[util.scala:463:14]
output io_deq_bits_uop_fp_ctrl_swap23, // @[util.scala:463:14]
output [1:0] io_deq_bits_uop_fp_ctrl_typeTagIn, // @[util.scala:463:14]
output [1:0] io_deq_bits_uop_fp_ctrl_typeTagOut, // @[util.scala:463:14]
output io_deq_bits_uop_fp_ctrl_fromint, // @[util.scala:463:14]
output io_deq_bits_uop_fp_ctrl_toint, // @[util.scala:463:14]
output io_deq_bits_uop_fp_ctrl_fastpipe, // @[util.scala:463:14]
output io_deq_bits_uop_fp_ctrl_fma, // @[util.scala:463:14]
output io_deq_bits_uop_fp_ctrl_div, // @[util.scala:463:14]
output io_deq_bits_uop_fp_ctrl_sqrt, // @[util.scala:463:14]
output io_deq_bits_uop_fp_ctrl_wflags, // @[util.scala:463:14]
output io_deq_bits_uop_fp_ctrl_vec, // @[util.scala:463:14]
output [6:0] io_deq_bits_uop_rob_idx, // @[util.scala:463:14]
output [4:0] io_deq_bits_uop_ldq_idx, // @[util.scala:463:14]
output [4:0] io_deq_bits_uop_stq_idx, // @[util.scala:463:14]
output [1:0] io_deq_bits_uop_rxq_idx, // @[util.scala:463:14]
output [6:0] io_deq_bits_uop_pdst, // @[util.scala:463:14]
output [6:0] io_deq_bits_uop_prs1, // @[util.scala:463:14]
output [6:0] io_deq_bits_uop_prs2, // @[util.scala:463:14]
output [6:0] io_deq_bits_uop_prs3, // @[util.scala:463:14]
output [4:0] io_deq_bits_uop_ppred, // @[util.scala:463:14]
output io_deq_bits_uop_prs1_busy, // @[util.scala:463:14]
output io_deq_bits_uop_prs2_busy, // @[util.scala:463:14]
output io_deq_bits_uop_prs3_busy, // @[util.scala:463:14]
output io_deq_bits_uop_ppred_busy, // @[util.scala:463:14]
output [6:0] io_deq_bits_uop_stale_pdst, // @[util.scala:463:14]
output io_deq_bits_uop_exception, // @[util.scala:463:14]
output [63:0] io_deq_bits_uop_exc_cause, // @[util.scala:463:14]
output [4:0] io_deq_bits_uop_mem_cmd, // @[util.scala:463:14]
output [1:0] io_deq_bits_uop_mem_size, // @[util.scala:463:14]
output io_deq_bits_uop_mem_signed, // @[util.scala:463:14]
output io_deq_bits_uop_uses_ldq, // @[util.scala:463:14]
output io_deq_bits_uop_uses_stq, // @[util.scala:463:14]
output io_deq_bits_uop_is_unique, // @[util.scala:463:14]
output io_deq_bits_uop_flush_on_commit, // @[util.scala:463:14]
output [2:0] io_deq_bits_uop_csr_cmd, // @[util.scala:463:14]
output io_deq_bits_uop_ldst_is_rs1, // @[util.scala:463:14]
output [5:0] io_deq_bits_uop_ldst, // @[util.scala:463:14]
output [5:0] io_deq_bits_uop_lrs1, // @[util.scala:463:14]
output [5:0] io_deq_bits_uop_lrs2, // @[util.scala:463:14]
output [5:0] io_deq_bits_uop_lrs3, // @[util.scala:463:14]
output [1:0] io_deq_bits_uop_dst_rtype, // @[util.scala:463:14]
output [1:0] io_deq_bits_uop_lrs1_rtype, // @[util.scala:463:14]
output [1:0] io_deq_bits_uop_lrs2_rtype, // @[util.scala:463:14]
output io_deq_bits_uop_frs3_en, // @[util.scala:463:14]
output io_deq_bits_uop_fcn_dw, // @[util.scala:463:14]
output [4:0] io_deq_bits_uop_fcn_op, // @[util.scala:463:14]
output io_deq_bits_uop_fp_val, // @[util.scala:463:14]
output [2:0] io_deq_bits_uop_fp_rm, // @[util.scala:463:14]
output [1:0] io_deq_bits_uop_fp_typ, // @[util.scala:463:14]
output io_deq_bits_uop_xcpt_pf_if, // @[util.scala:463:14]
output io_deq_bits_uop_xcpt_ae_if, // @[util.scala:463:14]
output io_deq_bits_uop_xcpt_ma_if, // @[util.scala:463:14]
output io_deq_bits_uop_bp_debug_if, // @[util.scala:463:14]
output io_deq_bits_uop_bp_xcpt_if, // @[util.scala:463:14]
output [2:0] io_deq_bits_uop_debug_fsrc, // @[util.scala:463:14]
output [2:0] io_deq_bits_uop_debug_tsrc, // @[util.scala:463:14]
output [64:0] io_deq_bits_data, // @[util.scala:463:14]
output io_deq_bits_predicated, // @[util.scala:463:14]
output io_deq_bits_fflags_valid, // @[util.scala:463:14]
output [4:0] io_deq_bits_fflags_bits, // @[util.scala:463:14]
input [15:0] io_brupdate_b1_resolve_mask, // @[util.scala:463:14]
input [15:0] io_brupdate_b1_mispredict_mask, // @[util.scala:463:14]
input [31:0] io_brupdate_b2_uop_inst, // @[util.scala:463:14]
input [31:0] io_brupdate_b2_uop_debug_inst, // @[util.scala:463:14]
input io_brupdate_b2_uop_is_rvc, // @[util.scala:463:14]
input [39:0] io_brupdate_b2_uop_debug_pc, // @[util.scala:463:14]
input io_brupdate_b2_uop_iq_type_0, // @[util.scala:463:14]
input io_brupdate_b2_uop_iq_type_1, // @[util.scala:463:14]
input io_brupdate_b2_uop_iq_type_2, // @[util.scala:463:14]
input io_brupdate_b2_uop_iq_type_3, // @[util.scala:463:14]
input io_brupdate_b2_uop_fu_code_0, // @[util.scala:463:14]
input io_brupdate_b2_uop_fu_code_1, // @[util.scala:463:14]
input io_brupdate_b2_uop_fu_code_2, // @[util.scala:463:14]
input io_brupdate_b2_uop_fu_code_3, // @[util.scala:463:14]
input io_brupdate_b2_uop_fu_code_4, // @[util.scala:463:14]
input io_brupdate_b2_uop_fu_code_5, // @[util.scala:463:14]
input io_brupdate_b2_uop_fu_code_6, // @[util.scala:463:14]
input io_brupdate_b2_uop_fu_code_7, // @[util.scala:463:14]
input io_brupdate_b2_uop_fu_code_8, // @[util.scala:463:14]
input io_brupdate_b2_uop_fu_code_9, // @[util.scala:463:14]
input io_brupdate_b2_uop_iw_issued, // @[util.scala:463:14]
input io_brupdate_b2_uop_iw_issued_partial_agen, // @[util.scala:463:14]
input io_brupdate_b2_uop_iw_issued_partial_dgen, // @[util.scala:463:14]
input [2:0] io_brupdate_b2_uop_iw_p1_speculative_child, // @[util.scala:463:14]
input [2:0] io_brupdate_b2_uop_iw_p2_speculative_child, // @[util.scala:463:14]
input io_brupdate_b2_uop_iw_p1_bypass_hint, // @[util.scala:463:14]
input io_brupdate_b2_uop_iw_p2_bypass_hint, // @[util.scala:463:14]
input io_brupdate_b2_uop_iw_p3_bypass_hint, // @[util.scala:463:14]
input [2:0] io_brupdate_b2_uop_dis_col_sel, // @[util.scala:463:14]
input [15:0] io_brupdate_b2_uop_br_mask, // @[util.scala:463:14]
input [3:0] io_brupdate_b2_uop_br_tag, // @[util.scala:463:14]
input [3:0] io_brupdate_b2_uop_br_type, // @[util.scala:463:14]
input io_brupdate_b2_uop_is_sfb, // @[util.scala:463:14]
input io_brupdate_b2_uop_is_fence, // @[util.scala:463:14]
input io_brupdate_b2_uop_is_fencei, // @[util.scala:463:14]
input io_brupdate_b2_uop_is_sfence, // @[util.scala:463:14]
input io_brupdate_b2_uop_is_amo, // @[util.scala:463:14]
input io_brupdate_b2_uop_is_eret, // @[util.scala:463:14]
input io_brupdate_b2_uop_is_sys_pc2epc, // @[util.scala:463:14]
input io_brupdate_b2_uop_is_rocc, // @[util.scala:463:14]
input io_brupdate_b2_uop_is_mov, // @[util.scala:463:14]
input [4:0] io_brupdate_b2_uop_ftq_idx, // @[util.scala:463:14]
input io_brupdate_b2_uop_edge_inst, // @[util.scala:463:14]
input [5:0] io_brupdate_b2_uop_pc_lob, // @[util.scala:463:14]
input io_brupdate_b2_uop_taken, // @[util.scala:463:14]
input io_brupdate_b2_uop_imm_rename, // @[util.scala:463:14]
input [2:0] io_brupdate_b2_uop_imm_sel, // @[util.scala:463:14]
input [4:0] io_brupdate_b2_uop_pimm, // @[util.scala:463:14]
input [19:0] io_brupdate_b2_uop_imm_packed, // @[util.scala:463:14]
input [1:0] io_brupdate_b2_uop_op1_sel, // @[util.scala:463:14]
input [2:0] io_brupdate_b2_uop_op2_sel, // @[util.scala:463:14]
input io_brupdate_b2_uop_fp_ctrl_ldst, // @[util.scala:463:14]
input io_brupdate_b2_uop_fp_ctrl_wen, // @[util.scala:463:14]
input io_brupdate_b2_uop_fp_ctrl_ren1, // @[util.scala:463:14]
input io_brupdate_b2_uop_fp_ctrl_ren2, // @[util.scala:463:14]
input io_brupdate_b2_uop_fp_ctrl_ren3, // @[util.scala:463:14]
input io_brupdate_b2_uop_fp_ctrl_swap12, // @[util.scala:463:14]
input io_brupdate_b2_uop_fp_ctrl_swap23, // @[util.scala:463:14]
input [1:0] io_brupdate_b2_uop_fp_ctrl_typeTagIn, // @[util.scala:463:14]
input [1:0] io_brupdate_b2_uop_fp_ctrl_typeTagOut, // @[util.scala:463:14]
input io_brupdate_b2_uop_fp_ctrl_fromint, // @[util.scala:463:14]
input io_brupdate_b2_uop_fp_ctrl_toint, // @[util.scala:463:14]
input io_brupdate_b2_uop_fp_ctrl_fastpipe, // @[util.scala:463:14]
input io_brupdate_b2_uop_fp_ctrl_fma, // @[util.scala:463:14]
input io_brupdate_b2_uop_fp_ctrl_div, // @[util.scala:463:14]
input io_brupdate_b2_uop_fp_ctrl_sqrt, // @[util.scala:463:14]
input io_brupdate_b2_uop_fp_ctrl_wflags, // @[util.scala:463:14]
input io_brupdate_b2_uop_fp_ctrl_vec, // @[util.scala:463:14]
input [6:0] io_brupdate_b2_uop_rob_idx, // @[util.scala:463:14]
input [4:0] io_brupdate_b2_uop_ldq_idx, // @[util.scala:463:14]
input [4:0] io_brupdate_b2_uop_stq_idx, // @[util.scala:463:14]
input [1:0] io_brupdate_b2_uop_rxq_idx, // @[util.scala:463:14]
input [6:0] io_brupdate_b2_uop_pdst, // @[util.scala:463:14]
input [6:0] io_brupdate_b2_uop_prs1, // @[util.scala:463:14]
input [6:0] io_brupdate_b2_uop_prs2, // @[util.scala:463:14]
input [6:0] io_brupdate_b2_uop_prs3, // @[util.scala:463:14]
input [4:0] io_brupdate_b2_uop_ppred, // @[util.scala:463:14]
input io_brupdate_b2_uop_prs1_busy, // @[util.scala:463:14]
input io_brupdate_b2_uop_prs2_busy, // @[util.scala:463:14]
input io_brupdate_b2_uop_prs3_busy, // @[util.scala:463:14]
input io_brupdate_b2_uop_ppred_busy, // @[util.scala:463:14]
input [6:0] io_brupdate_b2_uop_stale_pdst, // @[util.scala:463:14]
input io_brupdate_b2_uop_exception, // @[util.scala:463:14]
input [63:0] io_brupdate_b2_uop_exc_cause, // @[util.scala:463:14]
input [4:0] io_brupdate_b2_uop_mem_cmd, // @[util.scala:463:14]
input [1:0] io_brupdate_b2_uop_mem_size, // @[util.scala:463:14]
input io_brupdate_b2_uop_mem_signed, // @[util.scala:463:14]
input io_brupdate_b2_uop_uses_ldq, // @[util.scala:463:14]
input io_brupdate_b2_uop_uses_stq, // @[util.scala:463:14]
input io_brupdate_b2_uop_is_unique, // @[util.scala:463:14]
input io_brupdate_b2_uop_flush_on_commit, // @[util.scala:463:14]
input [2:0] io_brupdate_b2_uop_csr_cmd, // @[util.scala:463:14]
input io_brupdate_b2_uop_ldst_is_rs1, // @[util.scala:463:14]
input [5:0] io_brupdate_b2_uop_ldst, // @[util.scala:463:14]
input [5:0] io_brupdate_b2_uop_lrs1, // @[util.scala:463:14]
input [5:0] io_brupdate_b2_uop_lrs2, // @[util.scala:463:14]
input [5:0] io_brupdate_b2_uop_lrs3, // @[util.scala:463:14]
input [1:0] io_brupdate_b2_uop_dst_rtype, // @[util.scala:463:14]
input [1:0] io_brupdate_b2_uop_lrs1_rtype, // @[util.scala:463:14]
input [1:0] io_brupdate_b2_uop_lrs2_rtype, // @[util.scala:463:14]
input io_brupdate_b2_uop_frs3_en, // @[util.scala:463:14]
input io_brupdate_b2_uop_fcn_dw, // @[util.scala:463:14]
input [4:0] io_brupdate_b2_uop_fcn_op, // @[util.scala:463:14]
input io_brupdate_b2_uop_fp_val, // @[util.scala:463:14]
input [2:0] io_brupdate_b2_uop_fp_rm, // @[util.scala:463:14]
input [1:0] io_brupdate_b2_uop_fp_typ, // @[util.scala:463:14]
input io_brupdate_b2_uop_xcpt_pf_if, // @[util.scala:463:14]
input io_brupdate_b2_uop_xcpt_ae_if, // @[util.scala:463:14]
input io_brupdate_b2_uop_xcpt_ma_if, // @[util.scala:463:14]
input io_brupdate_b2_uop_bp_debug_if, // @[util.scala:463:14]
input io_brupdate_b2_uop_bp_xcpt_if, // @[util.scala:463:14]
input [2:0] io_brupdate_b2_uop_debug_fsrc, // @[util.scala:463:14]
input [2:0] io_brupdate_b2_uop_debug_tsrc, // @[util.scala:463:14]
input io_brupdate_b2_mispredict, // @[util.scala:463:14]
input io_brupdate_b2_taken, // @[util.scala:463:14]
input [2:0] io_brupdate_b2_cfi_type, // @[util.scala:463:14]
input [1:0] io_brupdate_b2_pc_sel, // @[util.scala:463:14]
input [39:0] io_brupdate_b2_jalr_target, // @[util.scala:463:14]
input [20:0] io_brupdate_b2_target_offset, // @[util.scala:463:14]
input io_flush, // @[util.scala:463:14]
output [2:0] io_count // @[util.scala:463:14]
);
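  // BranchKillableQueue_9: an 8-entry queue (valids_0..valids_7 below) carrying a micro-op together
  // with a 65-bit data payload, a predicated flag, and fflags. Entries whose branch mask matches a
  // mispredicted branch in io_brupdate, or that are caught by io_flush, have their valid bit cleared
  // in place instead of being dequeued.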
wire [71:0] _ram_ext_R0_data; // @[util.scala:503:22]
wire io_enq_valid_0 = io_enq_valid; // @[util.scala:458:7]
wire [31:0] io_enq_bits_uop_inst_0 = io_enq_bits_uop_inst; // @[util.scala:458:7]
wire [31:0] io_enq_bits_uop_debug_inst_0 = io_enq_bits_uop_debug_inst; // @[util.scala:458:7]
wire io_enq_bits_uop_is_rvc_0 = io_enq_bits_uop_is_rvc; // @[util.scala:458:7]
wire [39:0] io_enq_bits_uop_debug_pc_0 = io_enq_bits_uop_debug_pc; // @[util.scala:458:7]
wire io_enq_bits_uop_iq_type_0_0 = io_enq_bits_uop_iq_type_0; // @[util.scala:458:7]
wire io_enq_bits_uop_iq_type_1_0 = io_enq_bits_uop_iq_type_1; // @[util.scala:458:7]
wire io_enq_bits_uop_iq_type_2_0 = io_enq_bits_uop_iq_type_2; // @[util.scala:458:7]
wire io_enq_bits_uop_iq_type_3_0 = io_enq_bits_uop_iq_type_3; // @[util.scala:458:7]
wire io_enq_bits_uop_fu_code_0_0 = io_enq_bits_uop_fu_code_0; // @[util.scala:458:7]
wire io_enq_bits_uop_fu_code_1_0 = io_enq_bits_uop_fu_code_1; // @[util.scala:458:7]
wire io_enq_bits_uop_fu_code_2_0 = io_enq_bits_uop_fu_code_2; // @[util.scala:458:7]
wire io_enq_bits_uop_fu_code_3_0 = io_enq_bits_uop_fu_code_3; // @[util.scala:458:7]
wire io_enq_bits_uop_fu_code_4_0 = io_enq_bits_uop_fu_code_4; // @[util.scala:458:7]
wire io_enq_bits_uop_fu_code_5_0 = io_enq_bits_uop_fu_code_5; // @[util.scala:458:7]
wire io_enq_bits_uop_fu_code_6_0 = io_enq_bits_uop_fu_code_6; // @[util.scala:458:7]
wire io_enq_bits_uop_fu_code_7_0 = io_enq_bits_uop_fu_code_7; // @[util.scala:458:7]
wire io_enq_bits_uop_fu_code_8_0 = io_enq_bits_uop_fu_code_8; // @[util.scala:458:7]
wire io_enq_bits_uop_fu_code_9_0 = io_enq_bits_uop_fu_code_9; // @[util.scala:458:7]
wire io_enq_bits_uop_iw_issued_0 = io_enq_bits_uop_iw_issued; // @[util.scala:458:7]
wire io_enq_bits_uop_iw_issued_partial_agen_0 = io_enq_bits_uop_iw_issued_partial_agen; // @[util.scala:458:7]
wire io_enq_bits_uop_iw_issued_partial_dgen_0 = io_enq_bits_uop_iw_issued_partial_dgen; // @[util.scala:458:7]
wire [2:0] io_enq_bits_uop_iw_p1_speculative_child_0 = io_enq_bits_uop_iw_p1_speculative_child; // @[util.scala:458:7]
wire [2:0] io_enq_bits_uop_iw_p2_speculative_child_0 = io_enq_bits_uop_iw_p2_speculative_child; // @[util.scala:458:7]
wire io_enq_bits_uop_iw_p1_bypass_hint_0 = io_enq_bits_uop_iw_p1_bypass_hint; // @[util.scala:458:7]
wire io_enq_bits_uop_iw_p2_bypass_hint_0 = io_enq_bits_uop_iw_p2_bypass_hint; // @[util.scala:458:7]
wire io_enq_bits_uop_iw_p3_bypass_hint_0 = io_enq_bits_uop_iw_p3_bypass_hint; // @[util.scala:458:7]
wire [2:0] io_enq_bits_uop_dis_col_sel_0 = io_enq_bits_uop_dis_col_sel; // @[util.scala:458:7]
wire [15:0] io_enq_bits_uop_br_mask_0 = io_enq_bits_uop_br_mask; // @[util.scala:458:7]
wire [3:0] io_enq_bits_uop_br_tag_0 = io_enq_bits_uop_br_tag; // @[util.scala:458:7]
wire [3:0] io_enq_bits_uop_br_type_0 = io_enq_bits_uop_br_type; // @[util.scala:458:7]
wire io_enq_bits_uop_is_sfb_0 = io_enq_bits_uop_is_sfb; // @[util.scala:458:7]
wire io_enq_bits_uop_is_fence_0 = io_enq_bits_uop_is_fence; // @[util.scala:458:7]
wire io_enq_bits_uop_is_fencei_0 = io_enq_bits_uop_is_fencei; // @[util.scala:458:7]
wire io_enq_bits_uop_is_sfence_0 = io_enq_bits_uop_is_sfence; // @[util.scala:458:7]
wire io_enq_bits_uop_is_amo_0 = io_enq_bits_uop_is_amo; // @[util.scala:458:7]
wire io_enq_bits_uop_is_eret_0 = io_enq_bits_uop_is_eret; // @[util.scala:458:7]
wire io_enq_bits_uop_is_sys_pc2epc_0 = io_enq_bits_uop_is_sys_pc2epc; // @[util.scala:458:7]
wire io_enq_bits_uop_is_rocc_0 = io_enq_bits_uop_is_rocc; // @[util.scala:458:7]
wire io_enq_bits_uop_is_mov_0 = io_enq_bits_uop_is_mov; // @[util.scala:458:7]
wire [4:0] io_enq_bits_uop_ftq_idx_0 = io_enq_bits_uop_ftq_idx; // @[util.scala:458:7]
wire io_enq_bits_uop_edge_inst_0 = io_enq_bits_uop_edge_inst; // @[util.scala:458:7]
wire [5:0] io_enq_bits_uop_pc_lob_0 = io_enq_bits_uop_pc_lob; // @[util.scala:458:7]
wire io_enq_bits_uop_taken_0 = io_enq_bits_uop_taken; // @[util.scala:458:7]
wire io_enq_bits_uop_imm_rename_0 = io_enq_bits_uop_imm_rename; // @[util.scala:458:7]
wire [2:0] io_enq_bits_uop_imm_sel_0 = io_enq_bits_uop_imm_sel; // @[util.scala:458:7]
wire [4:0] io_enq_bits_uop_pimm_0 = io_enq_bits_uop_pimm; // @[util.scala:458:7]
wire [19:0] io_enq_bits_uop_imm_packed_0 = io_enq_bits_uop_imm_packed; // @[util.scala:458:7]
wire [1:0] io_enq_bits_uop_op1_sel_0 = io_enq_bits_uop_op1_sel; // @[util.scala:458:7]
wire [2:0] io_enq_bits_uop_op2_sel_0 = io_enq_bits_uop_op2_sel; // @[util.scala:458:7]
wire io_enq_bits_uop_fp_ctrl_ldst_0 = io_enq_bits_uop_fp_ctrl_ldst; // @[util.scala:458:7]
wire io_enq_bits_uop_fp_ctrl_wen_0 = io_enq_bits_uop_fp_ctrl_wen; // @[util.scala:458:7]
wire io_enq_bits_uop_fp_ctrl_ren1_0 = io_enq_bits_uop_fp_ctrl_ren1; // @[util.scala:458:7]
wire io_enq_bits_uop_fp_ctrl_ren2_0 = io_enq_bits_uop_fp_ctrl_ren2; // @[util.scala:458:7]
wire io_enq_bits_uop_fp_ctrl_ren3_0 = io_enq_bits_uop_fp_ctrl_ren3; // @[util.scala:458:7]
wire io_enq_bits_uop_fp_ctrl_swap12_0 = io_enq_bits_uop_fp_ctrl_swap12; // @[util.scala:458:7]
wire io_enq_bits_uop_fp_ctrl_swap23_0 = io_enq_bits_uop_fp_ctrl_swap23; // @[util.scala:458:7]
wire [1:0] io_enq_bits_uop_fp_ctrl_typeTagIn_0 = io_enq_bits_uop_fp_ctrl_typeTagIn; // @[util.scala:458:7]
wire [1:0] io_enq_bits_uop_fp_ctrl_typeTagOut_0 = io_enq_bits_uop_fp_ctrl_typeTagOut; // @[util.scala:458:7]
wire io_enq_bits_uop_fp_ctrl_fromint_0 = io_enq_bits_uop_fp_ctrl_fromint; // @[util.scala:458:7]
wire io_enq_bits_uop_fp_ctrl_toint_0 = io_enq_bits_uop_fp_ctrl_toint; // @[util.scala:458:7]
wire io_enq_bits_uop_fp_ctrl_fastpipe_0 = io_enq_bits_uop_fp_ctrl_fastpipe; // @[util.scala:458:7]
wire io_enq_bits_uop_fp_ctrl_fma_0 = io_enq_bits_uop_fp_ctrl_fma; // @[util.scala:458:7]
wire io_enq_bits_uop_fp_ctrl_div_0 = io_enq_bits_uop_fp_ctrl_div; // @[util.scala:458:7]
wire io_enq_bits_uop_fp_ctrl_sqrt_0 = io_enq_bits_uop_fp_ctrl_sqrt; // @[util.scala:458:7]
wire io_enq_bits_uop_fp_ctrl_wflags_0 = io_enq_bits_uop_fp_ctrl_wflags; // @[util.scala:458:7]
wire io_enq_bits_uop_fp_ctrl_vec_0 = io_enq_bits_uop_fp_ctrl_vec; // @[util.scala:458:7]
wire [6:0] io_enq_bits_uop_rob_idx_0 = io_enq_bits_uop_rob_idx; // @[util.scala:458:7]
wire [4:0] io_enq_bits_uop_ldq_idx_0 = io_enq_bits_uop_ldq_idx; // @[util.scala:458:7]
wire [4:0] io_enq_bits_uop_stq_idx_0 = io_enq_bits_uop_stq_idx; // @[util.scala:458:7]
wire [1:0] io_enq_bits_uop_rxq_idx_0 = io_enq_bits_uop_rxq_idx; // @[util.scala:458:7]
wire [6:0] io_enq_bits_uop_pdst_0 = io_enq_bits_uop_pdst; // @[util.scala:458:7]
wire [6:0] io_enq_bits_uop_prs1_0 = io_enq_bits_uop_prs1; // @[util.scala:458:7]
wire [6:0] io_enq_bits_uop_prs2_0 = io_enq_bits_uop_prs2; // @[util.scala:458:7]
wire [6:0] io_enq_bits_uop_prs3_0 = io_enq_bits_uop_prs3; // @[util.scala:458:7]
wire [4:0] io_enq_bits_uop_ppred_0 = io_enq_bits_uop_ppred; // @[util.scala:458:7]
wire io_enq_bits_uop_prs1_busy_0 = io_enq_bits_uop_prs1_busy; // @[util.scala:458:7]
wire io_enq_bits_uop_prs2_busy_0 = io_enq_bits_uop_prs2_busy; // @[util.scala:458:7]
wire io_enq_bits_uop_prs3_busy_0 = io_enq_bits_uop_prs3_busy; // @[util.scala:458:7]
wire io_enq_bits_uop_ppred_busy_0 = io_enq_bits_uop_ppred_busy; // @[util.scala:458:7]
wire [6:0] io_enq_bits_uop_stale_pdst_0 = io_enq_bits_uop_stale_pdst; // @[util.scala:458:7]
wire io_enq_bits_uop_exception_0 = io_enq_bits_uop_exception; // @[util.scala:458:7]
wire [63:0] io_enq_bits_uop_exc_cause_0 = io_enq_bits_uop_exc_cause; // @[util.scala:458:7]
wire [4:0] io_enq_bits_uop_mem_cmd_0 = io_enq_bits_uop_mem_cmd; // @[util.scala:458:7]
wire [1:0] io_enq_bits_uop_mem_size_0 = io_enq_bits_uop_mem_size; // @[util.scala:458:7]
wire io_enq_bits_uop_mem_signed_0 = io_enq_bits_uop_mem_signed; // @[util.scala:458:7]
wire io_enq_bits_uop_uses_ldq_0 = io_enq_bits_uop_uses_ldq; // @[util.scala:458:7]
wire io_enq_bits_uop_uses_stq_0 = io_enq_bits_uop_uses_stq; // @[util.scala:458:7]
wire io_enq_bits_uop_is_unique_0 = io_enq_bits_uop_is_unique; // @[util.scala:458:7]
wire io_enq_bits_uop_flush_on_commit_0 = io_enq_bits_uop_flush_on_commit; // @[util.scala:458:7]
wire [2:0] io_enq_bits_uop_csr_cmd_0 = io_enq_bits_uop_csr_cmd; // @[util.scala:458:7]
wire io_enq_bits_uop_ldst_is_rs1_0 = io_enq_bits_uop_ldst_is_rs1; // @[util.scala:458:7]
wire [5:0] io_enq_bits_uop_ldst_0 = io_enq_bits_uop_ldst; // @[util.scala:458:7]
wire [5:0] io_enq_bits_uop_lrs1_0 = io_enq_bits_uop_lrs1; // @[util.scala:458:7]
wire [5:0] io_enq_bits_uop_lrs2_0 = io_enq_bits_uop_lrs2; // @[util.scala:458:7]
wire [5:0] io_enq_bits_uop_lrs3_0 = io_enq_bits_uop_lrs3; // @[util.scala:458:7]
wire [1:0] io_enq_bits_uop_dst_rtype_0 = io_enq_bits_uop_dst_rtype; // @[util.scala:458:7]
wire [1:0] io_enq_bits_uop_lrs1_rtype_0 = io_enq_bits_uop_lrs1_rtype; // @[util.scala:458:7]
wire [1:0] io_enq_bits_uop_lrs2_rtype_0 = io_enq_bits_uop_lrs2_rtype; // @[util.scala:458:7]
wire io_enq_bits_uop_frs3_en_0 = io_enq_bits_uop_frs3_en; // @[util.scala:458:7]
wire io_enq_bits_uop_fcn_dw_0 = io_enq_bits_uop_fcn_dw; // @[util.scala:458:7]
wire [4:0] io_enq_bits_uop_fcn_op_0 = io_enq_bits_uop_fcn_op; // @[util.scala:458:7]
wire io_enq_bits_uop_fp_val_0 = io_enq_bits_uop_fp_val; // @[util.scala:458:7]
wire [2:0] io_enq_bits_uop_fp_rm_0 = io_enq_bits_uop_fp_rm; // @[util.scala:458:7]
wire [1:0] io_enq_bits_uop_fp_typ_0 = io_enq_bits_uop_fp_typ; // @[util.scala:458:7]
wire io_enq_bits_uop_xcpt_pf_if_0 = io_enq_bits_uop_xcpt_pf_if; // @[util.scala:458:7]
wire io_enq_bits_uop_xcpt_ae_if_0 = io_enq_bits_uop_xcpt_ae_if; // @[util.scala:458:7]
wire io_enq_bits_uop_xcpt_ma_if_0 = io_enq_bits_uop_xcpt_ma_if; // @[util.scala:458:7]
wire io_enq_bits_uop_bp_debug_if_0 = io_enq_bits_uop_bp_debug_if; // @[util.scala:458:7]
wire io_enq_bits_uop_bp_xcpt_if_0 = io_enq_bits_uop_bp_xcpt_if; // @[util.scala:458:7]
wire [2:0] io_enq_bits_uop_debug_fsrc_0 = io_enq_bits_uop_debug_fsrc; // @[util.scala:458:7]
wire [2:0] io_enq_bits_uop_debug_tsrc_0 = io_enq_bits_uop_debug_tsrc; // @[util.scala:458:7]
wire [64:0] io_enq_bits_data_0 = io_enq_bits_data; // @[util.scala:458:7]
wire io_enq_bits_fflags_valid_0 = io_enq_bits_fflags_valid; // @[util.scala:458:7]
wire [4:0] io_enq_bits_fflags_bits_0 = io_enq_bits_fflags_bits; // @[util.scala:458:7]
wire io_deq_ready_0 = io_deq_ready; // @[util.scala:458:7]
wire [15:0] io_brupdate_b1_resolve_mask_0 = io_brupdate_b1_resolve_mask; // @[util.scala:458:7]
wire [15:0] io_brupdate_b1_mispredict_mask_0 = io_brupdate_b1_mispredict_mask; // @[util.scala:458:7]
wire [31:0] io_brupdate_b2_uop_inst_0 = io_brupdate_b2_uop_inst; // @[util.scala:458:7]
wire [31:0] io_brupdate_b2_uop_debug_inst_0 = io_brupdate_b2_uop_debug_inst; // @[util.scala:458:7]
wire io_brupdate_b2_uop_is_rvc_0 = io_brupdate_b2_uop_is_rvc; // @[util.scala:458:7]
wire [39:0] io_brupdate_b2_uop_debug_pc_0 = io_brupdate_b2_uop_debug_pc; // @[util.scala:458:7]
wire io_brupdate_b2_uop_iq_type_0_0 = io_brupdate_b2_uop_iq_type_0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_iq_type_1_0 = io_brupdate_b2_uop_iq_type_1; // @[util.scala:458:7]
wire io_brupdate_b2_uop_iq_type_2_0 = io_brupdate_b2_uop_iq_type_2; // @[util.scala:458:7]
wire io_brupdate_b2_uop_iq_type_3_0 = io_brupdate_b2_uop_iq_type_3; // @[util.scala:458:7]
wire io_brupdate_b2_uop_fu_code_0_0 = io_brupdate_b2_uop_fu_code_0; // @[util.scala:458:7]
wire io_brupdate_b2_uop_fu_code_1_0 = io_brupdate_b2_uop_fu_code_1; // @[util.scala:458:7]
wire io_brupdate_b2_uop_fu_code_2_0 = io_brupdate_b2_uop_fu_code_2; // @[util.scala:458:7]
wire io_brupdate_b2_uop_fu_code_3_0 = io_brupdate_b2_uop_fu_code_3; // @[util.scala:458:7]
wire io_brupdate_b2_uop_fu_code_4_0 = io_brupdate_b2_uop_fu_code_4; // @[util.scala:458:7]
wire io_brupdate_b2_uop_fu_code_5_0 = io_brupdate_b2_uop_fu_code_5; // @[util.scala:458:7]
wire io_brupdate_b2_uop_fu_code_6_0 = io_brupdate_b2_uop_fu_code_6; // @[util.scala:458:7]
wire io_brupdate_b2_uop_fu_code_7_0 = io_brupdate_b2_uop_fu_code_7; // @[util.scala:458:7]
wire io_brupdate_b2_uop_fu_code_8_0 = io_brupdate_b2_uop_fu_code_8; // @[util.scala:458:7]
wire io_brupdate_b2_uop_fu_code_9_0 = io_brupdate_b2_uop_fu_code_9; // @[util.scala:458:7]
wire io_brupdate_b2_uop_iw_issued_0 = io_brupdate_b2_uop_iw_issued; // @[util.scala:458:7]
wire io_brupdate_b2_uop_iw_issued_partial_agen_0 = io_brupdate_b2_uop_iw_issued_partial_agen; // @[util.scala:458:7]
wire io_brupdate_b2_uop_iw_issued_partial_dgen_0 = io_brupdate_b2_uop_iw_issued_partial_dgen; // @[util.scala:458:7]
wire [2:0] io_brupdate_b2_uop_iw_p1_speculative_child_0 = io_brupdate_b2_uop_iw_p1_speculative_child; // @[util.scala:458:7]
wire [2:0] io_brupdate_b2_uop_iw_p2_speculative_child_0 = io_brupdate_b2_uop_iw_p2_speculative_child; // @[util.scala:458:7]
wire io_brupdate_b2_uop_iw_p1_bypass_hint_0 = io_brupdate_b2_uop_iw_p1_bypass_hint; // @[util.scala:458:7]
wire io_brupdate_b2_uop_iw_p2_bypass_hint_0 = io_brupdate_b2_uop_iw_p2_bypass_hint; // @[util.scala:458:7]
wire io_brupdate_b2_uop_iw_p3_bypass_hint_0 = io_brupdate_b2_uop_iw_p3_bypass_hint; // @[util.scala:458:7]
wire [2:0] io_brupdate_b2_uop_dis_col_sel_0 = io_brupdate_b2_uop_dis_col_sel; // @[util.scala:458:7]
wire [15:0] io_brupdate_b2_uop_br_mask_0 = io_brupdate_b2_uop_br_mask; // @[util.scala:458:7]
wire [3:0] io_brupdate_b2_uop_br_tag_0 = io_brupdate_b2_uop_br_tag; // @[util.scala:458:7]
wire [3:0] io_brupdate_b2_uop_br_type_0 = io_brupdate_b2_uop_br_type; // @[util.scala:458:7]
wire io_brupdate_b2_uop_is_sfb_0 = io_brupdate_b2_uop_is_sfb; // @[util.scala:458:7]
wire io_brupdate_b2_uop_is_fence_0 = io_brupdate_b2_uop_is_fence; // @[util.scala:458:7]
wire io_brupdate_b2_uop_is_fencei_0 = io_brupdate_b2_uop_is_fencei; // @[util.scala:458:7]
wire io_brupdate_b2_uop_is_sfence_0 = io_brupdate_b2_uop_is_sfence; // @[util.scala:458:7]
wire io_brupdate_b2_uop_is_amo_0 = io_brupdate_b2_uop_is_amo; // @[util.scala:458:7]
wire io_brupdate_b2_uop_is_eret_0 = io_brupdate_b2_uop_is_eret; // @[util.scala:458:7]
wire io_brupdate_b2_uop_is_sys_pc2epc_0 = io_brupdate_b2_uop_is_sys_pc2epc; // @[util.scala:458:7]
wire io_brupdate_b2_uop_is_rocc_0 = io_brupdate_b2_uop_is_rocc; // @[util.scala:458:7]
wire io_brupdate_b2_uop_is_mov_0 = io_brupdate_b2_uop_is_mov; // @[util.scala:458:7]
wire [4:0] io_brupdate_b2_uop_ftq_idx_0 = io_brupdate_b2_uop_ftq_idx; // @[util.scala:458:7]
wire io_brupdate_b2_uop_edge_inst_0 = io_brupdate_b2_uop_edge_inst; // @[util.scala:458:7]
wire [5:0] io_brupdate_b2_uop_pc_lob_0 = io_brupdate_b2_uop_pc_lob; // @[util.scala:458:7]
wire io_brupdate_b2_uop_taken_0 = io_brupdate_b2_uop_taken; // @[util.scala:458:7]
wire io_brupdate_b2_uop_imm_rename_0 = io_brupdate_b2_uop_imm_rename; // @[util.scala:458:7]
wire [2:0] io_brupdate_b2_uop_imm_sel_0 = io_brupdate_b2_uop_imm_sel; // @[util.scala:458:7]
wire [4:0] io_brupdate_b2_uop_pimm_0 = io_brupdate_b2_uop_pimm; // @[util.scala:458:7]
wire [19:0] io_brupdate_b2_uop_imm_packed_0 = io_brupdate_b2_uop_imm_packed; // @[util.scala:458:7]
wire [1:0] io_brupdate_b2_uop_op1_sel_0 = io_brupdate_b2_uop_op1_sel; // @[util.scala:458:7]
wire [2:0] io_brupdate_b2_uop_op2_sel_0 = io_brupdate_b2_uop_op2_sel; // @[util.scala:458:7]
wire io_brupdate_b2_uop_fp_ctrl_ldst_0 = io_brupdate_b2_uop_fp_ctrl_ldst; // @[util.scala:458:7]
wire io_brupdate_b2_uop_fp_ctrl_wen_0 = io_brupdate_b2_uop_fp_ctrl_wen; // @[util.scala:458:7]
wire io_brupdate_b2_uop_fp_ctrl_ren1_0 = io_brupdate_b2_uop_fp_ctrl_ren1; // @[util.scala:458:7]
wire io_brupdate_b2_uop_fp_ctrl_ren2_0 = io_brupdate_b2_uop_fp_ctrl_ren2; // @[util.scala:458:7]
wire io_brupdate_b2_uop_fp_ctrl_ren3_0 = io_brupdate_b2_uop_fp_ctrl_ren3; // @[util.scala:458:7]
wire io_brupdate_b2_uop_fp_ctrl_swap12_0 = io_brupdate_b2_uop_fp_ctrl_swap12; // @[util.scala:458:7]
wire io_brupdate_b2_uop_fp_ctrl_swap23_0 = io_brupdate_b2_uop_fp_ctrl_swap23; // @[util.scala:458:7]
wire [1:0] io_brupdate_b2_uop_fp_ctrl_typeTagIn_0 = io_brupdate_b2_uop_fp_ctrl_typeTagIn; // @[util.scala:458:7]
wire [1:0] io_brupdate_b2_uop_fp_ctrl_typeTagOut_0 = io_brupdate_b2_uop_fp_ctrl_typeTagOut; // @[util.scala:458:7]
wire io_brupdate_b2_uop_fp_ctrl_fromint_0 = io_brupdate_b2_uop_fp_ctrl_fromint; // @[util.scala:458:7]
wire io_brupdate_b2_uop_fp_ctrl_toint_0 = io_brupdate_b2_uop_fp_ctrl_toint; // @[util.scala:458:7]
wire io_brupdate_b2_uop_fp_ctrl_fastpipe_0 = io_brupdate_b2_uop_fp_ctrl_fastpipe; // @[util.scala:458:7]
wire io_brupdate_b2_uop_fp_ctrl_fma_0 = io_brupdate_b2_uop_fp_ctrl_fma; // @[util.scala:458:7]
wire io_brupdate_b2_uop_fp_ctrl_div_0 = io_brupdate_b2_uop_fp_ctrl_div; // @[util.scala:458:7]
wire io_brupdate_b2_uop_fp_ctrl_sqrt_0 = io_brupdate_b2_uop_fp_ctrl_sqrt; // @[util.scala:458:7]
wire io_brupdate_b2_uop_fp_ctrl_wflags_0 = io_brupdate_b2_uop_fp_ctrl_wflags; // @[util.scala:458:7]
wire io_brupdate_b2_uop_fp_ctrl_vec_0 = io_brupdate_b2_uop_fp_ctrl_vec; // @[util.scala:458:7]
wire [6:0] io_brupdate_b2_uop_rob_idx_0 = io_brupdate_b2_uop_rob_idx; // @[util.scala:458:7]
wire [4:0] io_brupdate_b2_uop_ldq_idx_0 = io_brupdate_b2_uop_ldq_idx; // @[util.scala:458:7]
wire [4:0] io_brupdate_b2_uop_stq_idx_0 = io_brupdate_b2_uop_stq_idx; // @[util.scala:458:7]
wire [1:0] io_brupdate_b2_uop_rxq_idx_0 = io_brupdate_b2_uop_rxq_idx; // @[util.scala:458:7]
wire [6:0] io_brupdate_b2_uop_pdst_0 = io_brupdate_b2_uop_pdst; // @[util.scala:458:7]
wire [6:0] io_brupdate_b2_uop_prs1_0 = io_brupdate_b2_uop_prs1; // @[util.scala:458:7]
wire [6:0] io_brupdate_b2_uop_prs2_0 = io_brupdate_b2_uop_prs2; // @[util.scala:458:7]
wire [6:0] io_brupdate_b2_uop_prs3_0 = io_brupdate_b2_uop_prs3; // @[util.scala:458:7]
wire [4:0] io_brupdate_b2_uop_ppred_0 = io_brupdate_b2_uop_ppred; // @[util.scala:458:7]
wire io_brupdate_b2_uop_prs1_busy_0 = io_brupdate_b2_uop_prs1_busy; // @[util.scala:458:7]
wire io_brupdate_b2_uop_prs2_busy_0 = io_brupdate_b2_uop_prs2_busy; // @[util.scala:458:7]
wire io_brupdate_b2_uop_prs3_busy_0 = io_brupdate_b2_uop_prs3_busy; // @[util.scala:458:7]
wire io_brupdate_b2_uop_ppred_busy_0 = io_brupdate_b2_uop_ppred_busy; // @[util.scala:458:7]
wire [6:0] io_brupdate_b2_uop_stale_pdst_0 = io_brupdate_b2_uop_stale_pdst; // @[util.scala:458:7]
wire io_brupdate_b2_uop_exception_0 = io_brupdate_b2_uop_exception; // @[util.scala:458:7]
wire [63:0] io_brupdate_b2_uop_exc_cause_0 = io_brupdate_b2_uop_exc_cause; // @[util.scala:458:7]
wire [4:0] io_brupdate_b2_uop_mem_cmd_0 = io_brupdate_b2_uop_mem_cmd; // @[util.scala:458:7]
wire [1:0] io_brupdate_b2_uop_mem_size_0 = io_brupdate_b2_uop_mem_size; // @[util.scala:458:7]
wire io_brupdate_b2_uop_mem_signed_0 = io_brupdate_b2_uop_mem_signed; // @[util.scala:458:7]
wire io_brupdate_b2_uop_uses_ldq_0 = io_brupdate_b2_uop_uses_ldq; // @[util.scala:458:7]
wire io_brupdate_b2_uop_uses_stq_0 = io_brupdate_b2_uop_uses_stq; // @[util.scala:458:7]
wire io_brupdate_b2_uop_is_unique_0 = io_brupdate_b2_uop_is_unique; // @[util.scala:458:7]
wire io_brupdate_b2_uop_flush_on_commit_0 = io_brupdate_b2_uop_flush_on_commit; // @[util.scala:458:7]
wire [2:0] io_brupdate_b2_uop_csr_cmd_0 = io_brupdate_b2_uop_csr_cmd; // @[util.scala:458:7]
wire io_brupdate_b2_uop_ldst_is_rs1_0 = io_brupdate_b2_uop_ldst_is_rs1; // @[util.scala:458:7]
wire [5:0] io_brupdate_b2_uop_ldst_0 = io_brupdate_b2_uop_ldst; // @[util.scala:458:7]
wire [5:0] io_brupdate_b2_uop_lrs1_0 = io_brupdate_b2_uop_lrs1; // @[util.scala:458:7]
wire [5:0] io_brupdate_b2_uop_lrs2_0 = io_brupdate_b2_uop_lrs2; // @[util.scala:458:7]
wire [5:0] io_brupdate_b2_uop_lrs3_0 = io_brupdate_b2_uop_lrs3; // @[util.scala:458:7]
wire [1:0] io_brupdate_b2_uop_dst_rtype_0 = io_brupdate_b2_uop_dst_rtype; // @[util.scala:458:7]
wire [1:0] io_brupdate_b2_uop_lrs1_rtype_0 = io_brupdate_b2_uop_lrs1_rtype; // @[util.scala:458:7]
wire [1:0] io_brupdate_b2_uop_lrs2_rtype_0 = io_brupdate_b2_uop_lrs2_rtype; // @[util.scala:458:7]
wire io_brupdate_b2_uop_frs3_en_0 = io_brupdate_b2_uop_frs3_en; // @[util.scala:458:7]
wire io_brupdate_b2_uop_fcn_dw_0 = io_brupdate_b2_uop_fcn_dw; // @[util.scala:458:7]
wire [4:0] io_brupdate_b2_uop_fcn_op_0 = io_brupdate_b2_uop_fcn_op; // @[util.scala:458:7]
wire io_brupdate_b2_uop_fp_val_0 = io_brupdate_b2_uop_fp_val; // @[util.scala:458:7]
wire [2:0] io_brupdate_b2_uop_fp_rm_0 = io_brupdate_b2_uop_fp_rm; // @[util.scala:458:7]
wire [1:0] io_brupdate_b2_uop_fp_typ_0 = io_brupdate_b2_uop_fp_typ; // @[util.scala:458:7]
wire io_brupdate_b2_uop_xcpt_pf_if_0 = io_brupdate_b2_uop_xcpt_pf_if; // @[util.scala:458:7]
wire io_brupdate_b2_uop_xcpt_ae_if_0 = io_brupdate_b2_uop_xcpt_ae_if; // @[util.scala:458:7]
wire io_brupdate_b2_uop_xcpt_ma_if_0 = io_brupdate_b2_uop_xcpt_ma_if; // @[util.scala:458:7]
wire io_brupdate_b2_uop_bp_debug_if_0 = io_brupdate_b2_uop_bp_debug_if; // @[util.scala:458:7]
wire io_brupdate_b2_uop_bp_xcpt_if_0 = io_brupdate_b2_uop_bp_xcpt_if; // @[util.scala:458:7]
wire [2:0] io_brupdate_b2_uop_debug_fsrc_0 = io_brupdate_b2_uop_debug_fsrc; // @[util.scala:458:7]
wire [2:0] io_brupdate_b2_uop_debug_tsrc_0 = io_brupdate_b2_uop_debug_tsrc; // @[util.scala:458:7]
wire io_brupdate_b2_mispredict_0 = io_brupdate_b2_mispredict; // @[util.scala:458:7]
wire io_brupdate_b2_taken_0 = io_brupdate_b2_taken; // @[util.scala:458:7]
wire [2:0] io_brupdate_b2_cfi_type_0 = io_brupdate_b2_cfi_type; // @[util.scala:458:7]
wire [1:0] io_brupdate_b2_pc_sel_0 = io_brupdate_b2_pc_sel; // @[util.scala:458:7]
wire [39:0] io_brupdate_b2_jalr_target_0 = io_brupdate_b2_jalr_target; // @[util.scala:458:7]
wire [20:0] io_brupdate_b2_target_offset_0 = io_brupdate_b2_target_offset; // @[util.scala:458:7]
wire io_flush_0 = io_flush; // @[util.scala:458:7]
wire io_enq_bits_predicated = 1'h0; // @[util.scala:458:7]
wire _valids_WIRE_0 = 1'h0; // @[util.scala:504:34]
wire _valids_WIRE_1 = 1'h0; // @[util.scala:504:34]
wire _valids_WIRE_2 = 1'h0; // @[util.scala:504:34]
wire _valids_WIRE_3 = 1'h0; // @[util.scala:504:34]
wire _valids_WIRE_4 = 1'h0; // @[util.scala:504:34]
wire _valids_WIRE_5 = 1'h0; // @[util.scala:504:34]
wire _valids_WIRE_6 = 1'h0; // @[util.scala:504:34]
wire _valids_WIRE_7 = 1'h0; // @[util.scala:504:34]
wire _io_enq_ready_T; // @[util.scala:543:21]
wire _io_deq_valid_T_1; // @[util.scala:548:42]
wire [31:0] out_uop_inst; // @[util.scala:545:19]
wire [31:0] out_uop_debug_inst; // @[util.scala:545:19]
wire out_uop_is_rvc; // @[util.scala:545:19]
wire [39:0] out_uop_debug_pc; // @[util.scala:545:19]
wire out_uop_iq_type_0; // @[util.scala:545:19]
wire out_uop_iq_type_1; // @[util.scala:545:19]
wire out_uop_iq_type_2; // @[util.scala:545:19]
wire out_uop_iq_type_3; // @[util.scala:545:19]
wire out_uop_fu_code_0; // @[util.scala:545:19]
wire out_uop_fu_code_1; // @[util.scala:545:19]
wire out_uop_fu_code_2; // @[util.scala:545:19]
wire out_uop_fu_code_3; // @[util.scala:545:19]
wire out_uop_fu_code_4; // @[util.scala:545:19]
wire out_uop_fu_code_5; // @[util.scala:545:19]
wire out_uop_fu_code_6; // @[util.scala:545:19]
wire out_uop_fu_code_7; // @[util.scala:545:19]
wire out_uop_fu_code_8; // @[util.scala:545:19]
wire out_uop_fu_code_9; // @[util.scala:545:19]
wire out_uop_iw_issued; // @[util.scala:545:19]
wire out_uop_iw_issued_partial_agen; // @[util.scala:545:19]
wire out_uop_iw_issued_partial_dgen; // @[util.scala:545:19]
wire [2:0] out_uop_iw_p1_speculative_child; // @[util.scala:545:19]
wire [2:0] out_uop_iw_p2_speculative_child; // @[util.scala:545:19]
wire out_uop_iw_p1_bypass_hint; // @[util.scala:545:19]
wire out_uop_iw_p2_bypass_hint; // @[util.scala:545:19]
wire out_uop_iw_p3_bypass_hint; // @[util.scala:545:19]
wire [2:0] out_uop_dis_col_sel; // @[util.scala:545:19]
wire [15:0] out_uop_br_mask; // @[util.scala:545:19]
wire [3:0] out_uop_br_tag; // @[util.scala:545:19]
wire [3:0] out_uop_br_type; // @[util.scala:545:19]
wire out_uop_is_sfb; // @[util.scala:545:19]
wire out_uop_is_fence; // @[util.scala:545:19]
wire out_uop_is_fencei; // @[util.scala:545:19]
wire out_uop_is_sfence; // @[util.scala:545:19]
wire out_uop_is_amo; // @[util.scala:545:19]
wire out_uop_is_eret; // @[util.scala:545:19]
wire out_uop_is_sys_pc2epc; // @[util.scala:545:19]
wire out_uop_is_rocc; // @[util.scala:545:19]
wire out_uop_is_mov; // @[util.scala:545:19]
wire [4:0] out_uop_ftq_idx; // @[util.scala:545:19]
wire out_uop_edge_inst; // @[util.scala:545:19]
wire [5:0] out_uop_pc_lob; // @[util.scala:545:19]
wire out_uop_taken; // @[util.scala:545:19]
wire out_uop_imm_rename; // @[util.scala:545:19]
wire [2:0] out_uop_imm_sel; // @[util.scala:545:19]
wire [4:0] out_uop_pimm; // @[util.scala:545:19]
wire [19:0] out_uop_imm_packed; // @[util.scala:545:19]
wire [1:0] out_uop_op1_sel; // @[util.scala:545:19]
wire [2:0] out_uop_op2_sel; // @[util.scala:545:19]
wire out_uop_fp_ctrl_ldst; // @[util.scala:545:19]
wire out_uop_fp_ctrl_wen; // @[util.scala:545:19]
wire out_uop_fp_ctrl_ren1; // @[util.scala:545:19]
wire out_uop_fp_ctrl_ren2; // @[util.scala:545:19]
wire out_uop_fp_ctrl_ren3; // @[util.scala:545:19]
wire out_uop_fp_ctrl_swap12; // @[util.scala:545:19]
wire out_uop_fp_ctrl_swap23; // @[util.scala:545:19]
wire [1:0] out_uop_fp_ctrl_typeTagIn; // @[util.scala:545:19]
wire [1:0] out_uop_fp_ctrl_typeTagOut; // @[util.scala:545:19]
wire out_uop_fp_ctrl_fromint; // @[util.scala:545:19]
wire out_uop_fp_ctrl_toint; // @[util.scala:545:19]
wire out_uop_fp_ctrl_fastpipe; // @[util.scala:545:19]
wire out_uop_fp_ctrl_fma; // @[util.scala:545:19]
wire out_uop_fp_ctrl_div; // @[util.scala:545:19]
wire out_uop_fp_ctrl_sqrt; // @[util.scala:545:19]
wire out_uop_fp_ctrl_wflags; // @[util.scala:545:19]
wire out_uop_fp_ctrl_vec; // @[util.scala:545:19]
wire [6:0] out_uop_rob_idx; // @[util.scala:545:19]
wire [4:0] out_uop_ldq_idx; // @[util.scala:545:19]
wire [4:0] out_uop_stq_idx; // @[util.scala:545:19]
wire [1:0] out_uop_rxq_idx; // @[util.scala:545:19]
wire [6:0] out_uop_pdst; // @[util.scala:545:19]
wire [6:0] out_uop_prs1; // @[util.scala:545:19]
wire [6:0] out_uop_prs2; // @[util.scala:545:19]
wire [6:0] out_uop_prs3; // @[util.scala:545:19]
wire [4:0] out_uop_ppred; // @[util.scala:545:19]
wire out_uop_prs1_busy; // @[util.scala:545:19]
wire out_uop_prs2_busy; // @[util.scala:545:19]
wire out_uop_prs3_busy; // @[util.scala:545:19]
wire out_uop_ppred_busy; // @[util.scala:545:19]
wire [6:0] out_uop_stale_pdst; // @[util.scala:545:19]
wire out_uop_exception; // @[util.scala:545:19]
wire [63:0] out_uop_exc_cause; // @[util.scala:545:19]
wire [4:0] out_uop_mem_cmd; // @[util.scala:545:19]
wire [1:0] out_uop_mem_size; // @[util.scala:545:19]
wire out_uop_mem_signed; // @[util.scala:545:19]
wire out_uop_uses_ldq; // @[util.scala:545:19]
wire out_uop_uses_stq; // @[util.scala:545:19]
wire out_uop_is_unique; // @[util.scala:545:19]
wire out_uop_flush_on_commit; // @[util.scala:545:19]
wire [2:0] out_uop_csr_cmd; // @[util.scala:545:19]
wire out_uop_ldst_is_rs1; // @[util.scala:545:19]
wire [5:0] out_uop_ldst; // @[util.scala:545:19]
wire [5:0] out_uop_lrs1; // @[util.scala:545:19]
wire [5:0] out_uop_lrs2; // @[util.scala:545:19]
wire [5:0] out_uop_lrs3; // @[util.scala:545:19]
wire [1:0] out_uop_dst_rtype; // @[util.scala:545:19]
wire [1:0] out_uop_lrs1_rtype; // @[util.scala:545:19]
wire [1:0] out_uop_lrs2_rtype; // @[util.scala:545:19]
wire out_uop_frs3_en; // @[util.scala:545:19]
wire out_uop_fcn_dw; // @[util.scala:545:19]
wire [4:0] out_uop_fcn_op; // @[util.scala:545:19]
wire out_uop_fp_val; // @[util.scala:545:19]
wire [2:0] out_uop_fp_rm; // @[util.scala:545:19]
wire [1:0] out_uop_fp_typ; // @[util.scala:545:19]
wire out_uop_xcpt_pf_if; // @[util.scala:545:19]
wire out_uop_xcpt_ae_if; // @[util.scala:545:19]
wire out_uop_xcpt_ma_if; // @[util.scala:545:19]
wire out_uop_bp_debug_if; // @[util.scala:545:19]
wire out_uop_bp_xcpt_if; // @[util.scala:545:19]
wire [2:0] out_uop_debug_fsrc; // @[util.scala:545:19]
wire [2:0] out_uop_debug_tsrc; // @[util.scala:545:19]
wire [64:0] out_data; // @[util.scala:545:19]
wire out_predicated; // @[util.scala:545:19]
wire out_fflags_valid; // @[util.scala:545:19]
wire [4:0] out_fflags_bits; // @[util.scala:545:19]
wire _io_empty_T_1; // @[util.scala:512:27]
wire _do_enq_T_6 = io_flush_0; // @[util.scala:458:7, :514:113]
wire _valids_0_T_5 = io_flush_0; // @[util.scala:458:7, :520:94]
wire _valids_1_T_5 = io_flush_0; // @[util.scala:458:7, :520:94]
wire _valids_2_T_5 = io_flush_0; // @[util.scala:458:7, :520:94]
wire _valids_3_T_5 = io_flush_0; // @[util.scala:458:7, :520:94]
wire _valids_4_T_5 = io_flush_0; // @[util.scala:458:7, :520:94]
wire _valids_5_T_5 = io_flush_0; // @[util.scala:458:7, :520:94]
wire _valids_6_T_5 = io_flush_0; // @[util.scala:458:7, :520:94]
wire _valids_7_T_5 = io_flush_0; // @[util.scala:458:7, :520:94]
wire io_enq_ready_0; // @[util.scala:458:7]
wire io_deq_bits_uop_iq_type_0_0; // @[util.scala:458:7]
wire io_deq_bits_uop_iq_type_1_0; // @[util.scala:458:7]
wire io_deq_bits_uop_iq_type_2_0; // @[util.scala:458:7]
wire io_deq_bits_uop_iq_type_3_0; // @[util.scala:458:7]
wire io_deq_bits_uop_fu_code_0_0; // @[util.scala:458:7]
wire io_deq_bits_uop_fu_code_1_0; // @[util.scala:458:7]
wire io_deq_bits_uop_fu_code_2_0; // @[util.scala:458:7]
wire io_deq_bits_uop_fu_code_3_0; // @[util.scala:458:7]
wire io_deq_bits_uop_fu_code_4_0; // @[util.scala:458:7]
wire io_deq_bits_uop_fu_code_5_0; // @[util.scala:458:7]
wire io_deq_bits_uop_fu_code_6_0; // @[util.scala:458:7]
wire io_deq_bits_uop_fu_code_7_0; // @[util.scala:458:7]
wire io_deq_bits_uop_fu_code_8_0; // @[util.scala:458:7]
wire io_deq_bits_uop_fu_code_9_0; // @[util.scala:458:7]
wire io_deq_bits_uop_fp_ctrl_ldst_0; // @[util.scala:458:7]
wire io_deq_bits_uop_fp_ctrl_wen_0; // @[util.scala:458:7]
wire io_deq_bits_uop_fp_ctrl_ren1_0; // @[util.scala:458:7]
wire io_deq_bits_uop_fp_ctrl_ren2_0; // @[util.scala:458:7]
wire io_deq_bits_uop_fp_ctrl_ren3_0; // @[util.scala:458:7]
wire io_deq_bits_uop_fp_ctrl_swap12_0; // @[util.scala:458:7]
wire io_deq_bits_uop_fp_ctrl_swap23_0; // @[util.scala:458:7]
wire [1:0] io_deq_bits_uop_fp_ctrl_typeTagIn_0; // @[util.scala:458:7]
wire [1:0] io_deq_bits_uop_fp_ctrl_typeTagOut_0; // @[util.scala:458:7]
wire io_deq_bits_uop_fp_ctrl_fromint_0; // @[util.scala:458:7]
wire io_deq_bits_uop_fp_ctrl_toint_0; // @[util.scala:458:7]
wire io_deq_bits_uop_fp_ctrl_fastpipe_0; // @[util.scala:458:7]
wire io_deq_bits_uop_fp_ctrl_fma_0; // @[util.scala:458:7]
wire io_deq_bits_uop_fp_ctrl_div_0; // @[util.scala:458:7]
wire io_deq_bits_uop_fp_ctrl_sqrt_0; // @[util.scala:458:7]
wire io_deq_bits_uop_fp_ctrl_wflags_0; // @[util.scala:458:7]
wire io_deq_bits_uop_fp_ctrl_vec_0; // @[util.scala:458:7]
wire [31:0] io_deq_bits_uop_inst_0; // @[util.scala:458:7]
wire [31:0] io_deq_bits_uop_debug_inst_0; // @[util.scala:458:7]
wire io_deq_bits_uop_is_rvc_0; // @[util.scala:458:7]
wire [39:0] io_deq_bits_uop_debug_pc_0; // @[util.scala:458:7]
wire io_deq_bits_uop_iw_issued_0; // @[util.scala:458:7]
wire io_deq_bits_uop_iw_issued_partial_agen_0; // @[util.scala:458:7]
wire io_deq_bits_uop_iw_issued_partial_dgen_0; // @[util.scala:458:7]
wire [2:0] io_deq_bits_uop_iw_p1_speculative_child_0; // @[util.scala:458:7]
wire [2:0] io_deq_bits_uop_iw_p2_speculative_child_0; // @[util.scala:458:7]
wire io_deq_bits_uop_iw_p1_bypass_hint_0; // @[util.scala:458:7]
wire io_deq_bits_uop_iw_p2_bypass_hint_0; // @[util.scala:458:7]
wire io_deq_bits_uop_iw_p3_bypass_hint_0; // @[util.scala:458:7]
wire [2:0] io_deq_bits_uop_dis_col_sel_0; // @[util.scala:458:7]
wire [15:0] io_deq_bits_uop_br_mask_0; // @[util.scala:458:7]
wire [3:0] io_deq_bits_uop_br_tag_0; // @[util.scala:458:7]
wire [3:0] io_deq_bits_uop_br_type_0; // @[util.scala:458:7]
wire io_deq_bits_uop_is_sfb_0; // @[util.scala:458:7]
wire io_deq_bits_uop_is_fence_0; // @[util.scala:458:7]
wire io_deq_bits_uop_is_fencei_0; // @[util.scala:458:7]
wire io_deq_bits_uop_is_sfence_0; // @[util.scala:458:7]
wire io_deq_bits_uop_is_amo_0; // @[util.scala:458:7]
wire io_deq_bits_uop_is_eret_0; // @[util.scala:458:7]
wire io_deq_bits_uop_is_sys_pc2epc_0; // @[util.scala:458:7]
wire io_deq_bits_uop_is_rocc_0; // @[util.scala:458:7]
wire io_deq_bits_uop_is_mov_0; // @[util.scala:458:7]
wire [4:0] io_deq_bits_uop_ftq_idx_0; // @[util.scala:458:7]
wire io_deq_bits_uop_edge_inst_0; // @[util.scala:458:7]
wire [5:0] io_deq_bits_uop_pc_lob_0; // @[util.scala:458:7]
wire io_deq_bits_uop_taken_0; // @[util.scala:458:7]
wire io_deq_bits_uop_imm_rename_0; // @[util.scala:458:7]
wire [2:0] io_deq_bits_uop_imm_sel_0; // @[util.scala:458:7]
wire [4:0] io_deq_bits_uop_pimm_0; // @[util.scala:458:7]
wire [19:0] io_deq_bits_uop_imm_packed_0; // @[util.scala:458:7]
wire [1:0] io_deq_bits_uop_op1_sel_0; // @[util.scala:458:7]
wire [2:0] io_deq_bits_uop_op2_sel_0; // @[util.scala:458:7]
wire [6:0] io_deq_bits_uop_rob_idx_0; // @[util.scala:458:7]
wire [4:0] io_deq_bits_uop_ldq_idx_0; // @[util.scala:458:7]
wire [4:0] io_deq_bits_uop_stq_idx_0; // @[util.scala:458:7]
wire [1:0] io_deq_bits_uop_rxq_idx_0; // @[util.scala:458:7]
wire [6:0] io_deq_bits_uop_pdst_0; // @[util.scala:458:7]
wire [6:0] io_deq_bits_uop_prs1_0; // @[util.scala:458:7]
wire [6:0] io_deq_bits_uop_prs2_0; // @[util.scala:458:7]
wire [6:0] io_deq_bits_uop_prs3_0; // @[util.scala:458:7]
wire [4:0] io_deq_bits_uop_ppred_0; // @[util.scala:458:7]
wire io_deq_bits_uop_prs1_busy_0; // @[util.scala:458:7]
wire io_deq_bits_uop_prs2_busy_0; // @[util.scala:458:7]
wire io_deq_bits_uop_prs3_busy_0; // @[util.scala:458:7]
wire io_deq_bits_uop_ppred_busy_0; // @[util.scala:458:7]
wire [6:0] io_deq_bits_uop_stale_pdst_0; // @[util.scala:458:7]
wire io_deq_bits_uop_exception_0; // @[util.scala:458:7]
wire [63:0] io_deq_bits_uop_exc_cause_0; // @[util.scala:458:7]
wire [4:0] io_deq_bits_uop_mem_cmd_0; // @[util.scala:458:7]
wire [1:0] io_deq_bits_uop_mem_size_0; // @[util.scala:458:7]
wire io_deq_bits_uop_mem_signed_0; // @[util.scala:458:7]
wire io_deq_bits_uop_uses_ldq_0; // @[util.scala:458:7]
wire io_deq_bits_uop_uses_stq_0; // @[util.scala:458:7]
wire io_deq_bits_uop_is_unique_0; // @[util.scala:458:7]
wire io_deq_bits_uop_flush_on_commit_0; // @[util.scala:458:7]
wire [2:0] io_deq_bits_uop_csr_cmd_0; // @[util.scala:458:7]
wire io_deq_bits_uop_ldst_is_rs1_0; // @[util.scala:458:7]
wire [5:0] io_deq_bits_uop_ldst_0; // @[util.scala:458:7]
wire [5:0] io_deq_bits_uop_lrs1_0; // @[util.scala:458:7]
wire [5:0] io_deq_bits_uop_lrs2_0; // @[util.scala:458:7]
wire [5:0] io_deq_bits_uop_lrs3_0; // @[util.scala:458:7]
wire [1:0] io_deq_bits_uop_dst_rtype_0; // @[util.scala:458:7]
wire [1:0] io_deq_bits_uop_lrs1_rtype_0; // @[util.scala:458:7]
wire [1:0] io_deq_bits_uop_lrs2_rtype_0; // @[util.scala:458:7]
wire io_deq_bits_uop_frs3_en_0; // @[util.scala:458:7]
wire io_deq_bits_uop_fcn_dw_0; // @[util.scala:458:7]
wire [4:0] io_deq_bits_uop_fcn_op_0; // @[util.scala:458:7]
wire io_deq_bits_uop_fp_val_0; // @[util.scala:458:7]
wire [2:0] io_deq_bits_uop_fp_rm_0; // @[util.scala:458:7]
wire [1:0] io_deq_bits_uop_fp_typ_0; // @[util.scala:458:7]
wire io_deq_bits_uop_xcpt_pf_if_0; // @[util.scala:458:7]
wire io_deq_bits_uop_xcpt_ae_if_0; // @[util.scala:458:7]
wire io_deq_bits_uop_xcpt_ma_if_0; // @[util.scala:458:7]
wire io_deq_bits_uop_bp_debug_if_0; // @[util.scala:458:7]
wire io_deq_bits_uop_bp_xcpt_if_0; // @[util.scala:458:7]
wire [2:0] io_deq_bits_uop_debug_fsrc_0; // @[util.scala:458:7]
wire [2:0] io_deq_bits_uop_debug_tsrc_0; // @[util.scala:458:7]
wire io_deq_bits_fflags_valid_0; // @[util.scala:458:7]
wire [4:0] io_deq_bits_fflags_bits_0; // @[util.scala:458:7]
wire [64:0] io_deq_bits_data_0; // @[util.scala:458:7]
wire io_deq_bits_predicated_0; // @[util.scala:458:7]
wire io_deq_valid_0; // @[util.scala:458:7]
wire io_empty; // @[util.scala:458:7]
wire [2:0] io_count_0; // @[util.scala:458:7]
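  // The SRAM read word packs the per-entry payload as {fflags_bits[4:0], fflags_valid, predicated,
  // data[64:0]}, 72 bits in total; the fields are unpacked below.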
assign out_data = _ram_ext_R0_data[64:0]; // @[util.scala:503:22, :545:19]
assign out_predicated = _ram_ext_R0_data[65]; // @[util.scala:503:22, :545:19]
assign out_fflags_valid = _ram_ext_R0_data[66]; // @[util.scala:503:22, :545:19]
assign out_fflags_bits = _ram_ext_R0_data[71:67]; // @[util.scala:503:22, :545:19]
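  // Per-entry bookkeeping: one valid bit and one full set of micro-op fields are held in registers
  // for each of the 8 entries, while the wide data/fflags payload unpacked above lives in the
  // external ram macro (_ram_ext).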
reg valids_0; // @[util.scala:504:26]
reg valids_1; // @[util.scala:504:26]
reg valids_2; // @[util.scala:504:26]
reg valids_3; // @[util.scala:504:26]
reg valids_4; // @[util.scala:504:26]
reg valids_5; // @[util.scala:504:26]
reg valids_6; // @[util.scala:504:26]
reg valids_7; // @[util.scala:504:26]
reg [31:0] uops_0_inst; // @[util.scala:505:22]
reg [31:0] uops_0_debug_inst; // @[util.scala:505:22]
reg uops_0_is_rvc; // @[util.scala:505:22]
reg [39:0] uops_0_debug_pc; // @[util.scala:505:22]
reg uops_0_iq_type_0; // @[util.scala:505:22]
reg uops_0_iq_type_1; // @[util.scala:505:22]
reg uops_0_iq_type_2; // @[util.scala:505:22]
reg uops_0_iq_type_3; // @[util.scala:505:22]
reg uops_0_fu_code_0; // @[util.scala:505:22]
reg uops_0_fu_code_1; // @[util.scala:505:22]
reg uops_0_fu_code_2; // @[util.scala:505:22]
reg uops_0_fu_code_3; // @[util.scala:505:22]
reg uops_0_fu_code_4; // @[util.scala:505:22]
reg uops_0_fu_code_5; // @[util.scala:505:22]
reg uops_0_fu_code_6; // @[util.scala:505:22]
reg uops_0_fu_code_7; // @[util.scala:505:22]
reg uops_0_fu_code_8; // @[util.scala:505:22]
reg uops_0_fu_code_9; // @[util.scala:505:22]
reg uops_0_iw_issued; // @[util.scala:505:22]
reg uops_0_iw_issued_partial_agen; // @[util.scala:505:22]
reg uops_0_iw_issued_partial_dgen; // @[util.scala:505:22]
reg [2:0] uops_0_iw_p1_speculative_child; // @[util.scala:505:22]
reg [2:0] uops_0_iw_p2_speculative_child; // @[util.scala:505:22]
reg uops_0_iw_p1_bypass_hint; // @[util.scala:505:22]
reg uops_0_iw_p2_bypass_hint; // @[util.scala:505:22]
reg uops_0_iw_p3_bypass_hint; // @[util.scala:505:22]
reg [2:0] uops_0_dis_col_sel; // @[util.scala:505:22]
reg [15:0] uops_0_br_mask; // @[util.scala:505:22]
reg [3:0] uops_0_br_tag; // @[util.scala:505:22]
reg [3:0] uops_0_br_type; // @[util.scala:505:22]
reg uops_0_is_sfb; // @[util.scala:505:22]
reg uops_0_is_fence; // @[util.scala:505:22]
reg uops_0_is_fencei; // @[util.scala:505:22]
reg uops_0_is_sfence; // @[util.scala:505:22]
reg uops_0_is_amo; // @[util.scala:505:22]
reg uops_0_is_eret; // @[util.scala:505:22]
reg uops_0_is_sys_pc2epc; // @[util.scala:505:22]
reg uops_0_is_rocc; // @[util.scala:505:22]
reg uops_0_is_mov; // @[util.scala:505:22]
reg [4:0] uops_0_ftq_idx; // @[util.scala:505:22]
reg uops_0_edge_inst; // @[util.scala:505:22]
reg [5:0] uops_0_pc_lob; // @[util.scala:505:22]
reg uops_0_taken; // @[util.scala:505:22]
reg uops_0_imm_rename; // @[util.scala:505:22]
reg [2:0] uops_0_imm_sel; // @[util.scala:505:22]
reg [4:0] uops_0_pimm; // @[util.scala:505:22]
reg [19:0] uops_0_imm_packed; // @[util.scala:505:22]
reg [1:0] uops_0_op1_sel; // @[util.scala:505:22]
reg [2:0] uops_0_op2_sel; // @[util.scala:505:22]
reg uops_0_fp_ctrl_ldst; // @[util.scala:505:22]
reg uops_0_fp_ctrl_wen; // @[util.scala:505:22]
reg uops_0_fp_ctrl_ren1; // @[util.scala:505:22]
reg uops_0_fp_ctrl_ren2; // @[util.scala:505:22]
reg uops_0_fp_ctrl_ren3; // @[util.scala:505:22]
reg uops_0_fp_ctrl_swap12; // @[util.scala:505:22]
reg uops_0_fp_ctrl_swap23; // @[util.scala:505:22]
reg [1:0] uops_0_fp_ctrl_typeTagIn; // @[util.scala:505:22]
reg [1:0] uops_0_fp_ctrl_typeTagOut; // @[util.scala:505:22]
reg uops_0_fp_ctrl_fromint; // @[util.scala:505:22]
reg uops_0_fp_ctrl_toint; // @[util.scala:505:22]
reg uops_0_fp_ctrl_fastpipe; // @[util.scala:505:22]
reg uops_0_fp_ctrl_fma; // @[util.scala:505:22]
reg uops_0_fp_ctrl_div; // @[util.scala:505:22]
reg uops_0_fp_ctrl_sqrt; // @[util.scala:505:22]
reg uops_0_fp_ctrl_wflags; // @[util.scala:505:22]
reg uops_0_fp_ctrl_vec; // @[util.scala:505:22]
reg [6:0] uops_0_rob_idx; // @[util.scala:505:22]
reg [4:0] uops_0_ldq_idx; // @[util.scala:505:22]
reg [4:0] uops_0_stq_idx; // @[util.scala:505:22]
reg [1:0] uops_0_rxq_idx; // @[util.scala:505:22]
reg [6:0] uops_0_pdst; // @[util.scala:505:22]
reg [6:0] uops_0_prs1; // @[util.scala:505:22]
reg [6:0] uops_0_prs2; // @[util.scala:505:22]
reg [6:0] uops_0_prs3; // @[util.scala:505:22]
reg [4:0] uops_0_ppred; // @[util.scala:505:22]
reg uops_0_prs1_busy; // @[util.scala:505:22]
reg uops_0_prs2_busy; // @[util.scala:505:22]
reg uops_0_prs3_busy; // @[util.scala:505:22]
reg uops_0_ppred_busy; // @[util.scala:505:22]
reg [6:0] uops_0_stale_pdst; // @[util.scala:505:22]
reg uops_0_exception; // @[util.scala:505:22]
reg [63:0] uops_0_exc_cause; // @[util.scala:505:22]
reg [4:0] uops_0_mem_cmd; // @[util.scala:505:22]
reg [1:0] uops_0_mem_size; // @[util.scala:505:22]
reg uops_0_mem_signed; // @[util.scala:505:22]
reg uops_0_uses_ldq; // @[util.scala:505:22]
reg uops_0_uses_stq; // @[util.scala:505:22]
reg uops_0_is_unique; // @[util.scala:505:22]
reg uops_0_flush_on_commit; // @[util.scala:505:22]
reg [2:0] uops_0_csr_cmd; // @[util.scala:505:22]
reg uops_0_ldst_is_rs1; // @[util.scala:505:22]
reg [5:0] uops_0_ldst; // @[util.scala:505:22]
reg [5:0] uops_0_lrs1; // @[util.scala:505:22]
reg [5:0] uops_0_lrs2; // @[util.scala:505:22]
reg [5:0] uops_0_lrs3; // @[util.scala:505:22]
reg [1:0] uops_0_dst_rtype; // @[util.scala:505:22]
reg [1:0] uops_0_lrs1_rtype; // @[util.scala:505:22]
reg [1:0] uops_0_lrs2_rtype; // @[util.scala:505:22]
reg uops_0_frs3_en; // @[util.scala:505:22]
reg uops_0_fcn_dw; // @[util.scala:505:22]
reg [4:0] uops_0_fcn_op; // @[util.scala:505:22]
reg uops_0_fp_val; // @[util.scala:505:22]
reg [2:0] uops_0_fp_rm; // @[util.scala:505:22]
reg [1:0] uops_0_fp_typ; // @[util.scala:505:22]
reg uops_0_xcpt_pf_if; // @[util.scala:505:22]
reg uops_0_xcpt_ae_if; // @[util.scala:505:22]
reg uops_0_xcpt_ma_if; // @[util.scala:505:22]
reg uops_0_bp_debug_if; // @[util.scala:505:22]
reg uops_0_bp_xcpt_if; // @[util.scala:505:22]
reg [2:0] uops_0_debug_fsrc; // @[util.scala:505:22]
reg [2:0] uops_0_debug_tsrc; // @[util.scala:505:22]
reg [31:0] uops_1_inst; // @[util.scala:505:22]
reg [31:0] uops_1_debug_inst; // @[util.scala:505:22]
reg uops_1_is_rvc; // @[util.scala:505:22]
reg [39:0] uops_1_debug_pc; // @[util.scala:505:22]
reg uops_1_iq_type_0; // @[util.scala:505:22]
reg uops_1_iq_type_1; // @[util.scala:505:22]
reg uops_1_iq_type_2; // @[util.scala:505:22]
reg uops_1_iq_type_3; // @[util.scala:505:22]
reg uops_1_fu_code_0; // @[util.scala:505:22]
reg uops_1_fu_code_1; // @[util.scala:505:22]
reg uops_1_fu_code_2; // @[util.scala:505:22]
reg uops_1_fu_code_3; // @[util.scala:505:22]
reg uops_1_fu_code_4; // @[util.scala:505:22]
reg uops_1_fu_code_5; // @[util.scala:505:22]
reg uops_1_fu_code_6; // @[util.scala:505:22]
reg uops_1_fu_code_7; // @[util.scala:505:22]
reg uops_1_fu_code_8; // @[util.scala:505:22]
reg uops_1_fu_code_9; // @[util.scala:505:22]
reg uops_1_iw_issued; // @[util.scala:505:22]
reg uops_1_iw_issued_partial_agen; // @[util.scala:505:22]
reg uops_1_iw_issued_partial_dgen; // @[util.scala:505:22]
reg [2:0] uops_1_iw_p1_speculative_child; // @[util.scala:505:22]
reg [2:0] uops_1_iw_p2_speculative_child; // @[util.scala:505:22]
reg uops_1_iw_p1_bypass_hint; // @[util.scala:505:22]
reg uops_1_iw_p2_bypass_hint; // @[util.scala:505:22]
reg uops_1_iw_p3_bypass_hint; // @[util.scala:505:22]
reg [2:0] uops_1_dis_col_sel; // @[util.scala:505:22]
reg [15:0] uops_1_br_mask; // @[util.scala:505:22]
reg [3:0] uops_1_br_tag; // @[util.scala:505:22]
reg [3:0] uops_1_br_type; // @[util.scala:505:22]
reg uops_1_is_sfb; // @[util.scala:505:22]
reg uops_1_is_fence; // @[util.scala:505:22]
reg uops_1_is_fencei; // @[util.scala:505:22]
reg uops_1_is_sfence; // @[util.scala:505:22]
reg uops_1_is_amo; // @[util.scala:505:22]
reg uops_1_is_eret; // @[util.scala:505:22]
reg uops_1_is_sys_pc2epc; // @[util.scala:505:22]
reg uops_1_is_rocc; // @[util.scala:505:22]
reg uops_1_is_mov; // @[util.scala:505:22]
reg [4:0] uops_1_ftq_idx; // @[util.scala:505:22]
reg uops_1_edge_inst; // @[util.scala:505:22]
reg [5:0] uops_1_pc_lob; // @[util.scala:505:22]
reg uops_1_taken; // @[util.scala:505:22]
reg uops_1_imm_rename; // @[util.scala:505:22]
reg [2:0] uops_1_imm_sel; // @[util.scala:505:22]
reg [4:0] uops_1_pimm; // @[util.scala:505:22]
reg [19:0] uops_1_imm_packed; // @[util.scala:505:22]
reg [1:0] uops_1_op1_sel; // @[util.scala:505:22]
reg [2:0] uops_1_op2_sel; // @[util.scala:505:22]
reg uops_1_fp_ctrl_ldst; // @[util.scala:505:22]
reg uops_1_fp_ctrl_wen; // @[util.scala:505:22]
reg uops_1_fp_ctrl_ren1; // @[util.scala:505:22]
reg uops_1_fp_ctrl_ren2; // @[util.scala:505:22]
reg uops_1_fp_ctrl_ren3; // @[util.scala:505:22]
reg uops_1_fp_ctrl_swap12; // @[util.scala:505:22]
reg uops_1_fp_ctrl_swap23; // @[util.scala:505:22]
reg [1:0] uops_1_fp_ctrl_typeTagIn; // @[util.scala:505:22]
reg [1:0] uops_1_fp_ctrl_typeTagOut; // @[util.scala:505:22]
reg uops_1_fp_ctrl_fromint; // @[util.scala:505:22]
reg uops_1_fp_ctrl_toint; // @[util.scala:505:22]
reg uops_1_fp_ctrl_fastpipe; // @[util.scala:505:22]
reg uops_1_fp_ctrl_fma; // @[util.scala:505:22]
reg uops_1_fp_ctrl_div; // @[util.scala:505:22]
reg uops_1_fp_ctrl_sqrt; // @[util.scala:505:22]
reg uops_1_fp_ctrl_wflags; // @[util.scala:505:22]
reg uops_1_fp_ctrl_vec; // @[util.scala:505:22]
reg [6:0] uops_1_rob_idx; // @[util.scala:505:22]
reg [4:0] uops_1_ldq_idx; // @[util.scala:505:22]
reg [4:0] uops_1_stq_idx; // @[util.scala:505:22]
reg [1:0] uops_1_rxq_idx; // @[util.scala:505:22]
reg [6:0] uops_1_pdst; // @[util.scala:505:22]
reg [6:0] uops_1_prs1; // @[util.scala:505:22]
reg [6:0] uops_1_prs2; // @[util.scala:505:22]
reg [6:0] uops_1_prs3; // @[util.scala:505:22]
reg [4:0] uops_1_ppred; // @[util.scala:505:22]
reg uops_1_prs1_busy; // @[util.scala:505:22]
reg uops_1_prs2_busy; // @[util.scala:505:22]
reg uops_1_prs3_busy; // @[util.scala:505:22]
reg uops_1_ppred_busy; // @[util.scala:505:22]
reg [6:0] uops_1_stale_pdst; // @[util.scala:505:22]
reg uops_1_exception; // @[util.scala:505:22]
reg [63:0] uops_1_exc_cause; // @[util.scala:505:22]
reg [4:0] uops_1_mem_cmd; // @[util.scala:505:22]
reg [1:0] uops_1_mem_size; // @[util.scala:505:22]
reg uops_1_mem_signed; // @[util.scala:505:22]
reg uops_1_uses_ldq; // @[util.scala:505:22]
reg uops_1_uses_stq; // @[util.scala:505:22]
reg uops_1_is_unique; // @[util.scala:505:22]
reg uops_1_flush_on_commit; // @[util.scala:505:22]
reg [2:0] uops_1_csr_cmd; // @[util.scala:505:22]
reg uops_1_ldst_is_rs1; // @[util.scala:505:22]
reg [5:0] uops_1_ldst; // @[util.scala:505:22]
reg [5:0] uops_1_lrs1; // @[util.scala:505:22]
reg [5:0] uops_1_lrs2; // @[util.scala:505:22]
reg [5:0] uops_1_lrs3; // @[util.scala:505:22]
reg [1:0] uops_1_dst_rtype; // @[util.scala:505:22]
reg [1:0] uops_1_lrs1_rtype; // @[util.scala:505:22]
reg [1:0] uops_1_lrs2_rtype; // @[util.scala:505:22]
reg uops_1_frs3_en; // @[util.scala:505:22]
reg uops_1_fcn_dw; // @[util.scala:505:22]
reg [4:0] uops_1_fcn_op; // @[util.scala:505:22]
reg uops_1_fp_val; // @[util.scala:505:22]
reg [2:0] uops_1_fp_rm; // @[util.scala:505:22]
reg [1:0] uops_1_fp_typ; // @[util.scala:505:22]
reg uops_1_xcpt_pf_if; // @[util.scala:505:22]
reg uops_1_xcpt_ae_if; // @[util.scala:505:22]
reg uops_1_xcpt_ma_if; // @[util.scala:505:22]
reg uops_1_bp_debug_if; // @[util.scala:505:22]
reg uops_1_bp_xcpt_if; // @[util.scala:505:22]
reg [2:0] uops_1_debug_fsrc; // @[util.scala:505:22]
reg [2:0] uops_1_debug_tsrc; // @[util.scala:505:22]
reg [31:0] uops_2_inst; // @[util.scala:505:22]
reg [31:0] uops_2_debug_inst; // @[util.scala:505:22]
reg uops_2_is_rvc; // @[util.scala:505:22]
reg [39:0] uops_2_debug_pc; // @[util.scala:505:22]
reg uops_2_iq_type_0; // @[util.scala:505:22]
reg uops_2_iq_type_1; // @[util.scala:505:22]
reg uops_2_iq_type_2; // @[util.scala:505:22]
reg uops_2_iq_type_3; // @[util.scala:505:22]
reg uops_2_fu_code_0; // @[util.scala:505:22]
reg uops_2_fu_code_1; // @[util.scala:505:22]
reg uops_2_fu_code_2; // @[util.scala:505:22]
reg uops_2_fu_code_3; // @[util.scala:505:22]
reg uops_2_fu_code_4; // @[util.scala:505:22]
reg uops_2_fu_code_5; // @[util.scala:505:22]
reg uops_2_fu_code_6; // @[util.scala:505:22]
reg uops_2_fu_code_7; // @[util.scala:505:22]
reg uops_2_fu_code_8; // @[util.scala:505:22]
reg uops_2_fu_code_9; // @[util.scala:505:22]
reg uops_2_iw_issued; // @[util.scala:505:22]
reg uops_2_iw_issued_partial_agen; // @[util.scala:505:22]
reg uops_2_iw_issued_partial_dgen; // @[util.scala:505:22]
reg [2:0] uops_2_iw_p1_speculative_child; // @[util.scala:505:22]
reg [2:0] uops_2_iw_p2_speculative_child; // @[util.scala:505:22]
reg uops_2_iw_p1_bypass_hint; // @[util.scala:505:22]
reg uops_2_iw_p2_bypass_hint; // @[util.scala:505:22]
reg uops_2_iw_p3_bypass_hint; // @[util.scala:505:22]
reg [2:0] uops_2_dis_col_sel; // @[util.scala:505:22]
reg [15:0] uops_2_br_mask; // @[util.scala:505:22]
reg [3:0] uops_2_br_tag; // @[util.scala:505:22]
reg [3:0] uops_2_br_type; // @[util.scala:505:22]
reg uops_2_is_sfb; // @[util.scala:505:22]
reg uops_2_is_fence; // @[util.scala:505:22]
reg uops_2_is_fencei; // @[util.scala:505:22]
reg uops_2_is_sfence; // @[util.scala:505:22]
reg uops_2_is_amo; // @[util.scala:505:22]
reg uops_2_is_eret; // @[util.scala:505:22]
reg uops_2_is_sys_pc2epc; // @[util.scala:505:22]
reg uops_2_is_rocc; // @[util.scala:505:22]
reg uops_2_is_mov; // @[util.scala:505:22]
reg [4:0] uops_2_ftq_idx; // @[util.scala:505:22]
reg uops_2_edge_inst; // @[util.scala:505:22]
reg [5:0] uops_2_pc_lob; // @[util.scala:505:22]
reg uops_2_taken; // @[util.scala:505:22]
reg uops_2_imm_rename; // @[util.scala:505:22]
reg [2:0] uops_2_imm_sel; // @[util.scala:505:22]
reg [4:0] uops_2_pimm; // @[util.scala:505:22]
reg [19:0] uops_2_imm_packed; // @[util.scala:505:22]
reg [1:0] uops_2_op1_sel; // @[util.scala:505:22]
reg [2:0] uops_2_op2_sel; // @[util.scala:505:22]
reg uops_2_fp_ctrl_ldst; // @[util.scala:505:22]
reg uops_2_fp_ctrl_wen; // @[util.scala:505:22]
reg uops_2_fp_ctrl_ren1; // @[util.scala:505:22]
reg uops_2_fp_ctrl_ren2; // @[util.scala:505:22]
reg uops_2_fp_ctrl_ren3; // @[util.scala:505:22]
reg uops_2_fp_ctrl_swap12; // @[util.scala:505:22]
reg uops_2_fp_ctrl_swap23; // @[util.scala:505:22]
reg [1:0] uops_2_fp_ctrl_typeTagIn; // @[util.scala:505:22]
reg [1:0] uops_2_fp_ctrl_typeTagOut; // @[util.scala:505:22]
reg uops_2_fp_ctrl_fromint; // @[util.scala:505:22]
reg uops_2_fp_ctrl_toint; // @[util.scala:505:22]
reg uops_2_fp_ctrl_fastpipe; // @[util.scala:505:22]
reg uops_2_fp_ctrl_fma; // @[util.scala:505:22]
reg uops_2_fp_ctrl_div; // @[util.scala:505:22]
reg uops_2_fp_ctrl_sqrt; // @[util.scala:505:22]
reg uops_2_fp_ctrl_wflags; // @[util.scala:505:22]
reg uops_2_fp_ctrl_vec; // @[util.scala:505:22]
reg [6:0] uops_2_rob_idx; // @[util.scala:505:22]
reg [4:0] uops_2_ldq_idx; // @[util.scala:505:22]
reg [4:0] uops_2_stq_idx; // @[util.scala:505:22]
reg [1:0] uops_2_rxq_idx; // @[util.scala:505:22]
reg [6:0] uops_2_pdst; // @[util.scala:505:22]
reg [6:0] uops_2_prs1; // @[util.scala:505:22]
reg [6:0] uops_2_prs2; // @[util.scala:505:22]
reg [6:0] uops_2_prs3; // @[util.scala:505:22]
reg [4:0] uops_2_ppred; // @[util.scala:505:22]
reg uops_2_prs1_busy; // @[util.scala:505:22]
reg uops_2_prs2_busy; // @[util.scala:505:22]
reg uops_2_prs3_busy; // @[util.scala:505:22]
reg uops_2_ppred_busy; // @[util.scala:505:22]
reg [6:0] uops_2_stale_pdst; // @[util.scala:505:22]
reg uops_2_exception; // @[util.scala:505:22]
reg [63:0] uops_2_exc_cause; // @[util.scala:505:22]
reg [4:0] uops_2_mem_cmd; // @[util.scala:505:22]
reg [1:0] uops_2_mem_size; // @[util.scala:505:22]
reg uops_2_mem_signed; // @[util.scala:505:22]
reg uops_2_uses_ldq; // @[util.scala:505:22]
reg uops_2_uses_stq; // @[util.scala:505:22]
reg uops_2_is_unique; // @[util.scala:505:22]
reg uops_2_flush_on_commit; // @[util.scala:505:22]
reg [2:0] uops_2_csr_cmd; // @[util.scala:505:22]
reg uops_2_ldst_is_rs1; // @[util.scala:505:22]
reg [5:0] uops_2_ldst; // @[util.scala:505:22]
reg [5:0] uops_2_lrs1; // @[util.scala:505:22]
reg [5:0] uops_2_lrs2; // @[util.scala:505:22]
reg [5:0] uops_2_lrs3; // @[util.scala:505:22]
reg [1:0] uops_2_dst_rtype; // @[util.scala:505:22]
reg [1:0] uops_2_lrs1_rtype; // @[util.scala:505:22]
reg [1:0] uops_2_lrs2_rtype; // @[util.scala:505:22]
reg uops_2_frs3_en; // @[util.scala:505:22]
reg uops_2_fcn_dw; // @[util.scala:505:22]
reg [4:0] uops_2_fcn_op; // @[util.scala:505:22]
reg uops_2_fp_val; // @[util.scala:505:22]
reg [2:0] uops_2_fp_rm; // @[util.scala:505:22]
reg [1:0] uops_2_fp_typ; // @[util.scala:505:22]
reg uops_2_xcpt_pf_if; // @[util.scala:505:22]
reg uops_2_xcpt_ae_if; // @[util.scala:505:22]
reg uops_2_xcpt_ma_if; // @[util.scala:505:22]
reg uops_2_bp_debug_if; // @[util.scala:505:22]
reg uops_2_bp_xcpt_if; // @[util.scala:505:22]
reg [2:0] uops_2_debug_fsrc; // @[util.scala:505:22]
reg [2:0] uops_2_debug_tsrc; // @[util.scala:505:22]
reg [31:0] uops_3_inst; // @[util.scala:505:22]
reg [31:0] uops_3_debug_inst; // @[util.scala:505:22]
reg uops_3_is_rvc; // @[util.scala:505:22]
reg [39:0] uops_3_debug_pc; // @[util.scala:505:22]
reg uops_3_iq_type_0; // @[util.scala:505:22]
reg uops_3_iq_type_1; // @[util.scala:505:22]
reg uops_3_iq_type_2; // @[util.scala:505:22]
reg uops_3_iq_type_3; // @[util.scala:505:22]
reg uops_3_fu_code_0; // @[util.scala:505:22]
reg uops_3_fu_code_1; // @[util.scala:505:22]
reg uops_3_fu_code_2; // @[util.scala:505:22]
reg uops_3_fu_code_3; // @[util.scala:505:22]
reg uops_3_fu_code_4; // @[util.scala:505:22]
reg uops_3_fu_code_5; // @[util.scala:505:22]
reg uops_3_fu_code_6; // @[util.scala:505:22]
reg uops_3_fu_code_7; // @[util.scala:505:22]
reg uops_3_fu_code_8; // @[util.scala:505:22]
reg uops_3_fu_code_9; // @[util.scala:505:22]
reg uops_3_iw_issued; // @[util.scala:505:22]
reg uops_3_iw_issued_partial_agen; // @[util.scala:505:22]
reg uops_3_iw_issued_partial_dgen; // @[util.scala:505:22]
reg [2:0] uops_3_iw_p1_speculative_child; // @[util.scala:505:22]
reg [2:0] uops_3_iw_p2_speculative_child; // @[util.scala:505:22]
reg uops_3_iw_p1_bypass_hint; // @[util.scala:505:22]
reg uops_3_iw_p2_bypass_hint; // @[util.scala:505:22]
reg uops_3_iw_p3_bypass_hint; // @[util.scala:505:22]
reg [2:0] uops_3_dis_col_sel; // @[util.scala:505:22]
reg [15:0] uops_3_br_mask; // @[util.scala:505:22]
reg [3:0] uops_3_br_tag; // @[util.scala:505:22]
reg [3:0] uops_3_br_type; // @[util.scala:505:22]
reg uops_3_is_sfb; // @[util.scala:505:22]
reg uops_3_is_fence; // @[util.scala:505:22]
reg uops_3_is_fencei; // @[util.scala:505:22]
reg uops_3_is_sfence; // @[util.scala:505:22]
reg uops_3_is_amo; // @[util.scala:505:22]
reg uops_3_is_eret; // @[util.scala:505:22]
reg uops_3_is_sys_pc2epc; // @[util.scala:505:22]
reg uops_3_is_rocc; // @[util.scala:505:22]
reg uops_3_is_mov; // @[util.scala:505:22]
reg [4:0] uops_3_ftq_idx; // @[util.scala:505:22]
reg uops_3_edge_inst; // @[util.scala:505:22]
reg [5:0] uops_3_pc_lob; // @[util.scala:505:22]
reg uops_3_taken; // @[util.scala:505:22]
reg uops_3_imm_rename; // @[util.scala:505:22]
reg [2:0] uops_3_imm_sel; // @[util.scala:505:22]
reg [4:0] uops_3_pimm; // @[util.scala:505:22]
reg [19:0] uops_3_imm_packed; // @[util.scala:505:22]
reg [1:0] uops_3_op1_sel; // @[util.scala:505:22]
reg [2:0] uops_3_op2_sel; // @[util.scala:505:22]
reg uops_3_fp_ctrl_ldst; // @[util.scala:505:22]
reg uops_3_fp_ctrl_wen; // @[util.scala:505:22]
reg uops_3_fp_ctrl_ren1; // @[util.scala:505:22]
reg uops_3_fp_ctrl_ren2; // @[util.scala:505:22]
reg uops_3_fp_ctrl_ren3; // @[util.scala:505:22]
reg uops_3_fp_ctrl_swap12; // @[util.scala:505:22]
reg uops_3_fp_ctrl_swap23; // @[util.scala:505:22]
reg [1:0] uops_3_fp_ctrl_typeTagIn; // @[util.scala:505:22]
reg [1:0] uops_3_fp_ctrl_typeTagOut; // @[util.scala:505:22]
reg uops_3_fp_ctrl_fromint; // @[util.scala:505:22]
reg uops_3_fp_ctrl_toint; // @[util.scala:505:22]
reg uops_3_fp_ctrl_fastpipe; // @[util.scala:505:22]
reg uops_3_fp_ctrl_fma; // @[util.scala:505:22]
reg uops_3_fp_ctrl_div; // @[util.scala:505:22]
reg uops_3_fp_ctrl_sqrt; // @[util.scala:505:22]
reg uops_3_fp_ctrl_wflags; // @[util.scala:505:22]
reg uops_3_fp_ctrl_vec; // @[util.scala:505:22]
reg [6:0] uops_3_rob_idx; // @[util.scala:505:22]
reg [4:0] uops_3_ldq_idx; // @[util.scala:505:22]
reg [4:0] uops_3_stq_idx; // @[util.scala:505:22]
reg [1:0] uops_3_rxq_idx; // @[util.scala:505:22]
reg [6:0] uops_3_pdst; // @[util.scala:505:22]
reg [6:0] uops_3_prs1; // @[util.scala:505:22]
reg [6:0] uops_3_prs2; // @[util.scala:505:22]
reg [6:0] uops_3_prs3; // @[util.scala:505:22]
reg [4:0] uops_3_ppred; // @[util.scala:505:22]
reg uops_3_prs1_busy; // @[util.scala:505:22]
reg uops_3_prs2_busy; // @[util.scala:505:22]
reg uops_3_prs3_busy; // @[util.scala:505:22]
reg uops_3_ppred_busy; // @[util.scala:505:22]
reg [6:0] uops_3_stale_pdst; // @[util.scala:505:22]
reg uops_3_exception; // @[util.scala:505:22]
reg [63:0] uops_3_exc_cause; // @[util.scala:505:22]
reg [4:0] uops_3_mem_cmd; // @[util.scala:505:22]
reg [1:0] uops_3_mem_size; // @[util.scala:505:22]
reg uops_3_mem_signed; // @[util.scala:505:22]
reg uops_3_uses_ldq; // @[util.scala:505:22]
reg uops_3_uses_stq; // @[util.scala:505:22]
reg uops_3_is_unique; // @[util.scala:505:22]
reg uops_3_flush_on_commit; // @[util.scala:505:22]
reg [2:0] uops_3_csr_cmd; // @[util.scala:505:22]
reg uops_3_ldst_is_rs1; // @[util.scala:505:22]
reg [5:0] uops_3_ldst; // @[util.scala:505:22]
reg [5:0] uops_3_lrs1; // @[util.scala:505:22]
reg [5:0] uops_3_lrs2; // @[util.scala:505:22]
reg [5:0] uops_3_lrs3; // @[util.scala:505:22]
reg [1:0] uops_3_dst_rtype; // @[util.scala:505:22]
reg [1:0] uops_3_lrs1_rtype; // @[util.scala:505:22]
reg [1:0] uops_3_lrs2_rtype; // @[util.scala:505:22]
reg uops_3_frs3_en; // @[util.scala:505:22]
reg uops_3_fcn_dw; // @[util.scala:505:22]
reg [4:0] uops_3_fcn_op; // @[util.scala:505:22]
reg uops_3_fp_val; // @[util.scala:505:22]
reg [2:0] uops_3_fp_rm; // @[util.scala:505:22]
reg [1:0] uops_3_fp_typ; // @[util.scala:505:22]
reg uops_3_xcpt_pf_if; // @[util.scala:505:22]
reg uops_3_xcpt_ae_if; // @[util.scala:505:22]
reg uops_3_xcpt_ma_if; // @[util.scala:505:22]
reg uops_3_bp_debug_if; // @[util.scala:505:22]
reg uops_3_bp_xcpt_if; // @[util.scala:505:22]
reg [2:0] uops_3_debug_fsrc; // @[util.scala:505:22]
reg [2:0] uops_3_debug_tsrc; // @[util.scala:505:22]
reg [31:0] uops_4_inst; // @[util.scala:505:22]
reg [31:0] uops_4_debug_inst; // @[util.scala:505:22]
reg uops_4_is_rvc; // @[util.scala:505:22]
reg [39:0] uops_4_debug_pc; // @[util.scala:505:22]
reg uops_4_iq_type_0; // @[util.scala:505:22]
reg uops_4_iq_type_1; // @[util.scala:505:22]
reg uops_4_iq_type_2; // @[util.scala:505:22]
reg uops_4_iq_type_3; // @[util.scala:505:22]
reg uops_4_fu_code_0; // @[util.scala:505:22]
reg uops_4_fu_code_1; // @[util.scala:505:22]
reg uops_4_fu_code_2; // @[util.scala:505:22]
reg uops_4_fu_code_3; // @[util.scala:505:22]
reg uops_4_fu_code_4; // @[util.scala:505:22]
reg uops_4_fu_code_5; // @[util.scala:505:22]
reg uops_4_fu_code_6; // @[util.scala:505:22]
reg uops_4_fu_code_7; // @[util.scala:505:22]
reg uops_4_fu_code_8; // @[util.scala:505:22]
reg uops_4_fu_code_9; // @[util.scala:505:22]
reg uops_4_iw_issued; // @[util.scala:505:22]
reg uops_4_iw_issued_partial_agen; // @[util.scala:505:22]
reg uops_4_iw_issued_partial_dgen; // @[util.scala:505:22]
reg [2:0] uops_4_iw_p1_speculative_child; // @[util.scala:505:22]
reg [2:0] uops_4_iw_p2_speculative_child; // @[util.scala:505:22]
reg uops_4_iw_p1_bypass_hint; // @[util.scala:505:22]
reg uops_4_iw_p2_bypass_hint; // @[util.scala:505:22]
reg uops_4_iw_p3_bypass_hint; // @[util.scala:505:22]
reg [2:0] uops_4_dis_col_sel; // @[util.scala:505:22]
reg [15:0] uops_4_br_mask; // @[util.scala:505:22]
reg [3:0] uops_4_br_tag; // @[util.scala:505:22]
reg [3:0] uops_4_br_type; // @[util.scala:505:22]
reg uops_4_is_sfb; // @[util.scala:505:22]
reg uops_4_is_fence; // @[util.scala:505:22]
reg uops_4_is_fencei; // @[util.scala:505:22]
reg uops_4_is_sfence; // @[util.scala:505:22]
reg uops_4_is_amo; // @[util.scala:505:22]
reg uops_4_is_eret; // @[util.scala:505:22]
reg uops_4_is_sys_pc2epc; // @[util.scala:505:22]
reg uops_4_is_rocc; // @[util.scala:505:22]
reg uops_4_is_mov; // @[util.scala:505:22]
reg [4:0] uops_4_ftq_idx; // @[util.scala:505:22]
reg uops_4_edge_inst; // @[util.scala:505:22]
reg [5:0] uops_4_pc_lob; // @[util.scala:505:22]
reg uops_4_taken; // @[util.scala:505:22]
reg uops_4_imm_rename; // @[util.scala:505:22]
reg [2:0] uops_4_imm_sel; // @[util.scala:505:22]
reg [4:0] uops_4_pimm; // @[util.scala:505:22]
reg [19:0] uops_4_imm_packed; // @[util.scala:505:22]
reg [1:0] uops_4_op1_sel; // @[util.scala:505:22]
reg [2:0] uops_4_op2_sel; // @[util.scala:505:22]
reg uops_4_fp_ctrl_ldst; // @[util.scala:505:22]
reg uops_4_fp_ctrl_wen; // @[util.scala:505:22]
reg uops_4_fp_ctrl_ren1; // @[util.scala:505:22]
reg uops_4_fp_ctrl_ren2; // @[util.scala:505:22]
reg uops_4_fp_ctrl_ren3; // @[util.scala:505:22]
reg uops_4_fp_ctrl_swap12; // @[util.scala:505:22]
reg uops_4_fp_ctrl_swap23; // @[util.scala:505:22]
reg [1:0] uops_4_fp_ctrl_typeTagIn; // @[util.scala:505:22]
reg [1:0] uops_4_fp_ctrl_typeTagOut; // @[util.scala:505:22]
reg uops_4_fp_ctrl_fromint; // @[util.scala:505:22]
reg uops_4_fp_ctrl_toint; // @[util.scala:505:22]
reg uops_4_fp_ctrl_fastpipe; // @[util.scala:505:22]
reg uops_4_fp_ctrl_fma; // @[util.scala:505:22]
reg uops_4_fp_ctrl_div; // @[util.scala:505:22]
reg uops_4_fp_ctrl_sqrt; // @[util.scala:505:22]
reg uops_4_fp_ctrl_wflags; // @[util.scala:505:22]
reg uops_4_fp_ctrl_vec; // @[util.scala:505:22]
reg [6:0] uops_4_rob_idx; // @[util.scala:505:22]
reg [4:0] uops_4_ldq_idx; // @[util.scala:505:22]
reg [4:0] uops_4_stq_idx; // @[util.scala:505:22]
reg [1:0] uops_4_rxq_idx; // @[util.scala:505:22]
reg [6:0] uops_4_pdst; // @[util.scala:505:22]
reg [6:0] uops_4_prs1; // @[util.scala:505:22]
reg [6:0] uops_4_prs2; // @[util.scala:505:22]
reg [6:0] uops_4_prs3; // @[util.scala:505:22]
reg [4:0] uops_4_ppred; // @[util.scala:505:22]
reg uops_4_prs1_busy; // @[util.scala:505:22]
reg uops_4_prs2_busy; // @[util.scala:505:22]
reg uops_4_prs3_busy; // @[util.scala:505:22]
reg uops_4_ppred_busy; // @[util.scala:505:22]
reg [6:0] uops_4_stale_pdst; // @[util.scala:505:22]
reg uops_4_exception; // @[util.scala:505:22]
reg [63:0] uops_4_exc_cause; // @[util.scala:505:22]
reg [4:0] uops_4_mem_cmd; // @[util.scala:505:22]
reg [1:0] uops_4_mem_size; // @[util.scala:505:22]
reg uops_4_mem_signed; // @[util.scala:505:22]
reg uops_4_uses_ldq; // @[util.scala:505:22]
reg uops_4_uses_stq; // @[util.scala:505:22]
reg uops_4_is_unique; // @[util.scala:505:22]
reg uops_4_flush_on_commit; // @[util.scala:505:22]
reg [2:0] uops_4_csr_cmd; // @[util.scala:505:22]
reg uops_4_ldst_is_rs1; // @[util.scala:505:22]
reg [5:0] uops_4_ldst; // @[util.scala:505:22]
reg [5:0] uops_4_lrs1; // @[util.scala:505:22]
reg [5:0] uops_4_lrs2; // @[util.scala:505:22]
reg [5:0] uops_4_lrs3; // @[util.scala:505:22]
reg [1:0] uops_4_dst_rtype; // @[util.scala:505:22]
reg [1:0] uops_4_lrs1_rtype; // @[util.scala:505:22]
reg [1:0] uops_4_lrs2_rtype; // @[util.scala:505:22]
reg uops_4_frs3_en; // @[util.scala:505:22]
reg uops_4_fcn_dw; // @[util.scala:505:22]
reg [4:0] uops_4_fcn_op; // @[util.scala:505:22]
reg uops_4_fp_val; // @[util.scala:505:22]
reg [2:0] uops_4_fp_rm; // @[util.scala:505:22]
reg [1:0] uops_4_fp_typ; // @[util.scala:505:22]
reg uops_4_xcpt_pf_if; // @[util.scala:505:22]
reg uops_4_xcpt_ae_if; // @[util.scala:505:22]
reg uops_4_xcpt_ma_if; // @[util.scala:505:22]
reg uops_4_bp_debug_if; // @[util.scala:505:22]
reg uops_4_bp_xcpt_if; // @[util.scala:505:22]
reg [2:0] uops_4_debug_fsrc; // @[util.scala:505:22]
reg [2:0] uops_4_debug_tsrc; // @[util.scala:505:22]
reg [31:0] uops_5_inst; // @[util.scala:505:22]
reg [31:0] uops_5_debug_inst; // @[util.scala:505:22]
reg uops_5_is_rvc; // @[util.scala:505:22]
reg [39:0] uops_5_debug_pc; // @[util.scala:505:22]
reg uops_5_iq_type_0; // @[util.scala:505:22]
reg uops_5_iq_type_1; // @[util.scala:505:22]
reg uops_5_iq_type_2; // @[util.scala:505:22]
reg uops_5_iq_type_3; // @[util.scala:505:22]
reg uops_5_fu_code_0; // @[util.scala:505:22]
reg uops_5_fu_code_1; // @[util.scala:505:22]
reg uops_5_fu_code_2; // @[util.scala:505:22]
reg uops_5_fu_code_3; // @[util.scala:505:22]
reg uops_5_fu_code_4; // @[util.scala:505:22]
reg uops_5_fu_code_5; // @[util.scala:505:22]
reg uops_5_fu_code_6; // @[util.scala:505:22]
reg uops_5_fu_code_7; // @[util.scala:505:22]
reg uops_5_fu_code_8; // @[util.scala:505:22]
reg uops_5_fu_code_9; // @[util.scala:505:22]
reg uops_5_iw_issued; // @[util.scala:505:22]
reg uops_5_iw_issued_partial_agen; // @[util.scala:505:22]
reg uops_5_iw_issued_partial_dgen; // @[util.scala:505:22]
reg [2:0] uops_5_iw_p1_speculative_child; // @[util.scala:505:22]
reg [2:0] uops_5_iw_p2_speculative_child; // @[util.scala:505:22]
reg uops_5_iw_p1_bypass_hint; // @[util.scala:505:22]
reg uops_5_iw_p2_bypass_hint; // @[util.scala:505:22]
reg uops_5_iw_p3_bypass_hint; // @[util.scala:505:22]
reg [2:0] uops_5_dis_col_sel; // @[util.scala:505:22]
reg [15:0] uops_5_br_mask; // @[util.scala:505:22]
reg [3:0] uops_5_br_tag; // @[util.scala:505:22]
reg [3:0] uops_5_br_type; // @[util.scala:505:22]
reg uops_5_is_sfb; // @[util.scala:505:22]
reg uops_5_is_fence; // @[util.scala:505:22]
reg uops_5_is_fencei; // @[util.scala:505:22]
reg uops_5_is_sfence; // @[util.scala:505:22]
reg uops_5_is_amo; // @[util.scala:505:22]
reg uops_5_is_eret; // @[util.scala:505:22]
reg uops_5_is_sys_pc2epc; // @[util.scala:505:22]
reg uops_5_is_rocc; // @[util.scala:505:22]
reg uops_5_is_mov; // @[util.scala:505:22]
reg [4:0] uops_5_ftq_idx; // @[util.scala:505:22]
reg uops_5_edge_inst; // @[util.scala:505:22]
reg [5:0] uops_5_pc_lob; // @[util.scala:505:22]
reg uops_5_taken; // @[util.scala:505:22]
reg uops_5_imm_rename; // @[util.scala:505:22]
reg [2:0] uops_5_imm_sel; // @[util.scala:505:22]
reg [4:0] uops_5_pimm; // @[util.scala:505:22]
reg [19:0] uops_5_imm_packed; // @[util.scala:505:22]
reg [1:0] uops_5_op1_sel; // @[util.scala:505:22]
reg [2:0] uops_5_op2_sel; // @[util.scala:505:22]
reg uops_5_fp_ctrl_ldst; // @[util.scala:505:22]
reg uops_5_fp_ctrl_wen; // @[util.scala:505:22]
reg uops_5_fp_ctrl_ren1; // @[util.scala:505:22]
reg uops_5_fp_ctrl_ren2; // @[util.scala:505:22]
reg uops_5_fp_ctrl_ren3; // @[util.scala:505:22]
reg uops_5_fp_ctrl_swap12; // @[util.scala:505:22]
reg uops_5_fp_ctrl_swap23; // @[util.scala:505:22]
reg [1:0] uops_5_fp_ctrl_typeTagIn; // @[util.scala:505:22]
reg [1:0] uops_5_fp_ctrl_typeTagOut; // @[util.scala:505:22]
reg uops_5_fp_ctrl_fromint; // @[util.scala:505:22]
reg uops_5_fp_ctrl_toint; // @[util.scala:505:22]
reg uops_5_fp_ctrl_fastpipe; // @[util.scala:505:22]
reg uops_5_fp_ctrl_fma; // @[util.scala:505:22]
reg uops_5_fp_ctrl_div; // @[util.scala:505:22]
reg uops_5_fp_ctrl_sqrt; // @[util.scala:505:22]
reg uops_5_fp_ctrl_wflags; // @[util.scala:505:22]
reg uops_5_fp_ctrl_vec; // @[util.scala:505:22]
reg [6:0] uops_5_rob_idx; // @[util.scala:505:22]
reg [4:0] uops_5_ldq_idx; // @[util.scala:505:22]
reg [4:0] uops_5_stq_idx; // @[util.scala:505:22]
reg [1:0] uops_5_rxq_idx; // @[util.scala:505:22]
reg [6:0] uops_5_pdst; // @[util.scala:505:22]
reg [6:0] uops_5_prs1; // @[util.scala:505:22]
reg [6:0] uops_5_prs2; // @[util.scala:505:22]
reg [6:0] uops_5_prs3; // @[util.scala:505:22]
reg [4:0] uops_5_ppred; // @[util.scala:505:22]
reg uops_5_prs1_busy; // @[util.scala:505:22]
reg uops_5_prs2_busy; // @[util.scala:505:22]
reg uops_5_prs3_busy; // @[util.scala:505:22]
reg uops_5_ppred_busy; // @[util.scala:505:22]
reg [6:0] uops_5_stale_pdst; // @[util.scala:505:22]
reg uops_5_exception; // @[util.scala:505:22]
reg [63:0] uops_5_exc_cause; // @[util.scala:505:22]
reg [4:0] uops_5_mem_cmd; // @[util.scala:505:22]
reg [1:0] uops_5_mem_size; // @[util.scala:505:22]
reg uops_5_mem_signed; // @[util.scala:505:22]
reg uops_5_uses_ldq; // @[util.scala:505:22]
reg uops_5_uses_stq; // @[util.scala:505:22]
reg uops_5_is_unique; // @[util.scala:505:22]
reg uops_5_flush_on_commit; // @[util.scala:505:22]
reg [2:0] uops_5_csr_cmd; // @[util.scala:505:22]
reg uops_5_ldst_is_rs1; // @[util.scala:505:22]
reg [5:0] uops_5_ldst; // @[util.scala:505:22]
reg [5:0] uops_5_lrs1; // @[util.scala:505:22]
reg [5:0] uops_5_lrs2; // @[util.scala:505:22]
reg [5:0] uops_5_lrs3; // @[util.scala:505:22]
reg [1:0] uops_5_dst_rtype; // @[util.scala:505:22]
reg [1:0] uops_5_lrs1_rtype; // @[util.scala:505:22]
reg [1:0] uops_5_lrs2_rtype; // @[util.scala:505:22]
reg uops_5_frs3_en; // @[util.scala:505:22]
reg uops_5_fcn_dw; // @[util.scala:505:22]
reg [4:0] uops_5_fcn_op; // @[util.scala:505:22]
reg uops_5_fp_val; // @[util.scala:505:22]
reg [2:0] uops_5_fp_rm; // @[util.scala:505:22]
reg [1:0] uops_5_fp_typ; // @[util.scala:505:22]
reg uops_5_xcpt_pf_if; // @[util.scala:505:22]
reg uops_5_xcpt_ae_if; // @[util.scala:505:22]
reg uops_5_xcpt_ma_if; // @[util.scala:505:22]
reg uops_5_bp_debug_if; // @[util.scala:505:22]
reg uops_5_bp_xcpt_if; // @[util.scala:505:22]
reg [2:0] uops_5_debug_fsrc; // @[util.scala:505:22]
reg [2:0] uops_5_debug_tsrc; // @[util.scala:505:22]
reg [31:0] uops_6_inst; // @[util.scala:505:22]
reg [31:0] uops_6_debug_inst; // @[util.scala:505:22]
reg uops_6_is_rvc; // @[util.scala:505:22]
reg [39:0] uops_6_debug_pc; // @[util.scala:505:22]
reg uops_6_iq_type_0; // @[util.scala:505:22]
reg uops_6_iq_type_1; // @[util.scala:505:22]
reg uops_6_iq_type_2; // @[util.scala:505:22]
reg uops_6_iq_type_3; // @[util.scala:505:22]
reg uops_6_fu_code_0; // @[util.scala:505:22]
reg uops_6_fu_code_1; // @[util.scala:505:22]
reg uops_6_fu_code_2; // @[util.scala:505:22]
reg uops_6_fu_code_3; // @[util.scala:505:22]
reg uops_6_fu_code_4; // @[util.scala:505:22]
reg uops_6_fu_code_5; // @[util.scala:505:22]
reg uops_6_fu_code_6; // @[util.scala:505:22]
reg uops_6_fu_code_7; // @[util.scala:505:22]
reg uops_6_fu_code_8; // @[util.scala:505:22]
reg uops_6_fu_code_9; // @[util.scala:505:22]
reg uops_6_iw_issued; // @[util.scala:505:22]
reg uops_6_iw_issued_partial_agen; // @[util.scala:505:22]
reg uops_6_iw_issued_partial_dgen; // @[util.scala:505:22]
reg [2:0] uops_6_iw_p1_speculative_child; // @[util.scala:505:22]
reg [2:0] uops_6_iw_p2_speculative_child; // @[util.scala:505:22]
reg uops_6_iw_p1_bypass_hint; // @[util.scala:505:22]
reg uops_6_iw_p2_bypass_hint; // @[util.scala:505:22]
reg uops_6_iw_p3_bypass_hint; // @[util.scala:505:22]
reg [2:0] uops_6_dis_col_sel; // @[util.scala:505:22]
reg [15:0] uops_6_br_mask; // @[util.scala:505:22]
reg [3:0] uops_6_br_tag; // @[util.scala:505:22]
reg [3:0] uops_6_br_type; // @[util.scala:505:22]
reg uops_6_is_sfb; // @[util.scala:505:22]
reg uops_6_is_fence; // @[util.scala:505:22]
reg uops_6_is_fencei; // @[util.scala:505:22]
reg uops_6_is_sfence; // @[util.scala:505:22]
reg uops_6_is_amo; // @[util.scala:505:22]
reg uops_6_is_eret; // @[util.scala:505:22]
reg uops_6_is_sys_pc2epc; // @[util.scala:505:22]
reg uops_6_is_rocc; // @[util.scala:505:22]
reg uops_6_is_mov; // @[util.scala:505:22]
reg [4:0] uops_6_ftq_idx; // @[util.scala:505:22]
reg uops_6_edge_inst; // @[util.scala:505:22]
reg [5:0] uops_6_pc_lob; // @[util.scala:505:22]
reg uops_6_taken; // @[util.scala:505:22]
reg uops_6_imm_rename; // @[util.scala:505:22]
reg [2:0] uops_6_imm_sel; // @[util.scala:505:22]
reg [4:0] uops_6_pimm; // @[util.scala:505:22]
reg [19:0] uops_6_imm_packed; // @[util.scala:505:22]
reg [1:0] uops_6_op1_sel; // @[util.scala:505:22]
reg [2:0] uops_6_op2_sel; // @[util.scala:505:22]
reg uops_6_fp_ctrl_ldst; // @[util.scala:505:22]
reg uops_6_fp_ctrl_wen; // @[util.scala:505:22]
reg uops_6_fp_ctrl_ren1; // @[util.scala:505:22]
reg uops_6_fp_ctrl_ren2; // @[util.scala:505:22]
reg uops_6_fp_ctrl_ren3; // @[util.scala:505:22]
reg uops_6_fp_ctrl_swap12; // @[util.scala:505:22]
reg uops_6_fp_ctrl_swap23; // @[util.scala:505:22]
reg [1:0] uops_6_fp_ctrl_typeTagIn; // @[util.scala:505:22]
reg [1:0] uops_6_fp_ctrl_typeTagOut; // @[util.scala:505:22]
reg uops_6_fp_ctrl_fromint; // @[util.scala:505:22]
reg uops_6_fp_ctrl_toint; // @[util.scala:505:22]
reg uops_6_fp_ctrl_fastpipe; // @[util.scala:505:22]
reg uops_6_fp_ctrl_fma; // @[util.scala:505:22]
reg uops_6_fp_ctrl_div; // @[util.scala:505:22]
reg uops_6_fp_ctrl_sqrt; // @[util.scala:505:22]
reg uops_6_fp_ctrl_wflags; // @[util.scala:505:22]
reg uops_6_fp_ctrl_vec; // @[util.scala:505:22]
reg [6:0] uops_6_rob_idx; // @[util.scala:505:22]
reg [4:0] uops_6_ldq_idx; // @[util.scala:505:22]
reg [4:0] uops_6_stq_idx; // @[util.scala:505:22]
reg [1:0] uops_6_rxq_idx; // @[util.scala:505:22]
reg [6:0] uops_6_pdst; // @[util.scala:505:22]
reg [6:0] uops_6_prs1; // @[util.scala:505:22]
reg [6:0] uops_6_prs2; // @[util.scala:505:22]
reg [6:0] uops_6_prs3; // @[util.scala:505:22]
reg [4:0] uops_6_ppred; // @[util.scala:505:22]
reg uops_6_prs1_busy; // @[util.scala:505:22]
reg uops_6_prs2_busy; // @[util.scala:505:22]
reg uops_6_prs3_busy; // @[util.scala:505:22]
reg uops_6_ppred_busy; // @[util.scala:505:22]
reg [6:0] uops_6_stale_pdst; // @[util.scala:505:22]
reg uops_6_exception; // @[util.scala:505:22]
reg [63:0] uops_6_exc_cause; // @[util.scala:505:22]
reg [4:0] uops_6_mem_cmd; // @[util.scala:505:22]
reg [1:0] uops_6_mem_size; // @[util.scala:505:22]
reg uops_6_mem_signed; // @[util.scala:505:22]
reg uops_6_uses_ldq; // @[util.scala:505:22]
reg uops_6_uses_stq; // @[util.scala:505:22]
reg uops_6_is_unique; // @[util.scala:505:22]
reg uops_6_flush_on_commit; // @[util.scala:505:22]
reg [2:0] uops_6_csr_cmd; // @[util.scala:505:22]
reg uops_6_ldst_is_rs1; // @[util.scala:505:22]
reg [5:0] uops_6_ldst; // @[util.scala:505:22]
reg [5:0] uops_6_lrs1; // @[util.scala:505:22]
reg [5:0] uops_6_lrs2; // @[util.scala:505:22]
reg [5:0] uops_6_lrs3; // @[util.scala:505:22]
reg [1:0] uops_6_dst_rtype; // @[util.scala:505:22]
reg [1:0] uops_6_lrs1_rtype; // @[util.scala:505:22]
reg [1:0] uops_6_lrs2_rtype; // @[util.scala:505:22]
reg uops_6_frs3_en; // @[util.scala:505:22]
reg uops_6_fcn_dw; // @[util.scala:505:22]
reg [4:0] uops_6_fcn_op; // @[util.scala:505:22]
reg uops_6_fp_val; // @[util.scala:505:22]
reg [2:0] uops_6_fp_rm; // @[util.scala:505:22]
reg [1:0] uops_6_fp_typ; // @[util.scala:505:22]
reg uops_6_xcpt_pf_if; // @[util.scala:505:22]
reg uops_6_xcpt_ae_if; // @[util.scala:505:22]
reg uops_6_xcpt_ma_if; // @[util.scala:505:22]
reg uops_6_bp_debug_if; // @[util.scala:505:22]
reg uops_6_bp_xcpt_if; // @[util.scala:505:22]
reg [2:0] uops_6_debug_fsrc; // @[util.scala:505:22]
reg [2:0] uops_6_debug_tsrc; // @[util.scala:505:22]
reg [31:0] uops_7_inst; // @[util.scala:505:22]
reg [31:0] uops_7_debug_inst; // @[util.scala:505:22]
reg uops_7_is_rvc; // @[util.scala:505:22]
reg [39:0] uops_7_debug_pc; // @[util.scala:505:22]
reg uops_7_iq_type_0; // @[util.scala:505:22]
reg uops_7_iq_type_1; // @[util.scala:505:22]
reg uops_7_iq_type_2; // @[util.scala:505:22]
reg uops_7_iq_type_3; // @[util.scala:505:22]
reg uops_7_fu_code_0; // @[util.scala:505:22]
reg uops_7_fu_code_1; // @[util.scala:505:22]
reg uops_7_fu_code_2; // @[util.scala:505:22]
reg uops_7_fu_code_3; // @[util.scala:505:22]
reg uops_7_fu_code_4; // @[util.scala:505:22]
reg uops_7_fu_code_5; // @[util.scala:505:22]
reg uops_7_fu_code_6; // @[util.scala:505:22]
reg uops_7_fu_code_7; // @[util.scala:505:22]
reg uops_7_fu_code_8; // @[util.scala:505:22]
reg uops_7_fu_code_9; // @[util.scala:505:22]
reg uops_7_iw_issued; // @[util.scala:505:22]
reg uops_7_iw_issued_partial_agen; // @[util.scala:505:22]
reg uops_7_iw_issued_partial_dgen; // @[util.scala:505:22]
reg [2:0] uops_7_iw_p1_speculative_child; // @[util.scala:505:22]
reg [2:0] uops_7_iw_p2_speculative_child; // @[util.scala:505:22]
reg uops_7_iw_p1_bypass_hint; // @[util.scala:505:22]
reg uops_7_iw_p2_bypass_hint; // @[util.scala:505:22]
reg uops_7_iw_p3_bypass_hint; // @[util.scala:505:22]
reg [2:0] uops_7_dis_col_sel; // @[util.scala:505:22]
reg [15:0] uops_7_br_mask; // @[util.scala:505:22]
reg [3:0] uops_7_br_tag; // @[util.scala:505:22]
reg [3:0] uops_7_br_type; // @[util.scala:505:22]
reg uops_7_is_sfb; // @[util.scala:505:22]
reg uops_7_is_fence; // @[util.scala:505:22]
reg uops_7_is_fencei; // @[util.scala:505:22]
reg uops_7_is_sfence; // @[util.scala:505:22]
reg uops_7_is_amo; // @[util.scala:505:22]
reg uops_7_is_eret; // @[util.scala:505:22]
reg uops_7_is_sys_pc2epc; // @[util.scala:505:22]
reg uops_7_is_rocc; // @[util.scala:505:22]
reg uops_7_is_mov; // @[util.scala:505:22]
reg [4:0] uops_7_ftq_idx; // @[util.scala:505:22]
reg uops_7_edge_inst; // @[util.scala:505:22]
reg [5:0] uops_7_pc_lob; // @[util.scala:505:22]
reg uops_7_taken; // @[util.scala:505:22]
reg uops_7_imm_rename; // @[util.scala:505:22]
reg [2:0] uops_7_imm_sel; // @[util.scala:505:22]
reg [4:0] uops_7_pimm; // @[util.scala:505:22]
reg [19:0] uops_7_imm_packed; // @[util.scala:505:22]
reg [1:0] uops_7_op1_sel; // @[util.scala:505:22]
reg [2:0] uops_7_op2_sel; // @[util.scala:505:22]
reg uops_7_fp_ctrl_ldst; // @[util.scala:505:22]
reg uops_7_fp_ctrl_wen; // @[util.scala:505:22]
reg uops_7_fp_ctrl_ren1; // @[util.scala:505:22]
reg uops_7_fp_ctrl_ren2; // @[util.scala:505:22]
reg uops_7_fp_ctrl_ren3; // @[util.scala:505:22]
reg uops_7_fp_ctrl_swap12; // @[util.scala:505:22]
reg uops_7_fp_ctrl_swap23; // @[util.scala:505:22]
reg [1:0] uops_7_fp_ctrl_typeTagIn; // @[util.scala:505:22]
reg [1:0] uops_7_fp_ctrl_typeTagOut; // @[util.scala:505:22]
reg uops_7_fp_ctrl_fromint; // @[util.scala:505:22]
reg uops_7_fp_ctrl_toint; // @[util.scala:505:22]
reg uops_7_fp_ctrl_fastpipe; // @[util.scala:505:22]
reg uops_7_fp_ctrl_fma; // @[util.scala:505:22]
reg uops_7_fp_ctrl_div; // @[util.scala:505:22]
reg uops_7_fp_ctrl_sqrt; // @[util.scala:505:22]
reg uops_7_fp_ctrl_wflags; // @[util.scala:505:22]
reg uops_7_fp_ctrl_vec; // @[util.scala:505:22]
reg [6:0] uops_7_rob_idx; // @[util.scala:505:22]
reg [4:0] uops_7_ldq_idx; // @[util.scala:505:22]
reg [4:0] uops_7_stq_idx; // @[util.scala:505:22]
reg [1:0] uops_7_rxq_idx; // @[util.scala:505:22]
reg [6:0] uops_7_pdst; // @[util.scala:505:22]
reg [6:0] uops_7_prs1; // @[util.scala:505:22]
reg [6:0] uops_7_prs2; // @[util.scala:505:22]
reg [6:0] uops_7_prs3; // @[util.scala:505:22]
reg [4:0] uops_7_ppred; // @[util.scala:505:22]
reg uops_7_prs1_busy; // @[util.scala:505:22]
reg uops_7_prs2_busy; // @[util.scala:505:22]
reg uops_7_prs3_busy; // @[util.scala:505:22]
reg uops_7_ppred_busy; // @[util.scala:505:22]
reg [6:0] uops_7_stale_pdst; // @[util.scala:505:22]
reg uops_7_exception; // @[util.scala:505:22]
reg [63:0] uops_7_exc_cause; // @[util.scala:505:22]
reg [4:0] uops_7_mem_cmd; // @[util.scala:505:22]
reg [1:0] uops_7_mem_size; // @[util.scala:505:22]
reg uops_7_mem_signed; // @[util.scala:505:22]
reg uops_7_uses_ldq; // @[util.scala:505:22]
reg uops_7_uses_stq; // @[util.scala:505:22]
reg uops_7_is_unique; // @[util.scala:505:22]
reg uops_7_flush_on_commit; // @[util.scala:505:22]
reg [2:0] uops_7_csr_cmd; // @[util.scala:505:22]
reg uops_7_ldst_is_rs1; // @[util.scala:505:22]
reg [5:0] uops_7_ldst; // @[util.scala:505:22]
reg [5:0] uops_7_lrs1; // @[util.scala:505:22]
reg [5:0] uops_7_lrs2; // @[util.scala:505:22]
reg [5:0] uops_7_lrs3; // @[util.scala:505:22]
reg [1:0] uops_7_dst_rtype; // @[util.scala:505:22]
reg [1:0] uops_7_lrs1_rtype; // @[util.scala:505:22]
reg [1:0] uops_7_lrs2_rtype; // @[util.scala:505:22]
reg uops_7_frs3_en; // @[util.scala:505:22]
reg uops_7_fcn_dw; // @[util.scala:505:22]
reg [4:0] uops_7_fcn_op; // @[util.scala:505:22]
reg uops_7_fp_val; // @[util.scala:505:22]
reg [2:0] uops_7_fp_rm; // @[util.scala:505:22]
reg [1:0] uops_7_fp_typ; // @[util.scala:505:22]
reg uops_7_xcpt_pf_if; // @[util.scala:505:22]
reg uops_7_xcpt_ae_if; // @[util.scala:505:22]
reg uops_7_xcpt_ma_if; // @[util.scala:505:22]
reg uops_7_bp_debug_if; // @[util.scala:505:22]
reg uops_7_bp_xcpt_if; // @[util.scala:505:22]
reg [2:0] uops_7_debug_fsrc; // @[util.scala:505:22]
reg [2:0] uops_7_debug_tsrc; // @[util.scala:505:22]
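  // Circular-buffer bookkeeping: 3-bit enqueue/dequeue pointers plus maybe_full, which
  // distinguishes the full case from the empty case when the two pointers match.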
reg [2:0] enq_ptr_value; // @[Counter.scala:61:40]
reg [2:0] deq_ptr_value; // @[Counter.scala:61:40]
reg maybe_full; // @[util.scala:509:29]
wire ptr_match = enq_ptr_value == deq_ptr_value; // @[Counter.scala:61:40]
wire _io_empty_T = ~maybe_full; // @[util.scala:509:29, :512:30]
assign _io_empty_T_1 = ptr_match & _io_empty_T; // @[util.scala:511:35, :512:{27,30}]
assign io_empty = _io_empty_T_1; // @[util.scala:458:7, :512:27]
wire _GEN = ptr_match & maybe_full; // @[util.scala:509:29, :511:35, :513:26]
wire full; // @[util.scala:513:26]
assign full = _GEN; // @[util.scala:513:26]
wire _io_count_T; // @[util.scala:553:34]
assign _io_count_T = _GEN; // @[util.scala:513:26, :553:34]
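  // Enqueue fires on the ready/valid handshake, suppressed when the incoming uop's br_mask hits
  // the current mispredict mask (plus an additional kill term computed earlier in the module).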
wire _do_enq_T = io_enq_ready_0 & io_enq_valid_0; // @[Decoupled.scala:51:35]
wire [15:0] _do_enq_T_1 = io_brupdate_b1_mispredict_mask_0 & io_enq_bits_uop_br_mask_0; // @[util.scala:126:51, :458:7]
wire _do_enq_T_2 = |_do_enq_T_1; // @[util.scala:126:{51,59}]
wire _do_enq_T_3 = _do_enq_T_2; // @[util.scala:61:61, :126:59]
wire _do_enq_T_4 = ~_do_enq_T_3; // @[util.scala:61:61, :514:42]
wire _do_enq_T_5 = _do_enq_T & _do_enq_T_4; // @[Decoupled.scala:51:35]
wire _do_enq_T_7 = ~_do_enq_T_6; // @[util.scala:514:{102,113}]
wire _do_enq_T_8 = _do_enq_T_5 & _do_enq_T_7; // @[util.scala:514:{39,99,102}]
wire do_enq = _do_enq_T_8; // @[util.scala:514:{26,99}]
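  // Dequeue advances when the consumer is ready or the head entry has already been invalidated,
  // provided the queue is not empty.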
wire [7:0] _GEN_0 = {{valids_7}, {valids_6}, {valids_5}, {valids_4}, {valids_3}, {valids_2}, {valids_1}, {valids_0}}; // @[util.scala:504:26, :515:44]
wire _GEN_1 = _GEN_0[deq_ptr_value]; // @[Counter.scala:61:40]
wire _do_deq_T = ~_GEN_1; // @[util.scala:515:44]
wire _do_deq_T_1 = io_deq_ready_0 | _do_deq_T; // @[util.scala:458:7, :515:{41,44}]
wire _do_deq_T_2 = ~io_empty; // @[util.scala:458:7, :515:71]
wire _do_deq_T_3 = _do_deq_T_1 & _do_deq_T_2; // @[util.scala:515:{41,68,71}]
wire do_deq = _do_deq_T_3; // @[util.scala:515:{26,68}]
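  // Per-entry branch handling: an entry is invalidated if its br_mask overlaps the mispredict mask,
  // and bits for resolved branches are cleared from its stored br_mask.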
wire [15:0] _valids_0_T = io_brupdate_b1_mispredict_mask_0 & uops_0_br_mask; // @[util.scala:126:51, :458:7, :505:22]
wire _valids_0_T_1 = |_valids_0_T; // @[util.scala:126:{51,59}]
wire _valids_0_T_2 = _valids_0_T_1; // @[util.scala:61:61, :126:59]
wire _valids_0_T_3 = ~_valids_0_T_2; // @[util.scala:61:61, :520:34]
wire _valids_0_T_4 = valids_0 & _valids_0_T_3; // @[util.scala:504:26, :520:{31,34}]
wire _valids_0_T_6 = ~_valids_0_T_5; // @[util.scala:520:{83,94}]
wire _valids_0_T_7 = _valids_0_T_4 & _valids_0_T_6; // @[util.scala:520:{31,80,83}]
wire [15:0] _uops_0_br_mask_T = ~io_brupdate_b1_resolve_mask_0; // @[util.scala:97:23, :458:7]
wire [15:0] _uops_0_br_mask_T_1 = uops_0_br_mask & _uops_0_br_mask_T; // @[util.scala:97:{21,23}, :505:22]
wire [15:0] _valids_1_T = io_brupdate_b1_mispredict_mask_0 & uops_1_br_mask; // @[util.scala:126:51, :458:7, :505:22]
wire _valids_1_T_1 = |_valids_1_T; // @[util.scala:126:{51,59}]
wire _valids_1_T_2 = _valids_1_T_1; // @[util.scala:61:61, :126:59]
wire _valids_1_T_3 = ~_valids_1_T_2; // @[util.scala:61:61, :520:34]
wire _valids_1_T_4 = valids_1 & _valids_1_T_3; // @[util.scala:504:26, :520:{31,34}]
wire _valids_1_T_6 = ~_valids_1_T_5; // @[util.scala:520:{83,94}]
wire _valids_1_T_7 = _valids_1_T_4 & _valids_1_T_6; // @[util.scala:520:{31,80,83}]
wire [15:0] _uops_1_br_mask_T = ~io_brupdate_b1_resolve_mask_0; // @[util.scala:97:23, :458:7]
wire [15:0] _uops_1_br_mask_T_1 = uops_1_br_mask & _uops_1_br_mask_T; // @[util.scala:97:{21,23}, :505:22]
wire [15:0] _valids_2_T = io_brupdate_b1_mispredict_mask_0 & uops_2_br_mask; // @[util.scala:126:51, :458:7, :505:22]
wire _valids_2_T_1 = |_valids_2_T; // @[util.scala:126:{51,59}]
wire _valids_2_T_2 = _valids_2_T_1; // @[util.scala:61:61, :126:59]
wire _valids_2_T_3 = ~_valids_2_T_2; // @[util.scala:61:61, :520:34]
wire _valids_2_T_4 = valids_2 & _valids_2_T_3; // @[util.scala:504:26, :520:{31,34}]
wire _valids_2_T_6 = ~_valids_2_T_5; // @[util.scala:520:{83,94}]
wire _valids_2_T_7 = _valids_2_T_4 & _valids_2_T_6; // @[util.scala:520:{31,80,83}]
wire [15:0] _uops_2_br_mask_T = ~io_brupdate_b1_resolve_mask_0; // @[util.scala:97:23, :458:7]
wire [15:0] _uops_2_br_mask_T_1 = uops_2_br_mask & _uops_2_br_mask_T; // @[util.scala:97:{21,23}, :505:22]
wire [15:0] _valids_3_T = io_brupdate_b1_mispredict_mask_0 & uops_3_br_mask; // @[util.scala:126:51, :458:7, :505:22]
wire _valids_3_T_1 = |_valids_3_T; // @[util.scala:126:{51,59}]
wire _valids_3_T_2 = _valids_3_T_1; // @[util.scala:61:61, :126:59]
wire _valids_3_T_3 = ~_valids_3_T_2; // @[util.scala:61:61, :520:34]
wire _valids_3_T_4 = valids_3 & _valids_3_T_3; // @[util.scala:504:26, :520:{31,34}]
wire _valids_3_T_6 = ~_valids_3_T_5; // @[util.scala:520:{83,94}]
wire _valids_3_T_7 = _valids_3_T_4 & _valids_3_T_6; // @[util.scala:520:{31,80,83}]
wire [15:0] _uops_3_br_mask_T = ~io_brupdate_b1_resolve_mask_0; // @[util.scala:97:23, :458:7]
wire [15:0] _uops_3_br_mask_T_1 = uops_3_br_mask & _uops_3_br_mask_T; // @[util.scala:97:{21,23}, :505:22]
wire [15:0] _valids_4_T = io_brupdate_b1_mispredict_mask_0 & uops_4_br_mask; // @[util.scala:126:51, :458:7, :505:22]
wire _valids_4_T_1 = |_valids_4_T; // @[util.scala:126:{51,59}]
wire _valids_4_T_2 = _valids_4_T_1; // @[util.scala:61:61, :126:59]
wire _valids_4_T_3 = ~_valids_4_T_2; // @[util.scala:61:61, :520:34]
wire _valids_4_T_4 = valids_4 & _valids_4_T_3; // @[util.scala:504:26, :520:{31,34}]
wire _valids_4_T_6 = ~_valids_4_T_5; // @[util.scala:520:{83,94}]
wire _valids_4_T_7 = _valids_4_T_4 & _valids_4_T_6; // @[util.scala:520:{31,80,83}]
wire [15:0] _uops_4_br_mask_T = ~io_brupdate_b1_resolve_mask_0; // @[util.scala:97:23, :458:7]
wire [15:0] _uops_4_br_mask_T_1 = uops_4_br_mask & _uops_4_br_mask_T; // @[util.scala:97:{21,23}, :505:22]
wire [15:0] _valids_5_T = io_brupdate_b1_mispredict_mask_0 & uops_5_br_mask; // @[util.scala:126:51, :458:7, :505:22]
wire _valids_5_T_1 = |_valids_5_T; // @[util.scala:126:{51,59}]
wire _valids_5_T_2 = _valids_5_T_1; // @[util.scala:61:61, :126:59]
wire _valids_5_T_3 = ~_valids_5_T_2; // @[util.scala:61:61, :520:34]
wire _valids_5_T_4 = valids_5 & _valids_5_T_3; // @[util.scala:504:26, :520:{31,34}]
wire _valids_5_T_6 = ~_valids_5_T_5; // @[util.scala:520:{83,94}]
wire _valids_5_T_7 = _valids_5_T_4 & _valids_5_T_6; // @[util.scala:520:{31,80,83}]
wire [15:0] _uops_5_br_mask_T = ~io_brupdate_b1_resolve_mask_0; // @[util.scala:97:23, :458:7]
wire [15:0] _uops_5_br_mask_T_1 = uops_5_br_mask & _uops_5_br_mask_T; // @[util.scala:97:{21,23}, :505:22]
wire [15:0] _valids_6_T = io_brupdate_b1_mispredict_mask_0 & uops_6_br_mask; // @[util.scala:126:51, :458:7, :505:22]
wire _valids_6_T_1 = |_valids_6_T; // @[util.scala:126:{51,59}]
wire _valids_6_T_2 = _valids_6_T_1; // @[util.scala:61:61, :126:59]
wire _valids_6_T_3 = ~_valids_6_T_2; // @[util.scala:61:61, :520:34]
wire _valids_6_T_4 = valids_6 & _valids_6_T_3; // @[util.scala:504:26, :520:{31,34}]
wire _valids_6_T_6 = ~_valids_6_T_5; // @[util.scala:520:{83,94}]
wire _valids_6_T_7 = _valids_6_T_4 & _valids_6_T_6; // @[util.scala:520:{31,80,83}]
wire [15:0] _uops_6_br_mask_T = ~io_brupdate_b1_resolve_mask_0; // @[util.scala:97:23, :458:7]
wire [15:0] _uops_6_br_mask_T_1 = uops_6_br_mask & _uops_6_br_mask_T; // @[util.scala:97:{21,23}, :505:22]
wire [15:0] _valids_7_T = io_brupdate_b1_mispredict_mask_0 & uops_7_br_mask; // @[util.scala:126:51, :458:7, :505:22]
wire _valids_7_T_1 = |_valids_7_T; // @[util.scala:126:{51,59}]
wire _valids_7_T_2 = _valids_7_T_1; // @[util.scala:61:61, :126:59]
wire _valids_7_T_3 = ~_valids_7_T_2; // @[util.scala:61:61, :520:34]
wire _valids_7_T_4 = valids_7 & _valids_7_T_3; // @[util.scala:504:26, :520:{31,34}]
wire _valids_7_T_6 = ~_valids_7_T_5; // @[util.scala:520:{83,94}]
wire _valids_7_T_7 = _valids_7_T_4 & _valids_7_T_6; // @[util.scala:520:{31,80,83}]
wire [15:0] _uops_7_br_mask_T = ~io_brupdate_b1_resolve_mask_0; // @[util.scala:97:23, :458:7]
wire [15:0] _uops_7_br_mask_T_1 = uops_7_br_mask & _uops_7_br_mask_T; // @[util.scala:97:{21,23}, :505:22]
wire [15:0] _uops_br_mask_T = ~io_brupdate_b1_resolve_mask_0; // @[util.scala:93:27, :97:23, :458:7]
wire [15:0] _uops_br_mask_T_1 = io_enq_bits_uop_br_mask_0 & _uops_br_mask_T; // @[util.scala:93:{25,27}, :458:7]
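  // Next-pointer values: each 3-bit counter is incremented with a 4-bit add and truncated back to
  // 3 bits, wrapping naturally at the power-of-two depth.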
wire wrap = &enq_ptr_value; // @[Counter.scala:61:40, :73:24]
wire [3:0] _GEN_2 = {1'h0, enq_ptr_value}; // @[Counter.scala:61:40, :77:24]
wire [3:0] _value_T = _GEN_2 + 4'h1; // @[Counter.scala:77:24]
wire [2:0] _value_T_1 = _value_T[2:0]; // @[Counter.scala:77:24]
wire wrap_1 = &deq_ptr_value; // @[Counter.scala:61:40, :73:24]
wire [3:0] _GEN_3 = {1'h0, deq_ptr_value}; // @[Counter.scala:61:40, :77:24]
wire [3:0] _value_T_2 = _GEN_3 + 4'h1; // @[Counter.scala:77:24]
wire [2:0] _value_T_3 = _value_T_2[2:0]; // @[Counter.scala:77:24]
assign _io_enq_ready_T = ~full; // @[util.scala:513:26, :543:21]
assign io_enq_ready_0 = _io_enq_ready_T; // @[util.scala:458:7, :543:21]
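  // Head-of-queue outputs: io_deq_bits fields are driven from the out_* signals selected by deq_ptr_value.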
assign io_deq_bits_uop_inst_0 = out_uop_inst; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_debug_inst_0 = out_uop_debug_inst; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_is_rvc_0 = out_uop_is_rvc; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_debug_pc_0 = out_uop_debug_pc; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_iq_type_0_0 = out_uop_iq_type_0; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_iq_type_1_0 = out_uop_iq_type_1; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_iq_type_2_0 = out_uop_iq_type_2; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_iq_type_3_0 = out_uop_iq_type_3; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fu_code_0_0 = out_uop_fu_code_0; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fu_code_1_0 = out_uop_fu_code_1; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fu_code_2_0 = out_uop_fu_code_2; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fu_code_3_0 = out_uop_fu_code_3; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fu_code_4_0 = out_uop_fu_code_4; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fu_code_5_0 = out_uop_fu_code_5; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fu_code_6_0 = out_uop_fu_code_6; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fu_code_7_0 = out_uop_fu_code_7; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fu_code_8_0 = out_uop_fu_code_8; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fu_code_9_0 = out_uop_fu_code_9; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_iw_issued_0 = out_uop_iw_issued; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_iw_issued_partial_agen_0 = out_uop_iw_issued_partial_agen; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_iw_issued_partial_dgen_0 = out_uop_iw_issued_partial_dgen; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_iw_p1_speculative_child_0 = out_uop_iw_p1_speculative_child; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_iw_p2_speculative_child_0 = out_uop_iw_p2_speculative_child; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_iw_p1_bypass_hint_0 = out_uop_iw_p1_bypass_hint; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_iw_p2_bypass_hint_0 = out_uop_iw_p2_bypass_hint; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_iw_p3_bypass_hint_0 = out_uop_iw_p3_bypass_hint; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_dis_col_sel_0 = out_uop_dis_col_sel; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_br_mask_0 = out_uop_br_mask; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_br_tag_0 = out_uop_br_tag; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_br_type_0 = out_uop_br_type; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_is_sfb_0 = out_uop_is_sfb; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_is_fence_0 = out_uop_is_fence; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_is_fencei_0 = out_uop_is_fencei; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_is_sfence_0 = out_uop_is_sfence; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_is_amo_0 = out_uop_is_amo; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_is_eret_0 = out_uop_is_eret; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_is_sys_pc2epc_0 = out_uop_is_sys_pc2epc; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_is_rocc_0 = out_uop_is_rocc; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_is_mov_0 = out_uop_is_mov; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_ftq_idx_0 = out_uop_ftq_idx; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_edge_inst_0 = out_uop_edge_inst; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_pc_lob_0 = out_uop_pc_lob; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_taken_0 = out_uop_taken; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_imm_rename_0 = out_uop_imm_rename; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_imm_sel_0 = out_uop_imm_sel; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_pimm_0 = out_uop_pimm; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_imm_packed_0 = out_uop_imm_packed; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_op1_sel_0 = out_uop_op1_sel; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_op2_sel_0 = out_uop_op2_sel; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fp_ctrl_ldst_0 = out_uop_fp_ctrl_ldst; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fp_ctrl_wen_0 = out_uop_fp_ctrl_wen; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fp_ctrl_ren1_0 = out_uop_fp_ctrl_ren1; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fp_ctrl_ren2_0 = out_uop_fp_ctrl_ren2; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fp_ctrl_ren3_0 = out_uop_fp_ctrl_ren3; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fp_ctrl_swap12_0 = out_uop_fp_ctrl_swap12; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fp_ctrl_swap23_0 = out_uop_fp_ctrl_swap23; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fp_ctrl_typeTagIn_0 = out_uop_fp_ctrl_typeTagIn; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fp_ctrl_typeTagOut_0 = out_uop_fp_ctrl_typeTagOut; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fp_ctrl_fromint_0 = out_uop_fp_ctrl_fromint; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fp_ctrl_toint_0 = out_uop_fp_ctrl_toint; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fp_ctrl_fastpipe_0 = out_uop_fp_ctrl_fastpipe; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fp_ctrl_fma_0 = out_uop_fp_ctrl_fma; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fp_ctrl_div_0 = out_uop_fp_ctrl_div; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fp_ctrl_sqrt_0 = out_uop_fp_ctrl_sqrt; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fp_ctrl_wflags_0 = out_uop_fp_ctrl_wflags; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fp_ctrl_vec_0 = out_uop_fp_ctrl_vec; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_rob_idx_0 = out_uop_rob_idx; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_ldq_idx_0 = out_uop_ldq_idx; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_stq_idx_0 = out_uop_stq_idx; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_rxq_idx_0 = out_uop_rxq_idx; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_pdst_0 = out_uop_pdst; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_prs1_0 = out_uop_prs1; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_prs2_0 = out_uop_prs2; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_prs3_0 = out_uop_prs3; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_ppred_0 = out_uop_ppred; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_prs1_busy_0 = out_uop_prs1_busy; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_prs2_busy_0 = out_uop_prs2_busy; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_prs3_busy_0 = out_uop_prs3_busy; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_ppred_busy_0 = out_uop_ppred_busy; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_stale_pdst_0 = out_uop_stale_pdst; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_exception_0 = out_uop_exception; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_exc_cause_0 = out_uop_exc_cause; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_mem_cmd_0 = out_uop_mem_cmd; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_mem_size_0 = out_uop_mem_size; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_mem_signed_0 = out_uop_mem_signed; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_uses_ldq_0 = out_uop_uses_ldq; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_uses_stq_0 = out_uop_uses_stq; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_is_unique_0 = out_uop_is_unique; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_flush_on_commit_0 = out_uop_flush_on_commit; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_csr_cmd_0 = out_uop_csr_cmd; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_ldst_is_rs1_0 = out_uop_ldst_is_rs1; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_ldst_0 = out_uop_ldst; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_lrs1_0 = out_uop_lrs1; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_lrs2_0 = out_uop_lrs2; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_lrs3_0 = out_uop_lrs3; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_dst_rtype_0 = out_uop_dst_rtype; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_lrs1_rtype_0 = out_uop_lrs1_rtype; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_lrs2_rtype_0 = out_uop_lrs2_rtype; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_frs3_en_0 = out_uop_frs3_en; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fcn_dw_0 = out_uop_fcn_dw; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fcn_op_0 = out_uop_fcn_op; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fp_val_0 = out_uop_fp_val; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fp_rm_0 = out_uop_fp_rm; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_fp_typ_0 = out_uop_fp_typ; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_xcpt_pf_if_0 = out_uop_xcpt_pf_if; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_xcpt_ae_if_0 = out_uop_xcpt_ae_if; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_xcpt_ma_if_0 = out_uop_xcpt_ma_if; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_bp_debug_if_0 = out_uop_bp_debug_if; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_bp_xcpt_if_0 = out_uop_bp_xcpt_if; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_debug_fsrc_0 = out_uop_debug_fsrc; // @[util.scala:458:7, :545:19]
assign io_deq_bits_uop_debug_tsrc_0 = out_uop_debug_tsrc; // @[util.scala:458:7, :545:19]
assign io_deq_bits_data_0 = out_data; // @[util.scala:458:7, :545:19]
assign io_deq_bits_predicated_0 = out_predicated; // @[util.scala:458:7, :545:19]
assign io_deq_bits_fflags_valid_0 = out_fflags_valid; // @[util.scala:458:7, :545:19]
assign io_deq_bits_fflags_bits_0 = out_fflags_bits; // @[util.scala:458:7, :545:19]
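  // Note (editorial, not firtool output): the _GEN_* vectors that follow form the dequeue-side read
  // mux. Each one packs the same field of all eight uop entries (uops_7 down to uops_0) into a
  // packed array, and the paired out_uop_* assignment selects the entry addressed by deq_ptr_value.
  // This presumably corresponds to a Chisel expression like `out.uop := uops(deq_ptr.value)` at the
  // util.scala:547 locator cited below; the _GEN_* names are netlist-generated, not source names.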
wire [7:0][31:0] _GEN_4 = {{uops_7_inst}, {uops_6_inst}, {uops_5_inst}, {uops_4_inst}, {uops_3_inst}, {uops_2_inst}, {uops_1_inst}, {uops_0_inst}}; // @[util.scala:505:22, :547:21]
assign out_uop_inst = _GEN_4[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0][31:0] _GEN_5 = {{uops_7_debug_inst}, {uops_6_debug_inst}, {uops_5_debug_inst}, {uops_4_debug_inst}, {uops_3_debug_inst}, {uops_2_debug_inst}, {uops_1_debug_inst}, {uops_0_debug_inst}}; // @[util.scala:505:22, :547:21]
assign out_uop_debug_inst = _GEN_5[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0] _GEN_6 = {{uops_7_is_rvc}, {uops_6_is_rvc}, {uops_5_is_rvc}, {uops_4_is_rvc}, {uops_3_is_rvc}, {uops_2_is_rvc}, {uops_1_is_rvc}, {uops_0_is_rvc}}; // @[util.scala:505:22, :547:21]
assign out_uop_is_rvc = _GEN_6[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0][39:0] _GEN_7 = {{uops_7_debug_pc}, {uops_6_debug_pc}, {uops_5_debug_pc}, {uops_4_debug_pc}, {uops_3_debug_pc}, {uops_2_debug_pc}, {uops_1_debug_pc}, {uops_0_debug_pc}}; // @[util.scala:505:22, :547:21]
assign out_uop_debug_pc = _GEN_7[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0] _GEN_8 = {{uops_7_iq_type_0}, {uops_6_iq_type_0}, {uops_5_iq_type_0}, {uops_4_iq_type_0}, {uops_3_iq_type_0}, {uops_2_iq_type_0}, {uops_1_iq_type_0}, {uops_0_iq_type_0}}; // @[util.scala:505:22, :547:21]
assign out_uop_iq_type_0 = _GEN_8[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0] _GEN_9 = {{uops_7_iq_type_1}, {uops_6_iq_type_1}, {uops_5_iq_type_1}, {uops_4_iq_type_1}, {uops_3_iq_type_1}, {uops_2_iq_type_1}, {uops_1_iq_type_1}, {uops_0_iq_type_1}}; // @[util.scala:505:22, :547:21]
assign out_uop_iq_type_1 = _GEN_9[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0] _GEN_10 = {{uops_7_iq_type_2}, {uops_6_iq_type_2}, {uops_5_iq_type_2}, {uops_4_iq_type_2}, {uops_3_iq_type_2}, {uops_2_iq_type_2}, {uops_1_iq_type_2}, {uops_0_iq_type_2}}; // @[util.scala:505:22, :547:21]
assign out_uop_iq_type_2 = _GEN_10[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0] _GEN_11 = {{uops_7_iq_type_3}, {uops_6_iq_type_3}, {uops_5_iq_type_3}, {uops_4_iq_type_3}, {uops_3_iq_type_3}, {uops_2_iq_type_3}, {uops_1_iq_type_3}, {uops_0_iq_type_3}}; // @[util.scala:505:22, :547:21]
assign out_uop_iq_type_3 = _GEN_11[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0] _GEN_12 = {{uops_7_fu_code_0}, {uops_6_fu_code_0}, {uops_5_fu_code_0}, {uops_4_fu_code_0}, {uops_3_fu_code_0}, {uops_2_fu_code_0}, {uops_1_fu_code_0}, {uops_0_fu_code_0}}; // @[util.scala:505:22, :547:21]
assign out_uop_fu_code_0 = _GEN_12[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0] _GEN_13 = {{uops_7_fu_code_1}, {uops_6_fu_code_1}, {uops_5_fu_code_1}, {uops_4_fu_code_1}, {uops_3_fu_code_1}, {uops_2_fu_code_1}, {uops_1_fu_code_1}, {uops_0_fu_code_1}}; // @[util.scala:505:22, :547:21]
assign out_uop_fu_code_1 = _GEN_13[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0] _GEN_14 = {{uops_7_fu_code_2}, {uops_6_fu_code_2}, {uops_5_fu_code_2}, {uops_4_fu_code_2}, {uops_3_fu_code_2}, {uops_2_fu_code_2}, {uops_1_fu_code_2}, {uops_0_fu_code_2}}; // @[util.scala:505:22, :547:21]
assign out_uop_fu_code_2 = _GEN_14[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0] _GEN_15 = {{uops_7_fu_code_3}, {uops_6_fu_code_3}, {uops_5_fu_code_3}, {uops_4_fu_code_3}, {uops_3_fu_code_3}, {uops_2_fu_code_3}, {uops_1_fu_code_3}, {uops_0_fu_code_3}}; // @[util.scala:505:22, :547:21]
assign out_uop_fu_code_3 = _GEN_15[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0] _GEN_16 = {{uops_7_fu_code_4}, {uops_6_fu_code_4}, {uops_5_fu_code_4}, {uops_4_fu_code_4}, {uops_3_fu_code_4}, {uops_2_fu_code_4}, {uops_1_fu_code_4}, {uops_0_fu_code_4}}; // @[util.scala:505:22, :547:21]
assign out_uop_fu_code_4 = _GEN_16[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0] _GEN_17 = {{uops_7_fu_code_5}, {uops_6_fu_code_5}, {uops_5_fu_code_5}, {uops_4_fu_code_5}, {uops_3_fu_code_5}, {uops_2_fu_code_5}, {uops_1_fu_code_5}, {uops_0_fu_code_5}}; // @[util.scala:505:22, :547:21]
assign out_uop_fu_code_5 = _GEN_17[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0] _GEN_18 = {{uops_7_fu_code_6}, {uops_6_fu_code_6}, {uops_5_fu_code_6}, {uops_4_fu_code_6}, {uops_3_fu_code_6}, {uops_2_fu_code_6}, {uops_1_fu_code_6}, {uops_0_fu_code_6}}; // @[util.scala:505:22, :547:21]
assign out_uop_fu_code_6 = _GEN_18[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0] _GEN_19 = {{uops_7_fu_code_7}, {uops_6_fu_code_7}, {uops_5_fu_code_7}, {uops_4_fu_code_7}, {uops_3_fu_code_7}, {uops_2_fu_code_7}, {uops_1_fu_code_7}, {uops_0_fu_code_7}}; // @[util.scala:505:22, :547:21]
assign out_uop_fu_code_7 = _GEN_19[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0] _GEN_20 = {{uops_7_fu_code_8}, {uops_6_fu_code_8}, {uops_5_fu_code_8}, {uops_4_fu_code_8}, {uops_3_fu_code_8}, {uops_2_fu_code_8}, {uops_1_fu_code_8}, {uops_0_fu_code_8}}; // @[util.scala:505:22, :547:21]
assign out_uop_fu_code_8 = _GEN_20[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0] _GEN_21 = {{uops_7_fu_code_9}, {uops_6_fu_code_9}, {uops_5_fu_code_9}, {uops_4_fu_code_9}, {uops_3_fu_code_9}, {uops_2_fu_code_9}, {uops_1_fu_code_9}, {uops_0_fu_code_9}}; // @[util.scala:505:22, :547:21]
assign out_uop_fu_code_9 = _GEN_21[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0] _GEN_22 = {{uops_7_iw_issued}, {uops_6_iw_issued}, {uops_5_iw_issued}, {uops_4_iw_issued}, {uops_3_iw_issued}, {uops_2_iw_issued}, {uops_1_iw_issued}, {uops_0_iw_issued}}; // @[util.scala:505:22, :547:21]
assign out_uop_iw_issued = _GEN_22[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0] _GEN_23 = {{uops_7_iw_issued_partial_agen}, {uops_6_iw_issued_partial_agen}, {uops_5_iw_issued_partial_agen}, {uops_4_iw_issued_partial_agen}, {uops_3_iw_issued_partial_agen}, {uops_2_iw_issued_partial_agen}, {uops_1_iw_issued_partial_agen}, {uops_0_iw_issued_partial_agen}}; // @[util.scala:505:22, :547:21]
assign out_uop_iw_issued_partial_agen = _GEN_23[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0] _GEN_24 = {{uops_7_iw_issued_partial_dgen}, {uops_6_iw_issued_partial_dgen}, {uops_5_iw_issued_partial_dgen}, {uops_4_iw_issued_partial_dgen}, {uops_3_iw_issued_partial_dgen}, {uops_2_iw_issued_partial_dgen}, {uops_1_iw_issued_partial_dgen}, {uops_0_iw_issued_partial_dgen}}; // @[util.scala:505:22, :547:21]
assign out_uop_iw_issued_partial_dgen = _GEN_24[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0][2:0] _GEN_25 = {{uops_7_iw_p1_speculative_child}, {uops_6_iw_p1_speculative_child}, {uops_5_iw_p1_speculative_child}, {uops_4_iw_p1_speculative_child}, {uops_3_iw_p1_speculative_child}, {uops_2_iw_p1_speculative_child}, {uops_1_iw_p1_speculative_child}, {uops_0_iw_p1_speculative_child}}; // @[util.scala:505:22, :547:21]
assign out_uop_iw_p1_speculative_child = _GEN_25[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0][2:0] _GEN_26 = {{uops_7_iw_p2_speculative_child}, {uops_6_iw_p2_speculative_child}, {uops_5_iw_p2_speculative_child}, {uops_4_iw_p2_speculative_child}, {uops_3_iw_p2_speculative_child}, {uops_2_iw_p2_speculative_child}, {uops_1_iw_p2_speculative_child}, {uops_0_iw_p2_speculative_child}}; // @[util.scala:505:22, :547:21]
assign out_uop_iw_p2_speculative_child = _GEN_26[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0] _GEN_27 = {{uops_7_iw_p1_bypass_hint}, {uops_6_iw_p1_bypass_hint}, {uops_5_iw_p1_bypass_hint}, {uops_4_iw_p1_bypass_hint}, {uops_3_iw_p1_bypass_hint}, {uops_2_iw_p1_bypass_hint}, {uops_1_iw_p1_bypass_hint}, {uops_0_iw_p1_bypass_hint}}; // @[util.scala:505:22, :547:21]
assign out_uop_iw_p1_bypass_hint = _GEN_27[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0] _GEN_28 = {{uops_7_iw_p2_bypass_hint}, {uops_6_iw_p2_bypass_hint}, {uops_5_iw_p2_bypass_hint}, {uops_4_iw_p2_bypass_hint}, {uops_3_iw_p2_bypass_hint}, {uops_2_iw_p2_bypass_hint}, {uops_1_iw_p2_bypass_hint}, {uops_0_iw_p2_bypass_hint}}; // @[util.scala:505:22, :547:21]
assign out_uop_iw_p2_bypass_hint = _GEN_28[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0] _GEN_29 = {{uops_7_iw_p3_bypass_hint}, {uops_6_iw_p3_bypass_hint}, {uops_5_iw_p3_bypass_hint}, {uops_4_iw_p3_bypass_hint}, {uops_3_iw_p3_bypass_hint}, {uops_2_iw_p3_bypass_hint}, {uops_1_iw_p3_bypass_hint}, {uops_0_iw_p3_bypass_hint}}; // @[util.scala:505:22, :547:21]
assign out_uop_iw_p3_bypass_hint = _GEN_29[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0][2:0] _GEN_30 = {{uops_7_dis_col_sel}, {uops_6_dis_col_sel}, {uops_5_dis_col_sel}, {uops_4_dis_col_sel}, {uops_3_dis_col_sel}, {uops_2_dis_col_sel}, {uops_1_dis_col_sel}, {uops_0_dis_col_sel}}; // @[util.scala:505:22, :547:21]
assign out_uop_dis_col_sel = _GEN_30[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0][15:0] _GEN_31 = {{uops_7_br_mask}, {uops_6_br_mask}, {uops_5_br_mask}, {uops_4_br_mask}, {uops_3_br_mask}, {uops_2_br_mask}, {uops_1_br_mask}, {uops_0_br_mask}}; // @[util.scala:505:22, :547:21]
assign out_uop_br_mask = _GEN_31[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0][3:0] _GEN_32 = {{uops_7_br_tag}, {uops_6_br_tag}, {uops_5_br_tag}, {uops_4_br_tag}, {uops_3_br_tag}, {uops_2_br_tag}, {uops_1_br_tag}, {uops_0_br_tag}}; // @[util.scala:505:22, :547:21]
assign out_uop_br_tag = _GEN_32[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0][3:0] _GEN_33 = {{uops_7_br_type}, {uops_6_br_type}, {uops_5_br_type}, {uops_4_br_type}, {uops_3_br_type}, {uops_2_br_type}, {uops_1_br_type}, {uops_0_br_type}}; // @[util.scala:505:22, :547:21]
assign out_uop_br_type = _GEN_33[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0] _GEN_34 = {{uops_7_is_sfb}, {uops_6_is_sfb}, {uops_5_is_sfb}, {uops_4_is_sfb}, {uops_3_is_sfb}, {uops_2_is_sfb}, {uops_1_is_sfb}, {uops_0_is_sfb}}; // @[util.scala:505:22, :547:21]
assign out_uop_is_sfb = _GEN_34[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0] _GEN_35 = {{uops_7_is_fence}, {uops_6_is_fence}, {uops_5_is_fence}, {uops_4_is_fence}, {uops_3_is_fence}, {uops_2_is_fence}, {uops_1_is_fence}, {uops_0_is_fence}}; // @[util.scala:505:22, :547:21]
assign out_uop_is_fence = _GEN_35[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0] _GEN_36 = {{uops_7_is_fencei}, {uops_6_is_fencei}, {uops_5_is_fencei}, {uops_4_is_fencei}, {uops_3_is_fencei}, {uops_2_is_fencei}, {uops_1_is_fencei}, {uops_0_is_fencei}}; // @[util.scala:505:22, :547:21]
assign out_uop_is_fencei = _GEN_36[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0] _GEN_37 = {{uops_7_is_sfence}, {uops_6_is_sfence}, {uops_5_is_sfence}, {uops_4_is_sfence}, {uops_3_is_sfence}, {uops_2_is_sfence}, {uops_1_is_sfence}, {uops_0_is_sfence}}; // @[util.scala:505:22, :547:21]
assign out_uop_is_sfence = _GEN_37[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0] _GEN_38 = {{uops_7_is_amo}, {uops_6_is_amo}, {uops_5_is_amo}, {uops_4_is_amo}, {uops_3_is_amo}, {uops_2_is_amo}, {uops_1_is_amo}, {uops_0_is_amo}}; // @[util.scala:505:22, :547:21]
assign out_uop_is_amo = _GEN_38[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0] _GEN_39 = {{uops_7_is_eret}, {uops_6_is_eret}, {uops_5_is_eret}, {uops_4_is_eret}, {uops_3_is_eret}, {uops_2_is_eret}, {uops_1_is_eret}, {uops_0_is_eret}}; // @[util.scala:505:22, :547:21]
assign out_uop_is_eret = _GEN_39[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0] _GEN_40 = {{uops_7_is_sys_pc2epc}, {uops_6_is_sys_pc2epc}, {uops_5_is_sys_pc2epc}, {uops_4_is_sys_pc2epc}, {uops_3_is_sys_pc2epc}, {uops_2_is_sys_pc2epc}, {uops_1_is_sys_pc2epc}, {uops_0_is_sys_pc2epc}}; // @[util.scala:505:22, :547:21]
assign out_uop_is_sys_pc2epc = _GEN_40[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0] _GEN_41 = {{uops_7_is_rocc}, {uops_6_is_rocc}, {uops_5_is_rocc}, {uops_4_is_rocc}, {uops_3_is_rocc}, {uops_2_is_rocc}, {uops_1_is_rocc}, {uops_0_is_rocc}}; // @[util.scala:505:22, :547:21]
assign out_uop_is_rocc = _GEN_41[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0] _GEN_42 = {{uops_7_is_mov}, {uops_6_is_mov}, {uops_5_is_mov}, {uops_4_is_mov}, {uops_3_is_mov}, {uops_2_is_mov}, {uops_1_is_mov}, {uops_0_is_mov}}; // @[util.scala:505:22, :547:21]
assign out_uop_is_mov = _GEN_42[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0][4:0] _GEN_43 = {{uops_7_ftq_idx}, {uops_6_ftq_idx}, {uops_5_ftq_idx}, {uops_4_ftq_idx}, {uops_3_ftq_idx}, {uops_2_ftq_idx}, {uops_1_ftq_idx}, {uops_0_ftq_idx}}; // @[util.scala:505:22, :547:21]
assign out_uop_ftq_idx = _GEN_43[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0] _GEN_44 = {{uops_7_edge_inst}, {uops_6_edge_inst}, {uops_5_edge_inst}, {uops_4_edge_inst}, {uops_3_edge_inst}, {uops_2_edge_inst}, {uops_1_edge_inst}, {uops_0_edge_inst}}; // @[util.scala:505:22, :547:21]
assign out_uop_edge_inst = _GEN_44[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0][5:0] _GEN_45 = {{uops_7_pc_lob}, {uops_6_pc_lob}, {uops_5_pc_lob}, {uops_4_pc_lob}, {uops_3_pc_lob}, {uops_2_pc_lob}, {uops_1_pc_lob}, {uops_0_pc_lob}}; // @[util.scala:505:22, :547:21]
assign out_uop_pc_lob = _GEN_45[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0] _GEN_46 = {{uops_7_taken}, {uops_6_taken}, {uops_5_taken}, {uops_4_taken}, {uops_3_taken}, {uops_2_taken}, {uops_1_taken}, {uops_0_taken}}; // @[util.scala:505:22, :547:21]
assign out_uop_taken = _GEN_46[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0] _GEN_47 = {{uops_7_imm_rename}, {uops_6_imm_rename}, {uops_5_imm_rename}, {uops_4_imm_rename}, {uops_3_imm_rename}, {uops_2_imm_rename}, {uops_1_imm_rename}, {uops_0_imm_rename}}; // @[util.scala:505:22, :547:21]
assign out_uop_imm_rename = _GEN_47[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0][2:0] _GEN_48 = {{uops_7_imm_sel}, {uops_6_imm_sel}, {uops_5_imm_sel}, {uops_4_imm_sel}, {uops_3_imm_sel}, {uops_2_imm_sel}, {uops_1_imm_sel}, {uops_0_imm_sel}}; // @[util.scala:505:22, :547:21]
assign out_uop_imm_sel = _GEN_48[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0][4:0] _GEN_49 = {{uops_7_pimm}, {uops_6_pimm}, {uops_5_pimm}, {uops_4_pimm}, {uops_3_pimm}, {uops_2_pimm}, {uops_1_pimm}, {uops_0_pimm}}; // @[util.scala:505:22, :547:21]
assign out_uop_pimm = _GEN_49[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0][19:0] _GEN_50 = {{uops_7_imm_packed}, {uops_6_imm_packed}, {uops_5_imm_packed}, {uops_4_imm_packed}, {uops_3_imm_packed}, {uops_2_imm_packed}, {uops_1_imm_packed}, {uops_0_imm_packed}}; // @[util.scala:505:22, :547:21]
assign out_uop_imm_packed = _GEN_50[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0][1:0] _GEN_51 = {{uops_7_op1_sel}, {uops_6_op1_sel}, {uops_5_op1_sel}, {uops_4_op1_sel}, {uops_3_op1_sel}, {uops_2_op1_sel}, {uops_1_op1_sel}, {uops_0_op1_sel}}; // @[util.scala:505:22, :547:21]
assign out_uop_op1_sel = _GEN_51[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0][2:0] _GEN_52 = {{uops_7_op2_sel}, {uops_6_op2_sel}, {uops_5_op2_sel}, {uops_4_op2_sel}, {uops_3_op2_sel}, {uops_2_op2_sel}, {uops_1_op2_sel}, {uops_0_op2_sel}}; // @[util.scala:505:22, :547:21]
assign out_uop_op2_sel = _GEN_52[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0] _GEN_53 = {{uops_7_fp_ctrl_ldst}, {uops_6_fp_ctrl_ldst}, {uops_5_fp_ctrl_ldst}, {uops_4_fp_ctrl_ldst}, {uops_3_fp_ctrl_ldst}, {uops_2_fp_ctrl_ldst}, {uops_1_fp_ctrl_ldst}, {uops_0_fp_ctrl_ldst}}; // @[util.scala:505:22, :547:21]
assign out_uop_fp_ctrl_ldst = _GEN_53[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0] _GEN_54 = {{uops_7_fp_ctrl_wen}, {uops_6_fp_ctrl_wen}, {uops_5_fp_ctrl_wen}, {uops_4_fp_ctrl_wen}, {uops_3_fp_ctrl_wen}, {uops_2_fp_ctrl_wen}, {uops_1_fp_ctrl_wen}, {uops_0_fp_ctrl_wen}}; // @[util.scala:505:22, :547:21]
assign out_uop_fp_ctrl_wen = _GEN_54[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0] _GEN_55 = {{uops_7_fp_ctrl_ren1}, {uops_6_fp_ctrl_ren1}, {uops_5_fp_ctrl_ren1}, {uops_4_fp_ctrl_ren1}, {uops_3_fp_ctrl_ren1}, {uops_2_fp_ctrl_ren1}, {uops_1_fp_ctrl_ren1}, {uops_0_fp_ctrl_ren1}}; // @[util.scala:505:22, :547:21]
assign out_uop_fp_ctrl_ren1 = _GEN_55[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0] _GEN_56 = {{uops_7_fp_ctrl_ren2}, {uops_6_fp_ctrl_ren2}, {uops_5_fp_ctrl_ren2}, {uops_4_fp_ctrl_ren2}, {uops_3_fp_ctrl_ren2}, {uops_2_fp_ctrl_ren2}, {uops_1_fp_ctrl_ren2}, {uops_0_fp_ctrl_ren2}}; // @[util.scala:505:22, :547:21]
assign out_uop_fp_ctrl_ren2 = _GEN_56[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0] _GEN_57 = {{uops_7_fp_ctrl_ren3}, {uops_6_fp_ctrl_ren3}, {uops_5_fp_ctrl_ren3}, {uops_4_fp_ctrl_ren3}, {uops_3_fp_ctrl_ren3}, {uops_2_fp_ctrl_ren3}, {uops_1_fp_ctrl_ren3}, {uops_0_fp_ctrl_ren3}}; // @[util.scala:505:22, :547:21]
assign out_uop_fp_ctrl_ren3 = _GEN_57[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0] _GEN_58 = {{uops_7_fp_ctrl_swap12}, {uops_6_fp_ctrl_swap12}, {uops_5_fp_ctrl_swap12}, {uops_4_fp_ctrl_swap12}, {uops_3_fp_ctrl_swap12}, {uops_2_fp_ctrl_swap12}, {uops_1_fp_ctrl_swap12}, {uops_0_fp_ctrl_swap12}}; // @[util.scala:505:22, :547:21]
assign out_uop_fp_ctrl_swap12 = _GEN_58[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0] _GEN_59 = {{uops_7_fp_ctrl_swap23}, {uops_6_fp_ctrl_swap23}, {uops_5_fp_ctrl_swap23}, {uops_4_fp_ctrl_swap23}, {uops_3_fp_ctrl_swap23}, {uops_2_fp_ctrl_swap23}, {uops_1_fp_ctrl_swap23}, {uops_0_fp_ctrl_swap23}}; // @[util.scala:505:22, :547:21]
assign out_uop_fp_ctrl_swap23 = _GEN_59[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0][1:0] _GEN_60 = {{uops_7_fp_ctrl_typeTagIn}, {uops_6_fp_ctrl_typeTagIn}, {uops_5_fp_ctrl_typeTagIn}, {uops_4_fp_ctrl_typeTagIn}, {uops_3_fp_ctrl_typeTagIn}, {uops_2_fp_ctrl_typeTagIn}, {uops_1_fp_ctrl_typeTagIn}, {uops_0_fp_ctrl_typeTagIn}}; // @[util.scala:505:22, :547:21]
assign out_uop_fp_ctrl_typeTagIn = _GEN_60[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0][1:0] _GEN_61 = {{uops_7_fp_ctrl_typeTagOut}, {uops_6_fp_ctrl_typeTagOut}, {uops_5_fp_ctrl_typeTagOut}, {uops_4_fp_ctrl_typeTagOut}, {uops_3_fp_ctrl_typeTagOut}, {uops_2_fp_ctrl_typeTagOut}, {uops_1_fp_ctrl_typeTagOut}, {uops_0_fp_ctrl_typeTagOut}}; // @[util.scala:505:22, :547:21]
assign out_uop_fp_ctrl_typeTagOut = _GEN_61[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0] _GEN_62 = {{uops_7_fp_ctrl_fromint}, {uops_6_fp_ctrl_fromint}, {uops_5_fp_ctrl_fromint}, {uops_4_fp_ctrl_fromint}, {uops_3_fp_ctrl_fromint}, {uops_2_fp_ctrl_fromint}, {uops_1_fp_ctrl_fromint}, {uops_0_fp_ctrl_fromint}}; // @[util.scala:505:22, :547:21]
assign out_uop_fp_ctrl_fromint = _GEN_62[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0] _GEN_63 = {{uops_7_fp_ctrl_toint}, {uops_6_fp_ctrl_toint}, {uops_5_fp_ctrl_toint}, {uops_4_fp_ctrl_toint}, {uops_3_fp_ctrl_toint}, {uops_2_fp_ctrl_toint}, {uops_1_fp_ctrl_toint}, {uops_0_fp_ctrl_toint}}; // @[util.scala:505:22, :547:21]
assign out_uop_fp_ctrl_toint = _GEN_63[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0] _GEN_64 = {{uops_7_fp_ctrl_fastpipe}, {uops_6_fp_ctrl_fastpipe}, {uops_5_fp_ctrl_fastpipe}, {uops_4_fp_ctrl_fastpipe}, {uops_3_fp_ctrl_fastpipe}, {uops_2_fp_ctrl_fastpipe}, {uops_1_fp_ctrl_fastpipe}, {uops_0_fp_ctrl_fastpipe}}; // @[util.scala:505:22, :547:21]
assign out_uop_fp_ctrl_fastpipe = _GEN_64[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0] _GEN_65 = {{uops_7_fp_ctrl_fma}, {uops_6_fp_ctrl_fma}, {uops_5_fp_ctrl_fma}, {uops_4_fp_ctrl_fma}, {uops_3_fp_ctrl_fma}, {uops_2_fp_ctrl_fma}, {uops_1_fp_ctrl_fma}, {uops_0_fp_ctrl_fma}}; // @[util.scala:505:22, :547:21]
assign out_uop_fp_ctrl_fma = _GEN_65[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0] _GEN_66 = {{uops_7_fp_ctrl_div}, {uops_6_fp_ctrl_div}, {uops_5_fp_ctrl_div}, {uops_4_fp_ctrl_div}, {uops_3_fp_ctrl_div}, {uops_2_fp_ctrl_div}, {uops_1_fp_ctrl_div}, {uops_0_fp_ctrl_div}}; // @[util.scala:505:22, :547:21]
assign out_uop_fp_ctrl_div = _GEN_66[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0] _GEN_67 = {{uops_7_fp_ctrl_sqrt}, {uops_6_fp_ctrl_sqrt}, {uops_5_fp_ctrl_sqrt}, {uops_4_fp_ctrl_sqrt}, {uops_3_fp_ctrl_sqrt}, {uops_2_fp_ctrl_sqrt}, {uops_1_fp_ctrl_sqrt}, {uops_0_fp_ctrl_sqrt}}; // @[util.scala:505:22, :547:21]
assign out_uop_fp_ctrl_sqrt = _GEN_67[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0] _GEN_68 = {{uops_7_fp_ctrl_wflags}, {uops_6_fp_ctrl_wflags}, {uops_5_fp_ctrl_wflags}, {uops_4_fp_ctrl_wflags}, {uops_3_fp_ctrl_wflags}, {uops_2_fp_ctrl_wflags}, {uops_1_fp_ctrl_wflags}, {uops_0_fp_ctrl_wflags}}; // @[util.scala:505:22, :547:21]
assign out_uop_fp_ctrl_wflags = _GEN_68[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0] _GEN_69 = {{uops_7_fp_ctrl_vec}, {uops_6_fp_ctrl_vec}, {uops_5_fp_ctrl_vec}, {uops_4_fp_ctrl_vec}, {uops_3_fp_ctrl_vec}, {uops_2_fp_ctrl_vec}, {uops_1_fp_ctrl_vec}, {uops_0_fp_ctrl_vec}}; // @[util.scala:505:22, :547:21]
assign out_uop_fp_ctrl_vec = _GEN_69[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0][6:0] _GEN_70 = {{uops_7_rob_idx}, {uops_6_rob_idx}, {uops_5_rob_idx}, {uops_4_rob_idx}, {uops_3_rob_idx}, {uops_2_rob_idx}, {uops_1_rob_idx}, {uops_0_rob_idx}}; // @[util.scala:505:22, :547:21]
assign out_uop_rob_idx = _GEN_70[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0][4:0] _GEN_71 = {{uops_7_ldq_idx}, {uops_6_ldq_idx}, {uops_5_ldq_idx}, {uops_4_ldq_idx}, {uops_3_ldq_idx}, {uops_2_ldq_idx}, {uops_1_ldq_idx}, {uops_0_ldq_idx}}; // @[util.scala:505:22, :547:21]
assign out_uop_ldq_idx = _GEN_71[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0][4:0] _GEN_72 = {{uops_7_stq_idx}, {uops_6_stq_idx}, {uops_5_stq_idx}, {uops_4_stq_idx}, {uops_3_stq_idx}, {uops_2_stq_idx}, {uops_1_stq_idx}, {uops_0_stq_idx}}; // @[util.scala:505:22, :547:21]
assign out_uop_stq_idx = _GEN_72[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0][1:0] _GEN_73 = {{uops_7_rxq_idx}, {uops_6_rxq_idx}, {uops_5_rxq_idx}, {uops_4_rxq_idx}, {uops_3_rxq_idx}, {uops_2_rxq_idx}, {uops_1_rxq_idx}, {uops_0_rxq_idx}}; // @[util.scala:505:22, :547:21]
assign out_uop_rxq_idx = _GEN_73[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0][6:0] _GEN_74 = {{uops_7_pdst}, {uops_6_pdst}, {uops_5_pdst}, {uops_4_pdst}, {uops_3_pdst}, {uops_2_pdst}, {uops_1_pdst}, {uops_0_pdst}}; // @[util.scala:505:22, :547:21]
assign out_uop_pdst = _GEN_74[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0][6:0] _GEN_75 = {{uops_7_prs1}, {uops_6_prs1}, {uops_5_prs1}, {uops_4_prs1}, {uops_3_prs1}, {uops_2_prs1}, {uops_1_prs1}, {uops_0_prs1}}; // @[util.scala:505:22, :547:21]
assign out_uop_prs1 = _GEN_75[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0][6:0] _GEN_76 = {{uops_7_prs2}, {uops_6_prs2}, {uops_5_prs2}, {uops_4_prs2}, {uops_3_prs2}, {uops_2_prs2}, {uops_1_prs2}, {uops_0_prs2}}; // @[util.scala:505:22, :547:21]
assign out_uop_prs2 = _GEN_76[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0][6:0] _GEN_77 = {{uops_7_prs3}, {uops_6_prs3}, {uops_5_prs3}, {uops_4_prs3}, {uops_3_prs3}, {uops_2_prs3}, {uops_1_prs3}, {uops_0_prs3}}; // @[util.scala:505:22, :547:21]
assign out_uop_prs3 = _GEN_77[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0][4:0] _GEN_78 = {{uops_7_ppred}, {uops_6_ppred}, {uops_5_ppred}, {uops_4_ppred}, {uops_3_ppred}, {uops_2_ppred}, {uops_1_ppred}, {uops_0_ppred}}; // @[util.scala:505:22, :547:21]
assign out_uop_ppred = _GEN_78[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0] _GEN_79 = {{uops_7_prs1_busy}, {uops_6_prs1_busy}, {uops_5_prs1_busy}, {uops_4_prs1_busy}, {uops_3_prs1_busy}, {uops_2_prs1_busy}, {uops_1_prs1_busy}, {uops_0_prs1_busy}}; // @[util.scala:505:22, :547:21]
assign out_uop_prs1_busy = _GEN_79[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0] _GEN_80 = {{uops_7_prs2_busy}, {uops_6_prs2_busy}, {uops_5_prs2_busy}, {uops_4_prs2_busy}, {uops_3_prs2_busy}, {uops_2_prs2_busy}, {uops_1_prs2_busy}, {uops_0_prs2_busy}}; // @[util.scala:505:22, :547:21]
assign out_uop_prs2_busy = _GEN_80[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0] _GEN_81 = {{uops_7_prs3_busy}, {uops_6_prs3_busy}, {uops_5_prs3_busy}, {uops_4_prs3_busy}, {uops_3_prs3_busy}, {uops_2_prs3_busy}, {uops_1_prs3_busy}, {uops_0_prs3_busy}}; // @[util.scala:505:22, :547:21]
assign out_uop_prs3_busy = _GEN_81[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0] _GEN_82 = {{uops_7_ppred_busy}, {uops_6_ppred_busy}, {uops_5_ppred_busy}, {uops_4_ppred_busy}, {uops_3_ppred_busy}, {uops_2_ppred_busy}, {uops_1_ppred_busy}, {uops_0_ppred_busy}}; // @[util.scala:505:22, :547:21]
assign out_uop_ppred_busy = _GEN_82[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0][6:0] _GEN_83 = {{uops_7_stale_pdst}, {uops_6_stale_pdst}, {uops_5_stale_pdst}, {uops_4_stale_pdst}, {uops_3_stale_pdst}, {uops_2_stale_pdst}, {uops_1_stale_pdst}, {uops_0_stale_pdst}}; // @[util.scala:505:22, :547:21]
assign out_uop_stale_pdst = _GEN_83[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0] _GEN_84 = {{uops_7_exception}, {uops_6_exception}, {uops_5_exception}, {uops_4_exception}, {uops_3_exception}, {uops_2_exception}, {uops_1_exception}, {uops_0_exception}}; // @[util.scala:505:22, :547:21]
assign out_uop_exception = _GEN_84[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0][63:0] _GEN_85 = {{uops_7_exc_cause}, {uops_6_exc_cause}, {uops_5_exc_cause}, {uops_4_exc_cause}, {uops_3_exc_cause}, {uops_2_exc_cause}, {uops_1_exc_cause}, {uops_0_exc_cause}}; // @[util.scala:505:22, :547:21]
assign out_uop_exc_cause = _GEN_85[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0][4:0] _GEN_86 = {{uops_7_mem_cmd}, {uops_6_mem_cmd}, {uops_5_mem_cmd}, {uops_4_mem_cmd}, {uops_3_mem_cmd}, {uops_2_mem_cmd}, {uops_1_mem_cmd}, {uops_0_mem_cmd}}; // @[util.scala:505:22, :547:21]
assign out_uop_mem_cmd = _GEN_86[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0][1:0] _GEN_87 = {{uops_7_mem_size}, {uops_6_mem_size}, {uops_5_mem_size}, {uops_4_mem_size}, {uops_3_mem_size}, {uops_2_mem_size}, {uops_1_mem_size}, {uops_0_mem_size}}; // @[util.scala:505:22, :547:21]
assign out_uop_mem_size = _GEN_87[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0] _GEN_88 = {{uops_7_mem_signed}, {uops_6_mem_signed}, {uops_5_mem_signed}, {uops_4_mem_signed}, {uops_3_mem_signed}, {uops_2_mem_signed}, {uops_1_mem_signed}, {uops_0_mem_signed}}; // @[util.scala:505:22, :547:21]
assign out_uop_mem_signed = _GEN_88[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0] _GEN_89 = {{uops_7_uses_ldq}, {uops_6_uses_ldq}, {uops_5_uses_ldq}, {uops_4_uses_ldq}, {uops_3_uses_ldq}, {uops_2_uses_ldq}, {uops_1_uses_ldq}, {uops_0_uses_ldq}}; // @[util.scala:505:22, :547:21]
assign out_uop_uses_ldq = _GEN_89[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0] _GEN_90 = {{uops_7_uses_stq}, {uops_6_uses_stq}, {uops_5_uses_stq}, {uops_4_uses_stq}, {uops_3_uses_stq}, {uops_2_uses_stq}, {uops_1_uses_stq}, {uops_0_uses_stq}}; // @[util.scala:505:22, :547:21]
assign out_uop_uses_stq = _GEN_90[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0] _GEN_91 = {{uops_7_is_unique}, {uops_6_is_unique}, {uops_5_is_unique}, {uops_4_is_unique}, {uops_3_is_unique}, {uops_2_is_unique}, {uops_1_is_unique}, {uops_0_is_unique}}; // @[util.scala:505:22, :547:21]
assign out_uop_is_unique = _GEN_91[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0] _GEN_92 = {{uops_7_flush_on_commit}, {uops_6_flush_on_commit}, {uops_5_flush_on_commit}, {uops_4_flush_on_commit}, {uops_3_flush_on_commit}, {uops_2_flush_on_commit}, {uops_1_flush_on_commit}, {uops_0_flush_on_commit}}; // @[util.scala:505:22, :547:21]
assign out_uop_flush_on_commit = _GEN_92[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0][2:0] _GEN_93 = {{uops_7_csr_cmd}, {uops_6_csr_cmd}, {uops_5_csr_cmd}, {uops_4_csr_cmd}, {uops_3_csr_cmd}, {uops_2_csr_cmd}, {uops_1_csr_cmd}, {uops_0_csr_cmd}}; // @[util.scala:505:22, :547:21]
assign out_uop_csr_cmd = _GEN_93[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0] _GEN_94 = {{uops_7_ldst_is_rs1}, {uops_6_ldst_is_rs1}, {uops_5_ldst_is_rs1}, {uops_4_ldst_is_rs1}, {uops_3_ldst_is_rs1}, {uops_2_ldst_is_rs1}, {uops_1_ldst_is_rs1}, {uops_0_ldst_is_rs1}}; // @[util.scala:505:22, :547:21]
assign out_uop_ldst_is_rs1 = _GEN_94[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0][5:0] _GEN_95 = {{uops_7_ldst}, {uops_6_ldst}, {uops_5_ldst}, {uops_4_ldst}, {uops_3_ldst}, {uops_2_ldst}, {uops_1_ldst}, {uops_0_ldst}}; // @[util.scala:505:22, :547:21]
assign out_uop_ldst = _GEN_95[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0][5:0] _GEN_96 = {{uops_7_lrs1}, {uops_6_lrs1}, {uops_5_lrs1}, {uops_4_lrs1}, {uops_3_lrs1}, {uops_2_lrs1}, {uops_1_lrs1}, {uops_0_lrs1}}; // @[util.scala:505:22, :547:21]
assign out_uop_lrs1 = _GEN_96[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0][5:0] _GEN_97 = {{uops_7_lrs2}, {uops_6_lrs2}, {uops_5_lrs2}, {uops_4_lrs2}, {uops_3_lrs2}, {uops_2_lrs2}, {uops_1_lrs2}, {uops_0_lrs2}}; // @[util.scala:505:22, :547:21]
assign out_uop_lrs2 = _GEN_97[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0][5:0] _GEN_98 = {{uops_7_lrs3}, {uops_6_lrs3}, {uops_5_lrs3}, {uops_4_lrs3}, {uops_3_lrs3}, {uops_2_lrs3}, {uops_1_lrs3}, {uops_0_lrs3}}; // @[util.scala:505:22, :547:21]
assign out_uop_lrs3 = _GEN_98[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0][1:0] _GEN_99 = {{uops_7_dst_rtype}, {uops_6_dst_rtype}, {uops_5_dst_rtype}, {uops_4_dst_rtype}, {uops_3_dst_rtype}, {uops_2_dst_rtype}, {uops_1_dst_rtype}, {uops_0_dst_rtype}}; // @[util.scala:505:22, :547:21]
assign out_uop_dst_rtype = _GEN_99[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0][1:0] _GEN_100 = {{uops_7_lrs1_rtype}, {uops_6_lrs1_rtype}, {uops_5_lrs1_rtype}, {uops_4_lrs1_rtype}, {uops_3_lrs1_rtype}, {uops_2_lrs1_rtype}, {uops_1_lrs1_rtype}, {uops_0_lrs1_rtype}}; // @[util.scala:505:22, :547:21]
assign out_uop_lrs1_rtype = _GEN_100[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0][1:0] _GEN_101 = {{uops_7_lrs2_rtype}, {uops_6_lrs2_rtype}, {uops_5_lrs2_rtype}, {uops_4_lrs2_rtype}, {uops_3_lrs2_rtype}, {uops_2_lrs2_rtype}, {uops_1_lrs2_rtype}, {uops_0_lrs2_rtype}}; // @[util.scala:505:22, :547:21]
assign out_uop_lrs2_rtype = _GEN_101[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0] _GEN_102 = {{uops_7_frs3_en}, {uops_6_frs3_en}, {uops_5_frs3_en}, {uops_4_frs3_en}, {uops_3_frs3_en}, {uops_2_frs3_en}, {uops_1_frs3_en}, {uops_0_frs3_en}}; // @[util.scala:505:22, :547:21]
assign out_uop_frs3_en = _GEN_102[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0] _GEN_103 = {{uops_7_fcn_dw}, {uops_6_fcn_dw}, {uops_5_fcn_dw}, {uops_4_fcn_dw}, {uops_3_fcn_dw}, {uops_2_fcn_dw}, {uops_1_fcn_dw}, {uops_0_fcn_dw}}; // @[util.scala:505:22, :547:21]
assign out_uop_fcn_dw = _GEN_103[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0][4:0] _GEN_104 = {{uops_7_fcn_op}, {uops_6_fcn_op}, {uops_5_fcn_op}, {uops_4_fcn_op}, {uops_3_fcn_op}, {uops_2_fcn_op}, {uops_1_fcn_op}, {uops_0_fcn_op}}; // @[util.scala:505:22, :547:21]
assign out_uop_fcn_op = _GEN_104[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0] _GEN_105 = {{uops_7_fp_val}, {uops_6_fp_val}, {uops_5_fp_val}, {uops_4_fp_val}, {uops_3_fp_val}, {uops_2_fp_val}, {uops_1_fp_val}, {uops_0_fp_val}}; // @[util.scala:505:22, :547:21]
assign out_uop_fp_val = _GEN_105[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0][2:0] _GEN_106 = {{uops_7_fp_rm}, {uops_6_fp_rm}, {uops_5_fp_rm}, {uops_4_fp_rm}, {uops_3_fp_rm}, {uops_2_fp_rm}, {uops_1_fp_rm}, {uops_0_fp_rm}}; // @[util.scala:505:22, :547:21]
assign out_uop_fp_rm = _GEN_106[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0][1:0] _GEN_107 = {{uops_7_fp_typ}, {uops_6_fp_typ}, {uops_5_fp_typ}, {uops_4_fp_typ}, {uops_3_fp_typ}, {uops_2_fp_typ}, {uops_1_fp_typ}, {uops_0_fp_typ}}; // @[util.scala:505:22, :547:21]
assign out_uop_fp_typ = _GEN_107[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0] _GEN_108 = {{uops_7_xcpt_pf_if}, {uops_6_xcpt_pf_if}, {uops_5_xcpt_pf_if}, {uops_4_xcpt_pf_if}, {uops_3_xcpt_pf_if}, {uops_2_xcpt_pf_if}, {uops_1_xcpt_pf_if}, {uops_0_xcpt_pf_if}}; // @[util.scala:505:22, :547:21]
assign out_uop_xcpt_pf_if = _GEN_108[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0] _GEN_109 = {{uops_7_xcpt_ae_if}, {uops_6_xcpt_ae_if}, {uops_5_xcpt_ae_if}, {uops_4_xcpt_ae_if}, {uops_3_xcpt_ae_if}, {uops_2_xcpt_ae_if}, {uops_1_xcpt_ae_if}, {uops_0_xcpt_ae_if}}; // @[util.scala:505:22, :547:21]
assign out_uop_xcpt_ae_if = _GEN_109[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0] _GEN_110 = {{uops_7_xcpt_ma_if}, {uops_6_xcpt_ma_if}, {uops_5_xcpt_ma_if}, {uops_4_xcpt_ma_if}, {uops_3_xcpt_ma_if}, {uops_2_xcpt_ma_if}, {uops_1_xcpt_ma_if}, {uops_0_xcpt_ma_if}}; // @[util.scala:505:22, :547:21]
assign out_uop_xcpt_ma_if = _GEN_110[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0] _GEN_111 = {{uops_7_bp_debug_if}, {uops_6_bp_debug_if}, {uops_5_bp_debug_if}, {uops_4_bp_debug_if}, {uops_3_bp_debug_if}, {uops_2_bp_debug_if}, {uops_1_bp_debug_if}, {uops_0_bp_debug_if}}; // @[util.scala:505:22, :547:21]
assign out_uop_bp_debug_if = _GEN_111[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0] _GEN_112 = {{uops_7_bp_xcpt_if}, {uops_6_bp_xcpt_if}, {uops_5_bp_xcpt_if}, {uops_4_bp_xcpt_if}, {uops_3_bp_xcpt_if}, {uops_2_bp_xcpt_if}, {uops_1_bp_xcpt_if}, {uops_0_bp_xcpt_if}}; // @[util.scala:505:22, :547:21]
assign out_uop_bp_xcpt_if = _GEN_112[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0][2:0] _GEN_113 = {{uops_7_debug_fsrc}, {uops_6_debug_fsrc}, {uops_5_debug_fsrc}, {uops_4_debug_fsrc}, {uops_3_debug_fsrc}, {uops_2_debug_fsrc}, {uops_1_debug_fsrc}, {uops_0_debug_fsrc}}; // @[util.scala:505:22, :547:21]
assign out_uop_debug_fsrc = _GEN_113[deq_ptr_value]; // @[Counter.scala:61:40]
wire [7:0][2:0] _GEN_114 = {{uops_7_debug_tsrc}, {uops_6_debug_tsrc}, {uops_5_debug_tsrc}, {uops_4_debug_tsrc}, {uops_3_debug_tsrc}, {uops_2_debug_tsrc}, {uops_1_debug_tsrc}, {uops_0_debug_tsrc}}; // @[util.scala:505:22, :547:21]
assign out_uop_debug_tsrc = _GEN_114[deq_ptr_value]; // @[Counter.scala:61:40]
wire _io_deq_valid_T = ~io_empty; // @[util.scala:458:7, :515:71, :548:32]
assign _io_deq_valid_T_1 = _io_deq_valid_T & _GEN_1; // @[util.scala:515:44, :548:{32,42}]
assign io_deq_valid_0 = _io_deq_valid_T_1; // @[util.scala:458:7, :548:42]
wire [3:0] _ptr_diff_T = _GEN_2 - _GEN_3; // @[Counter.scala:77:24]
wire [2:0] ptr_diff = _ptr_diff_T[2:0]; // @[util.scala:551:34]
wire [3:0] _io_count_T_1 = {_io_count_T, ptr_diff}; // @[util.scala:551:34, :553:{22,34}]
assign io_count_0 = _io_count_T_1[2:0]; // @[util.scala:458:7, :553:{16,22}]
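  // Note (editorial, not firtool output): deq.valid is asserted when the queue is non-empty and the
  // gating term _GEN_1 (defined earlier in the module; in the source this is likely the head entry's
  // valid bit, possibly qualified by flush/branch-kill) holds. io_count is the enqueue/dequeue
  // pointer difference, with _io_count_T (likely maybe_full && pointer-match) concatenated as the
  // wrap-around correction bit before the final truncation.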
wire _GEN_115 = enq_ptr_value == 3'h0; // @[Counter.scala:61:40]
wire _GEN_116 = do_enq & _GEN_115; // @[util.scala:514:26, :520:18, :526:19, :528:35]
wire _GEN_117 = enq_ptr_value == 3'h1; // @[Counter.scala:61:40]
wire _GEN_118 = do_enq & _GEN_117; // @[util.scala:514:26, :520:18, :526:19, :528:35]
wire _GEN_119 = enq_ptr_value == 3'h2; // @[Counter.scala:61:40]
wire _GEN_120 = do_enq & _GEN_119; // @[util.scala:514:26, :520:18, :526:19, :528:35]
wire _GEN_121 = enq_ptr_value == 3'h3; // @[Counter.scala:61:40]
wire _GEN_122 = do_enq & _GEN_121; // @[util.scala:514:26, :520:18, :526:19, :528:35]
wire _GEN_123 = enq_ptr_value == 3'h4; // @[Counter.scala:61:40]
wire _GEN_124 = do_enq & _GEN_123; // @[util.scala:514:26, :520:18, :526:19, :528:35]
wire _GEN_125 = enq_ptr_value == 3'h5; // @[Counter.scala:61:40]
wire _GEN_126 = do_enq & _GEN_125; // @[util.scala:514:26, :520:18, :526:19, :528:35]
wire _GEN_127 = enq_ptr_value == 3'h6; // @[Counter.scala:61:40]
wire _GEN_128 = do_enq & _GEN_127; // @[util.scala:514:26, :520:18, :526:19, :528:35]
wire _GEN_129 = do_enq & (&enq_ptr_value); // @[Counter.scala:61:40]
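  // Note (editorial, not firtool output): _GEN_115.._GEN_129 above decode the enqueue pointer into
  // per-entry write enables (entry k is written when do_enq is asserted and enq_ptr_value == k).
  // The sequential block below holds the queue state: reset clears every valid bit, both pointers,
  // and maybe_full; otherwise each valid bit is cleared by a dequeue of that entry, set by an
  // enqueue into it, and otherwise held through the _valids_k_T_7 terms (presumably the
  // branch-kill/flush masking in the Chisel source).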
always @(posedge clock) begin // @[util.scala:458:7]
if (reset) begin // @[util.scala:458:7]
valids_0 <= 1'h0; // @[util.scala:504:26]
valids_1 <= 1'h0; // @[util.scala:504:26]
valids_2 <= 1'h0; // @[util.scala:504:26]
valids_3 <= 1'h0; // @[util.scala:504:26]
valids_4 <= 1'h0; // @[util.scala:504:26]
valids_5 <= 1'h0; // @[util.scala:504:26]
valids_6 <= 1'h0; // @[util.scala:504:26]
valids_7 <= 1'h0; // @[util.scala:504:26]
enq_ptr_value <= 3'h0; // @[Counter.scala:61:40]
deq_ptr_value <= 3'h0; // @[Counter.scala:61:40]
maybe_full <= 1'h0; // @[util.scala:509:29]
end
else begin // @[util.scala:458:7]
valids_0 <= ~(do_deq & deq_ptr_value == 3'h0) & (_GEN_116 | _valids_0_T_7); // @[Counter.scala:61:40]
valids_1 <= ~(do_deq & deq_ptr_value == 3'h1) & (_GEN_118 | _valids_1_T_7); // @[Counter.scala:61:40]
valids_2 <= ~(do_deq & deq_ptr_value == 3'h2) & (_GEN_120 | _valids_2_T_7); // @[Counter.scala:61:40]
valids_3 <= ~(do_deq & deq_ptr_value == 3'h3) & (_GEN_122 | _valids_3_T_7); // @[Counter.scala:61:40]
valids_4 <= ~(do_deq & deq_ptr_value == 3'h4) & (_GEN_124 | _valids_4_T_7); // @[Counter.scala:61:40]
valids_5 <= ~(do_deq & deq_ptr_value == 3'h5) & (_GEN_126 | _valids_5_T_7); // @[Counter.scala:61:40]
valids_6 <= ~(do_deq & deq_ptr_value == 3'h6) & (_GEN_128 | _valids_6_T_7); // @[Counter.scala:61:40]
valids_7 <= ~(do_deq & (&deq_ptr_value)) & (_GEN_129 | _valids_7_T_7); // @[Counter.scala:61:40]
if (do_enq) // @[util.scala:514:26]
enq_ptr_value <= _value_T_1; // @[Counter.scala:61:40, :77:24]
if (do_deq) // @[util.scala:515:26]
deq_ptr_value <= _value_T_3; // @[Counter.scala:61:40, :77:24]
if (~(do_enq == do_deq)) // @[util.scala:509:29, :514:26, :515:26, :539:{18,30}, :540:18]
maybe_full <= do_enq; // @[util.scala:509:29, :514:26]
end
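    // Note (editorial, not firtool output): each per-entry block below captures the enqueued uop
    // payload when that entry's write enable (_GEN_116, _GEN_118, ...) fires. br_mask is registered
    // separately after each block because it must also update while the entry is resident: when the
    // entry is valid, it takes _uops_k_br_mask_T_1, which in the source presumably clears the bits
    // of branches that have since resolved.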
if (_GEN_116) begin // @[util.scala:520:18, :526:19, :528:35]
uops_0_inst <= io_enq_bits_uop_inst_0; // @[util.scala:458:7, :505:22]
uops_0_debug_inst <= io_enq_bits_uop_debug_inst_0; // @[util.scala:458:7, :505:22]
uops_0_is_rvc <= io_enq_bits_uop_is_rvc_0; // @[util.scala:458:7, :505:22]
uops_0_debug_pc <= io_enq_bits_uop_debug_pc_0; // @[util.scala:458:7, :505:22]
uops_0_iq_type_0 <= io_enq_bits_uop_iq_type_0_0; // @[util.scala:458:7, :505:22]
uops_0_iq_type_1 <= io_enq_bits_uop_iq_type_1_0; // @[util.scala:458:7, :505:22]
uops_0_iq_type_2 <= io_enq_bits_uop_iq_type_2_0; // @[util.scala:458:7, :505:22]
uops_0_iq_type_3 <= io_enq_bits_uop_iq_type_3_0; // @[util.scala:458:7, :505:22]
uops_0_fu_code_0 <= io_enq_bits_uop_fu_code_0_0; // @[util.scala:458:7, :505:22]
uops_0_fu_code_1 <= io_enq_bits_uop_fu_code_1_0; // @[util.scala:458:7, :505:22]
uops_0_fu_code_2 <= io_enq_bits_uop_fu_code_2_0; // @[util.scala:458:7, :505:22]
uops_0_fu_code_3 <= io_enq_bits_uop_fu_code_3_0; // @[util.scala:458:7, :505:22]
uops_0_fu_code_4 <= io_enq_bits_uop_fu_code_4_0; // @[util.scala:458:7, :505:22]
uops_0_fu_code_5 <= io_enq_bits_uop_fu_code_5_0; // @[util.scala:458:7, :505:22]
uops_0_fu_code_6 <= io_enq_bits_uop_fu_code_6_0; // @[util.scala:458:7, :505:22]
uops_0_fu_code_7 <= io_enq_bits_uop_fu_code_7_0; // @[util.scala:458:7, :505:22]
uops_0_fu_code_8 <= io_enq_bits_uop_fu_code_8_0; // @[util.scala:458:7, :505:22]
uops_0_fu_code_9 <= io_enq_bits_uop_fu_code_9_0; // @[util.scala:458:7, :505:22]
uops_0_iw_issued <= io_enq_bits_uop_iw_issued_0; // @[util.scala:458:7, :505:22]
uops_0_iw_issued_partial_agen <= io_enq_bits_uop_iw_issued_partial_agen_0; // @[util.scala:458:7, :505:22]
uops_0_iw_issued_partial_dgen <= io_enq_bits_uop_iw_issued_partial_dgen_0; // @[util.scala:458:7, :505:22]
uops_0_iw_p1_speculative_child <= io_enq_bits_uop_iw_p1_speculative_child_0; // @[util.scala:458:7, :505:22]
uops_0_iw_p2_speculative_child <= io_enq_bits_uop_iw_p2_speculative_child_0; // @[util.scala:458:7, :505:22]
uops_0_iw_p1_bypass_hint <= io_enq_bits_uop_iw_p1_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_0_iw_p2_bypass_hint <= io_enq_bits_uop_iw_p2_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_0_iw_p3_bypass_hint <= io_enq_bits_uop_iw_p3_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_0_dis_col_sel <= io_enq_bits_uop_dis_col_sel_0; // @[util.scala:458:7, :505:22]
uops_0_br_tag <= io_enq_bits_uop_br_tag_0; // @[util.scala:458:7, :505:22]
uops_0_br_type <= io_enq_bits_uop_br_type_0; // @[util.scala:458:7, :505:22]
uops_0_is_sfb <= io_enq_bits_uop_is_sfb_0; // @[util.scala:458:7, :505:22]
uops_0_is_fence <= io_enq_bits_uop_is_fence_0; // @[util.scala:458:7, :505:22]
uops_0_is_fencei <= io_enq_bits_uop_is_fencei_0; // @[util.scala:458:7, :505:22]
uops_0_is_sfence <= io_enq_bits_uop_is_sfence_0; // @[util.scala:458:7, :505:22]
uops_0_is_amo <= io_enq_bits_uop_is_amo_0; // @[util.scala:458:7, :505:22]
uops_0_is_eret <= io_enq_bits_uop_is_eret_0; // @[util.scala:458:7, :505:22]
uops_0_is_sys_pc2epc <= io_enq_bits_uop_is_sys_pc2epc_0; // @[util.scala:458:7, :505:22]
uops_0_is_rocc <= io_enq_bits_uop_is_rocc_0; // @[util.scala:458:7, :505:22]
uops_0_is_mov <= io_enq_bits_uop_is_mov_0; // @[util.scala:458:7, :505:22]
uops_0_ftq_idx <= io_enq_bits_uop_ftq_idx_0; // @[util.scala:458:7, :505:22]
uops_0_edge_inst <= io_enq_bits_uop_edge_inst_0; // @[util.scala:458:7, :505:22]
uops_0_pc_lob <= io_enq_bits_uop_pc_lob_0; // @[util.scala:458:7, :505:22]
uops_0_taken <= io_enq_bits_uop_taken_0; // @[util.scala:458:7, :505:22]
uops_0_imm_rename <= io_enq_bits_uop_imm_rename_0; // @[util.scala:458:7, :505:22]
uops_0_imm_sel <= io_enq_bits_uop_imm_sel_0; // @[util.scala:458:7, :505:22]
uops_0_pimm <= io_enq_bits_uop_pimm_0; // @[util.scala:458:7, :505:22]
uops_0_imm_packed <= io_enq_bits_uop_imm_packed_0; // @[util.scala:458:7, :505:22]
uops_0_op1_sel <= io_enq_bits_uop_op1_sel_0; // @[util.scala:458:7, :505:22]
uops_0_op2_sel <= io_enq_bits_uop_op2_sel_0; // @[util.scala:458:7, :505:22]
uops_0_fp_ctrl_ldst <= io_enq_bits_uop_fp_ctrl_ldst_0; // @[util.scala:458:7, :505:22]
uops_0_fp_ctrl_wen <= io_enq_bits_uop_fp_ctrl_wen_0; // @[util.scala:458:7, :505:22]
uops_0_fp_ctrl_ren1 <= io_enq_bits_uop_fp_ctrl_ren1_0; // @[util.scala:458:7, :505:22]
uops_0_fp_ctrl_ren2 <= io_enq_bits_uop_fp_ctrl_ren2_0; // @[util.scala:458:7, :505:22]
uops_0_fp_ctrl_ren3 <= io_enq_bits_uop_fp_ctrl_ren3_0; // @[util.scala:458:7, :505:22]
uops_0_fp_ctrl_swap12 <= io_enq_bits_uop_fp_ctrl_swap12_0; // @[util.scala:458:7, :505:22]
uops_0_fp_ctrl_swap23 <= io_enq_bits_uop_fp_ctrl_swap23_0; // @[util.scala:458:7, :505:22]
uops_0_fp_ctrl_typeTagIn <= io_enq_bits_uop_fp_ctrl_typeTagIn_0; // @[util.scala:458:7, :505:22]
uops_0_fp_ctrl_typeTagOut <= io_enq_bits_uop_fp_ctrl_typeTagOut_0; // @[util.scala:458:7, :505:22]
uops_0_fp_ctrl_fromint <= io_enq_bits_uop_fp_ctrl_fromint_0; // @[util.scala:458:7, :505:22]
uops_0_fp_ctrl_toint <= io_enq_bits_uop_fp_ctrl_toint_0; // @[util.scala:458:7, :505:22]
uops_0_fp_ctrl_fastpipe <= io_enq_bits_uop_fp_ctrl_fastpipe_0; // @[util.scala:458:7, :505:22]
uops_0_fp_ctrl_fma <= io_enq_bits_uop_fp_ctrl_fma_0; // @[util.scala:458:7, :505:22]
uops_0_fp_ctrl_div <= io_enq_bits_uop_fp_ctrl_div_0; // @[util.scala:458:7, :505:22]
uops_0_fp_ctrl_sqrt <= io_enq_bits_uop_fp_ctrl_sqrt_0; // @[util.scala:458:7, :505:22]
uops_0_fp_ctrl_wflags <= io_enq_bits_uop_fp_ctrl_wflags_0; // @[util.scala:458:7, :505:22]
uops_0_fp_ctrl_vec <= io_enq_bits_uop_fp_ctrl_vec_0; // @[util.scala:458:7, :505:22]
uops_0_rob_idx <= io_enq_bits_uop_rob_idx_0; // @[util.scala:458:7, :505:22]
uops_0_ldq_idx <= io_enq_bits_uop_ldq_idx_0; // @[util.scala:458:7, :505:22]
uops_0_stq_idx <= io_enq_bits_uop_stq_idx_0; // @[util.scala:458:7, :505:22]
uops_0_rxq_idx <= io_enq_bits_uop_rxq_idx_0; // @[util.scala:458:7, :505:22]
uops_0_pdst <= io_enq_bits_uop_pdst_0; // @[util.scala:458:7, :505:22]
uops_0_prs1 <= io_enq_bits_uop_prs1_0; // @[util.scala:458:7, :505:22]
uops_0_prs2 <= io_enq_bits_uop_prs2_0; // @[util.scala:458:7, :505:22]
uops_0_prs3 <= io_enq_bits_uop_prs3_0; // @[util.scala:458:7, :505:22]
uops_0_ppred <= io_enq_bits_uop_ppred_0; // @[util.scala:458:7, :505:22]
uops_0_prs1_busy <= io_enq_bits_uop_prs1_busy_0; // @[util.scala:458:7, :505:22]
uops_0_prs2_busy <= io_enq_bits_uop_prs2_busy_0; // @[util.scala:458:7, :505:22]
uops_0_prs3_busy <= io_enq_bits_uop_prs3_busy_0; // @[util.scala:458:7, :505:22]
uops_0_ppred_busy <= io_enq_bits_uop_ppred_busy_0; // @[util.scala:458:7, :505:22]
uops_0_stale_pdst <= io_enq_bits_uop_stale_pdst_0; // @[util.scala:458:7, :505:22]
uops_0_exception <= io_enq_bits_uop_exception_0; // @[util.scala:458:7, :505:22]
uops_0_exc_cause <= io_enq_bits_uop_exc_cause_0; // @[util.scala:458:7, :505:22]
uops_0_mem_cmd <= io_enq_bits_uop_mem_cmd_0; // @[util.scala:458:7, :505:22]
uops_0_mem_size <= io_enq_bits_uop_mem_size_0; // @[util.scala:458:7, :505:22]
uops_0_mem_signed <= io_enq_bits_uop_mem_signed_0; // @[util.scala:458:7, :505:22]
uops_0_uses_ldq <= io_enq_bits_uop_uses_ldq_0; // @[util.scala:458:7, :505:22]
uops_0_uses_stq <= io_enq_bits_uop_uses_stq_0; // @[util.scala:458:7, :505:22]
uops_0_is_unique <= io_enq_bits_uop_is_unique_0; // @[util.scala:458:7, :505:22]
uops_0_flush_on_commit <= io_enq_bits_uop_flush_on_commit_0; // @[util.scala:458:7, :505:22]
uops_0_csr_cmd <= io_enq_bits_uop_csr_cmd_0; // @[util.scala:458:7, :505:22]
uops_0_ldst_is_rs1 <= io_enq_bits_uop_ldst_is_rs1_0; // @[util.scala:458:7, :505:22]
uops_0_ldst <= io_enq_bits_uop_ldst_0; // @[util.scala:458:7, :505:22]
uops_0_lrs1 <= io_enq_bits_uop_lrs1_0; // @[util.scala:458:7, :505:22]
uops_0_lrs2 <= io_enq_bits_uop_lrs2_0; // @[util.scala:458:7, :505:22]
uops_0_lrs3 <= io_enq_bits_uop_lrs3_0; // @[util.scala:458:7, :505:22]
uops_0_dst_rtype <= io_enq_bits_uop_dst_rtype_0; // @[util.scala:458:7, :505:22]
uops_0_lrs1_rtype <= io_enq_bits_uop_lrs1_rtype_0; // @[util.scala:458:7, :505:22]
uops_0_lrs2_rtype <= io_enq_bits_uop_lrs2_rtype_0; // @[util.scala:458:7, :505:22]
uops_0_frs3_en <= io_enq_bits_uop_frs3_en_0; // @[util.scala:458:7, :505:22]
uops_0_fcn_dw <= io_enq_bits_uop_fcn_dw_0; // @[util.scala:458:7, :505:22]
uops_0_fcn_op <= io_enq_bits_uop_fcn_op_0; // @[util.scala:458:7, :505:22]
uops_0_fp_val <= io_enq_bits_uop_fp_val_0; // @[util.scala:458:7, :505:22]
uops_0_fp_rm <= io_enq_bits_uop_fp_rm_0; // @[util.scala:458:7, :505:22]
uops_0_fp_typ <= io_enq_bits_uop_fp_typ_0; // @[util.scala:458:7, :505:22]
uops_0_xcpt_pf_if <= io_enq_bits_uop_xcpt_pf_if_0; // @[util.scala:458:7, :505:22]
uops_0_xcpt_ae_if <= io_enq_bits_uop_xcpt_ae_if_0; // @[util.scala:458:7, :505:22]
uops_0_xcpt_ma_if <= io_enq_bits_uop_xcpt_ma_if_0; // @[util.scala:458:7, :505:22]
uops_0_bp_debug_if <= io_enq_bits_uop_bp_debug_if_0; // @[util.scala:458:7, :505:22]
uops_0_bp_xcpt_if <= io_enq_bits_uop_bp_xcpt_if_0; // @[util.scala:458:7, :505:22]
uops_0_debug_fsrc <= io_enq_bits_uop_debug_fsrc_0; // @[util.scala:458:7, :505:22]
uops_0_debug_tsrc <= io_enq_bits_uop_debug_tsrc_0; // @[util.scala:458:7, :505:22]
end
if (do_enq & _GEN_115) // @[util.scala:514:26, :521:24, :526:19, :528:35, :530:35]
uops_0_br_mask <= _uops_br_mask_T_1; // @[util.scala:93:25, :505:22]
else if (valids_0) // @[util.scala:504:26]
uops_0_br_mask <= _uops_0_br_mask_T_1; // @[util.scala:97:21, :505:22]
if (_GEN_118) begin // @[util.scala:520:18, :526:19, :528:35]
uops_1_inst <= io_enq_bits_uop_inst_0; // @[util.scala:458:7, :505:22]
uops_1_debug_inst <= io_enq_bits_uop_debug_inst_0; // @[util.scala:458:7, :505:22]
uops_1_is_rvc <= io_enq_bits_uop_is_rvc_0; // @[util.scala:458:7, :505:22]
uops_1_debug_pc <= io_enq_bits_uop_debug_pc_0; // @[util.scala:458:7, :505:22]
uops_1_iq_type_0 <= io_enq_bits_uop_iq_type_0_0; // @[util.scala:458:7, :505:22]
uops_1_iq_type_1 <= io_enq_bits_uop_iq_type_1_0; // @[util.scala:458:7, :505:22]
uops_1_iq_type_2 <= io_enq_bits_uop_iq_type_2_0; // @[util.scala:458:7, :505:22]
uops_1_iq_type_3 <= io_enq_bits_uop_iq_type_3_0; // @[util.scala:458:7, :505:22]
uops_1_fu_code_0 <= io_enq_bits_uop_fu_code_0_0; // @[util.scala:458:7, :505:22]
uops_1_fu_code_1 <= io_enq_bits_uop_fu_code_1_0; // @[util.scala:458:7, :505:22]
uops_1_fu_code_2 <= io_enq_bits_uop_fu_code_2_0; // @[util.scala:458:7, :505:22]
uops_1_fu_code_3 <= io_enq_bits_uop_fu_code_3_0; // @[util.scala:458:7, :505:22]
uops_1_fu_code_4 <= io_enq_bits_uop_fu_code_4_0; // @[util.scala:458:7, :505:22]
uops_1_fu_code_5 <= io_enq_bits_uop_fu_code_5_0; // @[util.scala:458:7, :505:22]
uops_1_fu_code_6 <= io_enq_bits_uop_fu_code_6_0; // @[util.scala:458:7, :505:22]
uops_1_fu_code_7 <= io_enq_bits_uop_fu_code_7_0; // @[util.scala:458:7, :505:22]
uops_1_fu_code_8 <= io_enq_bits_uop_fu_code_8_0; // @[util.scala:458:7, :505:22]
uops_1_fu_code_9 <= io_enq_bits_uop_fu_code_9_0; // @[util.scala:458:7, :505:22]
uops_1_iw_issued <= io_enq_bits_uop_iw_issued_0; // @[util.scala:458:7, :505:22]
uops_1_iw_issued_partial_agen <= io_enq_bits_uop_iw_issued_partial_agen_0; // @[util.scala:458:7, :505:22]
uops_1_iw_issued_partial_dgen <= io_enq_bits_uop_iw_issued_partial_dgen_0; // @[util.scala:458:7, :505:22]
uops_1_iw_p1_speculative_child <= io_enq_bits_uop_iw_p1_speculative_child_0; // @[util.scala:458:7, :505:22]
uops_1_iw_p2_speculative_child <= io_enq_bits_uop_iw_p2_speculative_child_0; // @[util.scala:458:7, :505:22]
uops_1_iw_p1_bypass_hint <= io_enq_bits_uop_iw_p1_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_1_iw_p2_bypass_hint <= io_enq_bits_uop_iw_p2_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_1_iw_p3_bypass_hint <= io_enq_bits_uop_iw_p3_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_1_dis_col_sel <= io_enq_bits_uop_dis_col_sel_0; // @[util.scala:458:7, :505:22]
uops_1_br_tag <= io_enq_bits_uop_br_tag_0; // @[util.scala:458:7, :505:22]
uops_1_br_type <= io_enq_bits_uop_br_type_0; // @[util.scala:458:7, :505:22]
uops_1_is_sfb <= io_enq_bits_uop_is_sfb_0; // @[util.scala:458:7, :505:22]
uops_1_is_fence <= io_enq_bits_uop_is_fence_0; // @[util.scala:458:7, :505:22]
uops_1_is_fencei <= io_enq_bits_uop_is_fencei_0; // @[util.scala:458:7, :505:22]
uops_1_is_sfence <= io_enq_bits_uop_is_sfence_0; // @[util.scala:458:7, :505:22]
uops_1_is_amo <= io_enq_bits_uop_is_amo_0; // @[util.scala:458:7, :505:22]
uops_1_is_eret <= io_enq_bits_uop_is_eret_0; // @[util.scala:458:7, :505:22]
uops_1_is_sys_pc2epc <= io_enq_bits_uop_is_sys_pc2epc_0; // @[util.scala:458:7, :505:22]
uops_1_is_rocc <= io_enq_bits_uop_is_rocc_0; // @[util.scala:458:7, :505:22]
uops_1_is_mov <= io_enq_bits_uop_is_mov_0; // @[util.scala:458:7, :505:22]
uops_1_ftq_idx <= io_enq_bits_uop_ftq_idx_0; // @[util.scala:458:7, :505:22]
uops_1_edge_inst <= io_enq_bits_uop_edge_inst_0; // @[util.scala:458:7, :505:22]
uops_1_pc_lob <= io_enq_bits_uop_pc_lob_0; // @[util.scala:458:7, :505:22]
uops_1_taken <= io_enq_bits_uop_taken_0; // @[util.scala:458:7, :505:22]
uops_1_imm_rename <= io_enq_bits_uop_imm_rename_0; // @[util.scala:458:7, :505:22]
uops_1_imm_sel <= io_enq_bits_uop_imm_sel_0; // @[util.scala:458:7, :505:22]
uops_1_pimm <= io_enq_bits_uop_pimm_0; // @[util.scala:458:7, :505:22]
uops_1_imm_packed <= io_enq_bits_uop_imm_packed_0; // @[util.scala:458:7, :505:22]
uops_1_op1_sel <= io_enq_bits_uop_op1_sel_0; // @[util.scala:458:7, :505:22]
uops_1_op2_sel <= io_enq_bits_uop_op2_sel_0; // @[util.scala:458:7, :505:22]
uops_1_fp_ctrl_ldst <= io_enq_bits_uop_fp_ctrl_ldst_0; // @[util.scala:458:7, :505:22]
uops_1_fp_ctrl_wen <= io_enq_bits_uop_fp_ctrl_wen_0; // @[util.scala:458:7, :505:22]
uops_1_fp_ctrl_ren1 <= io_enq_bits_uop_fp_ctrl_ren1_0; // @[util.scala:458:7, :505:22]
uops_1_fp_ctrl_ren2 <= io_enq_bits_uop_fp_ctrl_ren2_0; // @[util.scala:458:7, :505:22]
uops_1_fp_ctrl_ren3 <= io_enq_bits_uop_fp_ctrl_ren3_0; // @[util.scala:458:7, :505:22]
uops_1_fp_ctrl_swap12 <= io_enq_bits_uop_fp_ctrl_swap12_0; // @[util.scala:458:7, :505:22]
uops_1_fp_ctrl_swap23 <= io_enq_bits_uop_fp_ctrl_swap23_0; // @[util.scala:458:7, :505:22]
uops_1_fp_ctrl_typeTagIn <= io_enq_bits_uop_fp_ctrl_typeTagIn_0; // @[util.scala:458:7, :505:22]
uops_1_fp_ctrl_typeTagOut <= io_enq_bits_uop_fp_ctrl_typeTagOut_0; // @[util.scala:458:7, :505:22]
uops_1_fp_ctrl_fromint <= io_enq_bits_uop_fp_ctrl_fromint_0; // @[util.scala:458:7, :505:22]
uops_1_fp_ctrl_toint <= io_enq_bits_uop_fp_ctrl_toint_0; // @[util.scala:458:7, :505:22]
uops_1_fp_ctrl_fastpipe <= io_enq_bits_uop_fp_ctrl_fastpipe_0; // @[util.scala:458:7, :505:22]
uops_1_fp_ctrl_fma <= io_enq_bits_uop_fp_ctrl_fma_0; // @[util.scala:458:7, :505:22]
uops_1_fp_ctrl_div <= io_enq_bits_uop_fp_ctrl_div_0; // @[util.scala:458:7, :505:22]
uops_1_fp_ctrl_sqrt <= io_enq_bits_uop_fp_ctrl_sqrt_0; // @[util.scala:458:7, :505:22]
uops_1_fp_ctrl_wflags <= io_enq_bits_uop_fp_ctrl_wflags_0; // @[util.scala:458:7, :505:22]
uops_1_fp_ctrl_vec <= io_enq_bits_uop_fp_ctrl_vec_0; // @[util.scala:458:7, :505:22]
uops_1_rob_idx <= io_enq_bits_uop_rob_idx_0; // @[util.scala:458:7, :505:22]
uops_1_ldq_idx <= io_enq_bits_uop_ldq_idx_0; // @[util.scala:458:7, :505:22]
uops_1_stq_idx <= io_enq_bits_uop_stq_idx_0; // @[util.scala:458:7, :505:22]
uops_1_rxq_idx <= io_enq_bits_uop_rxq_idx_0; // @[util.scala:458:7, :505:22]
uops_1_pdst <= io_enq_bits_uop_pdst_0; // @[util.scala:458:7, :505:22]
uops_1_prs1 <= io_enq_bits_uop_prs1_0; // @[util.scala:458:7, :505:22]
uops_1_prs2 <= io_enq_bits_uop_prs2_0; // @[util.scala:458:7, :505:22]
uops_1_prs3 <= io_enq_bits_uop_prs3_0; // @[util.scala:458:7, :505:22]
uops_1_ppred <= io_enq_bits_uop_ppred_0; // @[util.scala:458:7, :505:22]
uops_1_prs1_busy <= io_enq_bits_uop_prs1_busy_0; // @[util.scala:458:7, :505:22]
uops_1_prs2_busy <= io_enq_bits_uop_prs2_busy_0; // @[util.scala:458:7, :505:22]
uops_1_prs3_busy <= io_enq_bits_uop_prs3_busy_0; // @[util.scala:458:7, :505:22]
uops_1_ppred_busy <= io_enq_bits_uop_ppred_busy_0; // @[util.scala:458:7, :505:22]
uops_1_stale_pdst <= io_enq_bits_uop_stale_pdst_0; // @[util.scala:458:7, :505:22]
uops_1_exception <= io_enq_bits_uop_exception_0; // @[util.scala:458:7, :505:22]
uops_1_exc_cause <= io_enq_bits_uop_exc_cause_0; // @[util.scala:458:7, :505:22]
uops_1_mem_cmd <= io_enq_bits_uop_mem_cmd_0; // @[util.scala:458:7, :505:22]
uops_1_mem_size <= io_enq_bits_uop_mem_size_0; // @[util.scala:458:7, :505:22]
uops_1_mem_signed <= io_enq_bits_uop_mem_signed_0; // @[util.scala:458:7, :505:22]
uops_1_uses_ldq <= io_enq_bits_uop_uses_ldq_0; // @[util.scala:458:7, :505:22]
uops_1_uses_stq <= io_enq_bits_uop_uses_stq_0; // @[util.scala:458:7, :505:22]
uops_1_is_unique <= io_enq_bits_uop_is_unique_0; // @[util.scala:458:7, :505:22]
uops_1_flush_on_commit <= io_enq_bits_uop_flush_on_commit_0; // @[util.scala:458:7, :505:22]
uops_1_csr_cmd <= io_enq_bits_uop_csr_cmd_0; // @[util.scala:458:7, :505:22]
uops_1_ldst_is_rs1 <= io_enq_bits_uop_ldst_is_rs1_0; // @[util.scala:458:7, :505:22]
uops_1_ldst <= io_enq_bits_uop_ldst_0; // @[util.scala:458:7, :505:22]
uops_1_lrs1 <= io_enq_bits_uop_lrs1_0; // @[util.scala:458:7, :505:22]
uops_1_lrs2 <= io_enq_bits_uop_lrs2_0; // @[util.scala:458:7, :505:22]
uops_1_lrs3 <= io_enq_bits_uop_lrs3_0; // @[util.scala:458:7, :505:22]
uops_1_dst_rtype <= io_enq_bits_uop_dst_rtype_0; // @[util.scala:458:7, :505:22]
uops_1_lrs1_rtype <= io_enq_bits_uop_lrs1_rtype_0; // @[util.scala:458:7, :505:22]
uops_1_lrs2_rtype <= io_enq_bits_uop_lrs2_rtype_0; // @[util.scala:458:7, :505:22]
uops_1_frs3_en <= io_enq_bits_uop_frs3_en_0; // @[util.scala:458:7, :505:22]
uops_1_fcn_dw <= io_enq_bits_uop_fcn_dw_0; // @[util.scala:458:7, :505:22]
uops_1_fcn_op <= io_enq_bits_uop_fcn_op_0; // @[util.scala:458:7, :505:22]
uops_1_fp_val <= io_enq_bits_uop_fp_val_0; // @[util.scala:458:7, :505:22]
uops_1_fp_rm <= io_enq_bits_uop_fp_rm_0; // @[util.scala:458:7, :505:22]
uops_1_fp_typ <= io_enq_bits_uop_fp_typ_0; // @[util.scala:458:7, :505:22]
uops_1_xcpt_pf_if <= io_enq_bits_uop_xcpt_pf_if_0; // @[util.scala:458:7, :505:22]
uops_1_xcpt_ae_if <= io_enq_bits_uop_xcpt_ae_if_0; // @[util.scala:458:7, :505:22]
uops_1_xcpt_ma_if <= io_enq_bits_uop_xcpt_ma_if_0; // @[util.scala:458:7, :505:22]
uops_1_bp_debug_if <= io_enq_bits_uop_bp_debug_if_0; // @[util.scala:458:7, :505:22]
uops_1_bp_xcpt_if <= io_enq_bits_uop_bp_xcpt_if_0; // @[util.scala:458:7, :505:22]
uops_1_debug_fsrc <= io_enq_bits_uop_debug_fsrc_0; // @[util.scala:458:7, :505:22]
uops_1_debug_tsrc <= io_enq_bits_uop_debug_tsrc_0; // @[util.scala:458:7, :505:22]
end
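      // Branch-mask maintenance for entry 1: on an enqueue into this slot the
      // incoming mask is captured (already filtered at util.scala:93, likely via
      // the GetNewBrMask helper); otherwise, while the entry stays valid, bits for
      // branches resolved this cycle are cleared (util.scala:97). The same pattern
      // repeats for the remaining entries below.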
if (do_enq & _GEN_117) // @[util.scala:514:26, :521:24, :526:19, :528:35, :530:35]
uops_1_br_mask <= _uops_br_mask_T_1; // @[util.scala:93:25, :505:22]
else if (valids_1) // @[util.scala:504:26]
uops_1_br_mask <= _uops_1_br_mask_T_1; // @[util.scala:97:21, :505:22]
if (_GEN_120) begin // @[util.scala:520:18, :526:19, :528:35]
uops_2_inst <= io_enq_bits_uop_inst_0; // @[util.scala:458:7, :505:22]
uops_2_debug_inst <= io_enq_bits_uop_debug_inst_0; // @[util.scala:458:7, :505:22]
uops_2_is_rvc <= io_enq_bits_uop_is_rvc_0; // @[util.scala:458:7, :505:22]
uops_2_debug_pc <= io_enq_bits_uop_debug_pc_0; // @[util.scala:458:7, :505:22]
uops_2_iq_type_0 <= io_enq_bits_uop_iq_type_0_0; // @[util.scala:458:7, :505:22]
uops_2_iq_type_1 <= io_enq_bits_uop_iq_type_1_0; // @[util.scala:458:7, :505:22]
uops_2_iq_type_2 <= io_enq_bits_uop_iq_type_2_0; // @[util.scala:458:7, :505:22]
uops_2_iq_type_3 <= io_enq_bits_uop_iq_type_3_0; // @[util.scala:458:7, :505:22]
uops_2_fu_code_0 <= io_enq_bits_uop_fu_code_0_0; // @[util.scala:458:7, :505:22]
uops_2_fu_code_1 <= io_enq_bits_uop_fu_code_1_0; // @[util.scala:458:7, :505:22]
uops_2_fu_code_2 <= io_enq_bits_uop_fu_code_2_0; // @[util.scala:458:7, :505:22]
uops_2_fu_code_3 <= io_enq_bits_uop_fu_code_3_0; // @[util.scala:458:7, :505:22]
uops_2_fu_code_4 <= io_enq_bits_uop_fu_code_4_0; // @[util.scala:458:7, :505:22]
uops_2_fu_code_5 <= io_enq_bits_uop_fu_code_5_0; // @[util.scala:458:7, :505:22]
uops_2_fu_code_6 <= io_enq_bits_uop_fu_code_6_0; // @[util.scala:458:7, :505:22]
uops_2_fu_code_7 <= io_enq_bits_uop_fu_code_7_0; // @[util.scala:458:7, :505:22]
uops_2_fu_code_8 <= io_enq_bits_uop_fu_code_8_0; // @[util.scala:458:7, :505:22]
uops_2_fu_code_9 <= io_enq_bits_uop_fu_code_9_0; // @[util.scala:458:7, :505:22]
uops_2_iw_issued <= io_enq_bits_uop_iw_issued_0; // @[util.scala:458:7, :505:22]
uops_2_iw_issued_partial_agen <= io_enq_bits_uop_iw_issued_partial_agen_0; // @[util.scala:458:7, :505:22]
uops_2_iw_issued_partial_dgen <= io_enq_bits_uop_iw_issued_partial_dgen_0; // @[util.scala:458:7, :505:22]
uops_2_iw_p1_speculative_child <= io_enq_bits_uop_iw_p1_speculative_child_0; // @[util.scala:458:7, :505:22]
uops_2_iw_p2_speculative_child <= io_enq_bits_uop_iw_p2_speculative_child_0; // @[util.scala:458:7, :505:22]
uops_2_iw_p1_bypass_hint <= io_enq_bits_uop_iw_p1_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_2_iw_p2_bypass_hint <= io_enq_bits_uop_iw_p2_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_2_iw_p3_bypass_hint <= io_enq_bits_uop_iw_p3_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_2_dis_col_sel <= io_enq_bits_uop_dis_col_sel_0; // @[util.scala:458:7, :505:22]
uops_2_br_tag <= io_enq_bits_uop_br_tag_0; // @[util.scala:458:7, :505:22]
uops_2_br_type <= io_enq_bits_uop_br_type_0; // @[util.scala:458:7, :505:22]
uops_2_is_sfb <= io_enq_bits_uop_is_sfb_0; // @[util.scala:458:7, :505:22]
uops_2_is_fence <= io_enq_bits_uop_is_fence_0; // @[util.scala:458:7, :505:22]
uops_2_is_fencei <= io_enq_bits_uop_is_fencei_0; // @[util.scala:458:7, :505:22]
uops_2_is_sfence <= io_enq_bits_uop_is_sfence_0; // @[util.scala:458:7, :505:22]
uops_2_is_amo <= io_enq_bits_uop_is_amo_0; // @[util.scala:458:7, :505:22]
uops_2_is_eret <= io_enq_bits_uop_is_eret_0; // @[util.scala:458:7, :505:22]
uops_2_is_sys_pc2epc <= io_enq_bits_uop_is_sys_pc2epc_0; // @[util.scala:458:7, :505:22]
uops_2_is_rocc <= io_enq_bits_uop_is_rocc_0; // @[util.scala:458:7, :505:22]
uops_2_is_mov <= io_enq_bits_uop_is_mov_0; // @[util.scala:458:7, :505:22]
uops_2_ftq_idx <= io_enq_bits_uop_ftq_idx_0; // @[util.scala:458:7, :505:22]
uops_2_edge_inst <= io_enq_bits_uop_edge_inst_0; // @[util.scala:458:7, :505:22]
uops_2_pc_lob <= io_enq_bits_uop_pc_lob_0; // @[util.scala:458:7, :505:22]
uops_2_taken <= io_enq_bits_uop_taken_0; // @[util.scala:458:7, :505:22]
uops_2_imm_rename <= io_enq_bits_uop_imm_rename_0; // @[util.scala:458:7, :505:22]
uops_2_imm_sel <= io_enq_bits_uop_imm_sel_0; // @[util.scala:458:7, :505:22]
uops_2_pimm <= io_enq_bits_uop_pimm_0; // @[util.scala:458:7, :505:22]
uops_2_imm_packed <= io_enq_bits_uop_imm_packed_0; // @[util.scala:458:7, :505:22]
uops_2_op1_sel <= io_enq_bits_uop_op1_sel_0; // @[util.scala:458:7, :505:22]
uops_2_op2_sel <= io_enq_bits_uop_op2_sel_0; // @[util.scala:458:7, :505:22]
uops_2_fp_ctrl_ldst <= io_enq_bits_uop_fp_ctrl_ldst_0; // @[util.scala:458:7, :505:22]
uops_2_fp_ctrl_wen <= io_enq_bits_uop_fp_ctrl_wen_0; // @[util.scala:458:7, :505:22]
uops_2_fp_ctrl_ren1 <= io_enq_bits_uop_fp_ctrl_ren1_0; // @[util.scala:458:7, :505:22]
uops_2_fp_ctrl_ren2 <= io_enq_bits_uop_fp_ctrl_ren2_0; // @[util.scala:458:7, :505:22]
uops_2_fp_ctrl_ren3 <= io_enq_bits_uop_fp_ctrl_ren3_0; // @[util.scala:458:7, :505:22]
uops_2_fp_ctrl_swap12 <= io_enq_bits_uop_fp_ctrl_swap12_0; // @[util.scala:458:7, :505:22]
uops_2_fp_ctrl_swap23 <= io_enq_bits_uop_fp_ctrl_swap23_0; // @[util.scala:458:7, :505:22]
uops_2_fp_ctrl_typeTagIn <= io_enq_bits_uop_fp_ctrl_typeTagIn_0; // @[util.scala:458:7, :505:22]
uops_2_fp_ctrl_typeTagOut <= io_enq_bits_uop_fp_ctrl_typeTagOut_0; // @[util.scala:458:7, :505:22]
uops_2_fp_ctrl_fromint <= io_enq_bits_uop_fp_ctrl_fromint_0; // @[util.scala:458:7, :505:22]
uops_2_fp_ctrl_toint <= io_enq_bits_uop_fp_ctrl_toint_0; // @[util.scala:458:7, :505:22]
uops_2_fp_ctrl_fastpipe <= io_enq_bits_uop_fp_ctrl_fastpipe_0; // @[util.scala:458:7, :505:22]
uops_2_fp_ctrl_fma <= io_enq_bits_uop_fp_ctrl_fma_0; // @[util.scala:458:7, :505:22]
uops_2_fp_ctrl_div <= io_enq_bits_uop_fp_ctrl_div_0; // @[util.scala:458:7, :505:22]
uops_2_fp_ctrl_sqrt <= io_enq_bits_uop_fp_ctrl_sqrt_0; // @[util.scala:458:7, :505:22]
uops_2_fp_ctrl_wflags <= io_enq_bits_uop_fp_ctrl_wflags_0; // @[util.scala:458:7, :505:22]
uops_2_fp_ctrl_vec <= io_enq_bits_uop_fp_ctrl_vec_0; // @[util.scala:458:7, :505:22]
uops_2_rob_idx <= io_enq_bits_uop_rob_idx_0; // @[util.scala:458:7, :505:22]
uops_2_ldq_idx <= io_enq_bits_uop_ldq_idx_0; // @[util.scala:458:7, :505:22]
uops_2_stq_idx <= io_enq_bits_uop_stq_idx_0; // @[util.scala:458:7, :505:22]
uops_2_rxq_idx <= io_enq_bits_uop_rxq_idx_0; // @[util.scala:458:7, :505:22]
uops_2_pdst <= io_enq_bits_uop_pdst_0; // @[util.scala:458:7, :505:22]
uops_2_prs1 <= io_enq_bits_uop_prs1_0; // @[util.scala:458:7, :505:22]
uops_2_prs2 <= io_enq_bits_uop_prs2_0; // @[util.scala:458:7, :505:22]
uops_2_prs3 <= io_enq_bits_uop_prs3_0; // @[util.scala:458:7, :505:22]
uops_2_ppred <= io_enq_bits_uop_ppred_0; // @[util.scala:458:7, :505:22]
uops_2_prs1_busy <= io_enq_bits_uop_prs1_busy_0; // @[util.scala:458:7, :505:22]
uops_2_prs2_busy <= io_enq_bits_uop_prs2_busy_0; // @[util.scala:458:7, :505:22]
uops_2_prs3_busy <= io_enq_bits_uop_prs3_busy_0; // @[util.scala:458:7, :505:22]
uops_2_ppred_busy <= io_enq_bits_uop_ppred_busy_0; // @[util.scala:458:7, :505:22]
uops_2_stale_pdst <= io_enq_bits_uop_stale_pdst_0; // @[util.scala:458:7, :505:22]
uops_2_exception <= io_enq_bits_uop_exception_0; // @[util.scala:458:7, :505:22]
uops_2_exc_cause <= io_enq_bits_uop_exc_cause_0; // @[util.scala:458:7, :505:22]
uops_2_mem_cmd <= io_enq_bits_uop_mem_cmd_0; // @[util.scala:458:7, :505:22]
uops_2_mem_size <= io_enq_bits_uop_mem_size_0; // @[util.scala:458:7, :505:22]
uops_2_mem_signed <= io_enq_bits_uop_mem_signed_0; // @[util.scala:458:7, :505:22]
uops_2_uses_ldq <= io_enq_bits_uop_uses_ldq_0; // @[util.scala:458:7, :505:22]
uops_2_uses_stq <= io_enq_bits_uop_uses_stq_0; // @[util.scala:458:7, :505:22]
uops_2_is_unique <= io_enq_bits_uop_is_unique_0; // @[util.scala:458:7, :505:22]
uops_2_flush_on_commit <= io_enq_bits_uop_flush_on_commit_0; // @[util.scala:458:7, :505:22]
uops_2_csr_cmd <= io_enq_bits_uop_csr_cmd_0; // @[util.scala:458:7, :505:22]
uops_2_ldst_is_rs1 <= io_enq_bits_uop_ldst_is_rs1_0; // @[util.scala:458:7, :505:22]
uops_2_ldst <= io_enq_bits_uop_ldst_0; // @[util.scala:458:7, :505:22]
uops_2_lrs1 <= io_enq_bits_uop_lrs1_0; // @[util.scala:458:7, :505:22]
uops_2_lrs2 <= io_enq_bits_uop_lrs2_0; // @[util.scala:458:7, :505:22]
uops_2_lrs3 <= io_enq_bits_uop_lrs3_0; // @[util.scala:458:7, :505:22]
uops_2_dst_rtype <= io_enq_bits_uop_dst_rtype_0; // @[util.scala:458:7, :505:22]
uops_2_lrs1_rtype <= io_enq_bits_uop_lrs1_rtype_0; // @[util.scala:458:7, :505:22]
uops_2_lrs2_rtype <= io_enq_bits_uop_lrs2_rtype_0; // @[util.scala:458:7, :505:22]
uops_2_frs3_en <= io_enq_bits_uop_frs3_en_0; // @[util.scala:458:7, :505:22]
uops_2_fcn_dw <= io_enq_bits_uop_fcn_dw_0; // @[util.scala:458:7, :505:22]
uops_2_fcn_op <= io_enq_bits_uop_fcn_op_0; // @[util.scala:458:7, :505:22]
uops_2_fp_val <= io_enq_bits_uop_fp_val_0; // @[util.scala:458:7, :505:22]
uops_2_fp_rm <= io_enq_bits_uop_fp_rm_0; // @[util.scala:458:7, :505:22]
uops_2_fp_typ <= io_enq_bits_uop_fp_typ_0; // @[util.scala:458:7, :505:22]
uops_2_xcpt_pf_if <= io_enq_bits_uop_xcpt_pf_if_0; // @[util.scala:458:7, :505:22]
uops_2_xcpt_ae_if <= io_enq_bits_uop_xcpt_ae_if_0; // @[util.scala:458:7, :505:22]
uops_2_xcpt_ma_if <= io_enq_bits_uop_xcpt_ma_if_0; // @[util.scala:458:7, :505:22]
uops_2_bp_debug_if <= io_enq_bits_uop_bp_debug_if_0; // @[util.scala:458:7, :505:22]
uops_2_bp_xcpt_if <= io_enq_bits_uop_bp_xcpt_if_0; // @[util.scala:458:7, :505:22]
uops_2_debug_fsrc <= io_enq_bits_uop_debug_fsrc_0; // @[util.scala:458:7, :505:22]
uops_2_debug_tsrc <= io_enq_bits_uop_debug_tsrc_0; // @[util.scala:458:7, :505:22]
end
if (do_enq & _GEN_119) // @[util.scala:514:26, :521:24, :526:19, :528:35, :530:35]
uops_2_br_mask <= _uops_br_mask_T_1; // @[util.scala:93:25, :505:22]
else if (valids_2) // @[util.scala:504:26]
uops_2_br_mask <= _uops_2_br_mask_T_1; // @[util.scala:97:21, :505:22]
if (_GEN_122) begin // @[util.scala:520:18, :526:19, :528:35]
uops_3_inst <= io_enq_bits_uop_inst_0; // @[util.scala:458:7, :505:22]
uops_3_debug_inst <= io_enq_bits_uop_debug_inst_0; // @[util.scala:458:7, :505:22]
uops_3_is_rvc <= io_enq_bits_uop_is_rvc_0; // @[util.scala:458:7, :505:22]
uops_3_debug_pc <= io_enq_bits_uop_debug_pc_0; // @[util.scala:458:7, :505:22]
uops_3_iq_type_0 <= io_enq_bits_uop_iq_type_0_0; // @[util.scala:458:7, :505:22]
uops_3_iq_type_1 <= io_enq_bits_uop_iq_type_1_0; // @[util.scala:458:7, :505:22]
uops_3_iq_type_2 <= io_enq_bits_uop_iq_type_2_0; // @[util.scala:458:7, :505:22]
uops_3_iq_type_3 <= io_enq_bits_uop_iq_type_3_0; // @[util.scala:458:7, :505:22]
uops_3_fu_code_0 <= io_enq_bits_uop_fu_code_0_0; // @[util.scala:458:7, :505:22]
uops_3_fu_code_1 <= io_enq_bits_uop_fu_code_1_0; // @[util.scala:458:7, :505:22]
uops_3_fu_code_2 <= io_enq_bits_uop_fu_code_2_0; // @[util.scala:458:7, :505:22]
uops_3_fu_code_3 <= io_enq_bits_uop_fu_code_3_0; // @[util.scala:458:7, :505:22]
uops_3_fu_code_4 <= io_enq_bits_uop_fu_code_4_0; // @[util.scala:458:7, :505:22]
uops_3_fu_code_5 <= io_enq_bits_uop_fu_code_5_0; // @[util.scala:458:7, :505:22]
uops_3_fu_code_6 <= io_enq_bits_uop_fu_code_6_0; // @[util.scala:458:7, :505:22]
uops_3_fu_code_7 <= io_enq_bits_uop_fu_code_7_0; // @[util.scala:458:7, :505:22]
uops_3_fu_code_8 <= io_enq_bits_uop_fu_code_8_0; // @[util.scala:458:7, :505:22]
uops_3_fu_code_9 <= io_enq_bits_uop_fu_code_9_0; // @[util.scala:458:7, :505:22]
uops_3_iw_issued <= io_enq_bits_uop_iw_issued_0; // @[util.scala:458:7, :505:22]
uops_3_iw_issued_partial_agen <= io_enq_bits_uop_iw_issued_partial_agen_0; // @[util.scala:458:7, :505:22]
uops_3_iw_issued_partial_dgen <= io_enq_bits_uop_iw_issued_partial_dgen_0; // @[util.scala:458:7, :505:22]
uops_3_iw_p1_speculative_child <= io_enq_bits_uop_iw_p1_speculative_child_0; // @[util.scala:458:7, :505:22]
uops_3_iw_p2_speculative_child <= io_enq_bits_uop_iw_p2_speculative_child_0; // @[util.scala:458:7, :505:22]
uops_3_iw_p1_bypass_hint <= io_enq_bits_uop_iw_p1_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_3_iw_p2_bypass_hint <= io_enq_bits_uop_iw_p2_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_3_iw_p3_bypass_hint <= io_enq_bits_uop_iw_p3_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_3_dis_col_sel <= io_enq_bits_uop_dis_col_sel_0; // @[util.scala:458:7, :505:22]
uops_3_br_tag <= io_enq_bits_uop_br_tag_0; // @[util.scala:458:7, :505:22]
uops_3_br_type <= io_enq_bits_uop_br_type_0; // @[util.scala:458:7, :505:22]
uops_3_is_sfb <= io_enq_bits_uop_is_sfb_0; // @[util.scala:458:7, :505:22]
uops_3_is_fence <= io_enq_bits_uop_is_fence_0; // @[util.scala:458:7, :505:22]
uops_3_is_fencei <= io_enq_bits_uop_is_fencei_0; // @[util.scala:458:7, :505:22]
uops_3_is_sfence <= io_enq_bits_uop_is_sfence_0; // @[util.scala:458:7, :505:22]
uops_3_is_amo <= io_enq_bits_uop_is_amo_0; // @[util.scala:458:7, :505:22]
uops_3_is_eret <= io_enq_bits_uop_is_eret_0; // @[util.scala:458:7, :505:22]
uops_3_is_sys_pc2epc <= io_enq_bits_uop_is_sys_pc2epc_0; // @[util.scala:458:7, :505:22]
uops_3_is_rocc <= io_enq_bits_uop_is_rocc_0; // @[util.scala:458:7, :505:22]
uops_3_is_mov <= io_enq_bits_uop_is_mov_0; // @[util.scala:458:7, :505:22]
uops_3_ftq_idx <= io_enq_bits_uop_ftq_idx_0; // @[util.scala:458:7, :505:22]
uops_3_edge_inst <= io_enq_bits_uop_edge_inst_0; // @[util.scala:458:7, :505:22]
uops_3_pc_lob <= io_enq_bits_uop_pc_lob_0; // @[util.scala:458:7, :505:22]
uops_3_taken <= io_enq_bits_uop_taken_0; // @[util.scala:458:7, :505:22]
uops_3_imm_rename <= io_enq_bits_uop_imm_rename_0; // @[util.scala:458:7, :505:22]
uops_3_imm_sel <= io_enq_bits_uop_imm_sel_0; // @[util.scala:458:7, :505:22]
uops_3_pimm <= io_enq_bits_uop_pimm_0; // @[util.scala:458:7, :505:22]
uops_3_imm_packed <= io_enq_bits_uop_imm_packed_0; // @[util.scala:458:7, :505:22]
uops_3_op1_sel <= io_enq_bits_uop_op1_sel_0; // @[util.scala:458:7, :505:22]
uops_3_op2_sel <= io_enq_bits_uop_op2_sel_0; // @[util.scala:458:7, :505:22]
uops_3_fp_ctrl_ldst <= io_enq_bits_uop_fp_ctrl_ldst_0; // @[util.scala:458:7, :505:22]
uops_3_fp_ctrl_wen <= io_enq_bits_uop_fp_ctrl_wen_0; // @[util.scala:458:7, :505:22]
uops_3_fp_ctrl_ren1 <= io_enq_bits_uop_fp_ctrl_ren1_0; // @[util.scala:458:7, :505:22]
uops_3_fp_ctrl_ren2 <= io_enq_bits_uop_fp_ctrl_ren2_0; // @[util.scala:458:7, :505:22]
uops_3_fp_ctrl_ren3 <= io_enq_bits_uop_fp_ctrl_ren3_0; // @[util.scala:458:7, :505:22]
uops_3_fp_ctrl_swap12 <= io_enq_bits_uop_fp_ctrl_swap12_0; // @[util.scala:458:7, :505:22]
uops_3_fp_ctrl_swap23 <= io_enq_bits_uop_fp_ctrl_swap23_0; // @[util.scala:458:7, :505:22]
uops_3_fp_ctrl_typeTagIn <= io_enq_bits_uop_fp_ctrl_typeTagIn_0; // @[util.scala:458:7, :505:22]
uops_3_fp_ctrl_typeTagOut <= io_enq_bits_uop_fp_ctrl_typeTagOut_0; // @[util.scala:458:7, :505:22]
uops_3_fp_ctrl_fromint <= io_enq_bits_uop_fp_ctrl_fromint_0; // @[util.scala:458:7, :505:22]
uops_3_fp_ctrl_toint <= io_enq_bits_uop_fp_ctrl_toint_0; // @[util.scala:458:7, :505:22]
uops_3_fp_ctrl_fastpipe <= io_enq_bits_uop_fp_ctrl_fastpipe_0; // @[util.scala:458:7, :505:22]
uops_3_fp_ctrl_fma <= io_enq_bits_uop_fp_ctrl_fma_0; // @[util.scala:458:7, :505:22]
uops_3_fp_ctrl_div <= io_enq_bits_uop_fp_ctrl_div_0; // @[util.scala:458:7, :505:22]
uops_3_fp_ctrl_sqrt <= io_enq_bits_uop_fp_ctrl_sqrt_0; // @[util.scala:458:7, :505:22]
uops_3_fp_ctrl_wflags <= io_enq_bits_uop_fp_ctrl_wflags_0; // @[util.scala:458:7, :505:22]
uops_3_fp_ctrl_vec <= io_enq_bits_uop_fp_ctrl_vec_0; // @[util.scala:458:7, :505:22]
uops_3_rob_idx <= io_enq_bits_uop_rob_idx_0; // @[util.scala:458:7, :505:22]
uops_3_ldq_idx <= io_enq_bits_uop_ldq_idx_0; // @[util.scala:458:7, :505:22]
uops_3_stq_idx <= io_enq_bits_uop_stq_idx_0; // @[util.scala:458:7, :505:22]
uops_3_rxq_idx <= io_enq_bits_uop_rxq_idx_0; // @[util.scala:458:7, :505:22]
uops_3_pdst <= io_enq_bits_uop_pdst_0; // @[util.scala:458:7, :505:22]
uops_3_prs1 <= io_enq_bits_uop_prs1_0; // @[util.scala:458:7, :505:22]
uops_3_prs2 <= io_enq_bits_uop_prs2_0; // @[util.scala:458:7, :505:22]
uops_3_prs3 <= io_enq_bits_uop_prs3_0; // @[util.scala:458:7, :505:22]
uops_3_ppred <= io_enq_bits_uop_ppred_0; // @[util.scala:458:7, :505:22]
uops_3_prs1_busy <= io_enq_bits_uop_prs1_busy_0; // @[util.scala:458:7, :505:22]
uops_3_prs2_busy <= io_enq_bits_uop_prs2_busy_0; // @[util.scala:458:7, :505:22]
uops_3_prs3_busy <= io_enq_bits_uop_prs3_busy_0; // @[util.scala:458:7, :505:22]
uops_3_ppred_busy <= io_enq_bits_uop_ppred_busy_0; // @[util.scala:458:7, :505:22]
uops_3_stale_pdst <= io_enq_bits_uop_stale_pdst_0; // @[util.scala:458:7, :505:22]
uops_3_exception <= io_enq_bits_uop_exception_0; // @[util.scala:458:7, :505:22]
uops_3_exc_cause <= io_enq_bits_uop_exc_cause_0; // @[util.scala:458:7, :505:22]
uops_3_mem_cmd <= io_enq_bits_uop_mem_cmd_0; // @[util.scala:458:7, :505:22]
uops_3_mem_size <= io_enq_bits_uop_mem_size_0; // @[util.scala:458:7, :505:22]
uops_3_mem_signed <= io_enq_bits_uop_mem_signed_0; // @[util.scala:458:7, :505:22]
uops_3_uses_ldq <= io_enq_bits_uop_uses_ldq_0; // @[util.scala:458:7, :505:22]
uops_3_uses_stq <= io_enq_bits_uop_uses_stq_0; // @[util.scala:458:7, :505:22]
uops_3_is_unique <= io_enq_bits_uop_is_unique_0; // @[util.scala:458:7, :505:22]
uops_3_flush_on_commit <= io_enq_bits_uop_flush_on_commit_0; // @[util.scala:458:7, :505:22]
uops_3_csr_cmd <= io_enq_bits_uop_csr_cmd_0; // @[util.scala:458:7, :505:22]
uops_3_ldst_is_rs1 <= io_enq_bits_uop_ldst_is_rs1_0; // @[util.scala:458:7, :505:22]
uops_3_ldst <= io_enq_bits_uop_ldst_0; // @[util.scala:458:7, :505:22]
uops_3_lrs1 <= io_enq_bits_uop_lrs1_0; // @[util.scala:458:7, :505:22]
uops_3_lrs2 <= io_enq_bits_uop_lrs2_0; // @[util.scala:458:7, :505:22]
uops_3_lrs3 <= io_enq_bits_uop_lrs3_0; // @[util.scala:458:7, :505:22]
uops_3_dst_rtype <= io_enq_bits_uop_dst_rtype_0; // @[util.scala:458:7, :505:22]
uops_3_lrs1_rtype <= io_enq_bits_uop_lrs1_rtype_0; // @[util.scala:458:7, :505:22]
uops_3_lrs2_rtype <= io_enq_bits_uop_lrs2_rtype_0; // @[util.scala:458:7, :505:22]
uops_3_frs3_en <= io_enq_bits_uop_frs3_en_0; // @[util.scala:458:7, :505:22]
uops_3_fcn_dw <= io_enq_bits_uop_fcn_dw_0; // @[util.scala:458:7, :505:22]
uops_3_fcn_op <= io_enq_bits_uop_fcn_op_0; // @[util.scala:458:7, :505:22]
uops_3_fp_val <= io_enq_bits_uop_fp_val_0; // @[util.scala:458:7, :505:22]
uops_3_fp_rm <= io_enq_bits_uop_fp_rm_0; // @[util.scala:458:7, :505:22]
uops_3_fp_typ <= io_enq_bits_uop_fp_typ_0; // @[util.scala:458:7, :505:22]
uops_3_xcpt_pf_if <= io_enq_bits_uop_xcpt_pf_if_0; // @[util.scala:458:7, :505:22]
uops_3_xcpt_ae_if <= io_enq_bits_uop_xcpt_ae_if_0; // @[util.scala:458:7, :505:22]
uops_3_xcpt_ma_if <= io_enq_bits_uop_xcpt_ma_if_0; // @[util.scala:458:7, :505:22]
uops_3_bp_debug_if <= io_enq_bits_uop_bp_debug_if_0; // @[util.scala:458:7, :505:22]
uops_3_bp_xcpt_if <= io_enq_bits_uop_bp_xcpt_if_0; // @[util.scala:458:7, :505:22]
uops_3_debug_fsrc <= io_enq_bits_uop_debug_fsrc_0; // @[util.scala:458:7, :505:22]
uops_3_debug_tsrc <= io_enq_bits_uop_debug_tsrc_0; // @[util.scala:458:7, :505:22]
end
if (do_enq & _GEN_121) // @[util.scala:514:26, :521:24, :526:19, :528:35, :530:35]
uops_3_br_mask <= _uops_br_mask_T_1; // @[util.scala:93:25, :505:22]
else if (valids_3) // @[util.scala:504:26]
uops_3_br_mask <= _uops_3_br_mask_T_1; // @[util.scala:97:21, :505:22]
if (_GEN_124) begin // @[util.scala:520:18, :526:19, :528:35]
uops_4_inst <= io_enq_bits_uop_inst_0; // @[util.scala:458:7, :505:22]
uops_4_debug_inst <= io_enq_bits_uop_debug_inst_0; // @[util.scala:458:7, :505:22]
uops_4_is_rvc <= io_enq_bits_uop_is_rvc_0; // @[util.scala:458:7, :505:22]
uops_4_debug_pc <= io_enq_bits_uop_debug_pc_0; // @[util.scala:458:7, :505:22]
uops_4_iq_type_0 <= io_enq_bits_uop_iq_type_0_0; // @[util.scala:458:7, :505:22]
uops_4_iq_type_1 <= io_enq_bits_uop_iq_type_1_0; // @[util.scala:458:7, :505:22]
uops_4_iq_type_2 <= io_enq_bits_uop_iq_type_2_0; // @[util.scala:458:7, :505:22]
uops_4_iq_type_3 <= io_enq_bits_uop_iq_type_3_0; // @[util.scala:458:7, :505:22]
uops_4_fu_code_0 <= io_enq_bits_uop_fu_code_0_0; // @[util.scala:458:7, :505:22]
uops_4_fu_code_1 <= io_enq_bits_uop_fu_code_1_0; // @[util.scala:458:7, :505:22]
uops_4_fu_code_2 <= io_enq_bits_uop_fu_code_2_0; // @[util.scala:458:7, :505:22]
uops_4_fu_code_3 <= io_enq_bits_uop_fu_code_3_0; // @[util.scala:458:7, :505:22]
uops_4_fu_code_4 <= io_enq_bits_uop_fu_code_4_0; // @[util.scala:458:7, :505:22]
uops_4_fu_code_5 <= io_enq_bits_uop_fu_code_5_0; // @[util.scala:458:7, :505:22]
uops_4_fu_code_6 <= io_enq_bits_uop_fu_code_6_0; // @[util.scala:458:7, :505:22]
uops_4_fu_code_7 <= io_enq_bits_uop_fu_code_7_0; // @[util.scala:458:7, :505:22]
uops_4_fu_code_8 <= io_enq_bits_uop_fu_code_8_0; // @[util.scala:458:7, :505:22]
uops_4_fu_code_9 <= io_enq_bits_uop_fu_code_9_0; // @[util.scala:458:7, :505:22]
uops_4_iw_issued <= io_enq_bits_uop_iw_issued_0; // @[util.scala:458:7, :505:22]
uops_4_iw_issued_partial_agen <= io_enq_bits_uop_iw_issued_partial_agen_0; // @[util.scala:458:7, :505:22]
uops_4_iw_issued_partial_dgen <= io_enq_bits_uop_iw_issued_partial_dgen_0; // @[util.scala:458:7, :505:22]
uops_4_iw_p1_speculative_child <= io_enq_bits_uop_iw_p1_speculative_child_0; // @[util.scala:458:7, :505:22]
uops_4_iw_p2_speculative_child <= io_enq_bits_uop_iw_p2_speculative_child_0; // @[util.scala:458:7, :505:22]
uops_4_iw_p1_bypass_hint <= io_enq_bits_uop_iw_p1_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_4_iw_p2_bypass_hint <= io_enq_bits_uop_iw_p2_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_4_iw_p3_bypass_hint <= io_enq_bits_uop_iw_p3_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_4_dis_col_sel <= io_enq_bits_uop_dis_col_sel_0; // @[util.scala:458:7, :505:22]
uops_4_br_tag <= io_enq_bits_uop_br_tag_0; // @[util.scala:458:7, :505:22]
uops_4_br_type <= io_enq_bits_uop_br_type_0; // @[util.scala:458:7, :505:22]
uops_4_is_sfb <= io_enq_bits_uop_is_sfb_0; // @[util.scala:458:7, :505:22]
uops_4_is_fence <= io_enq_bits_uop_is_fence_0; // @[util.scala:458:7, :505:22]
uops_4_is_fencei <= io_enq_bits_uop_is_fencei_0; // @[util.scala:458:7, :505:22]
uops_4_is_sfence <= io_enq_bits_uop_is_sfence_0; // @[util.scala:458:7, :505:22]
uops_4_is_amo <= io_enq_bits_uop_is_amo_0; // @[util.scala:458:7, :505:22]
uops_4_is_eret <= io_enq_bits_uop_is_eret_0; // @[util.scala:458:7, :505:22]
uops_4_is_sys_pc2epc <= io_enq_bits_uop_is_sys_pc2epc_0; // @[util.scala:458:7, :505:22]
uops_4_is_rocc <= io_enq_bits_uop_is_rocc_0; // @[util.scala:458:7, :505:22]
uops_4_is_mov <= io_enq_bits_uop_is_mov_0; // @[util.scala:458:7, :505:22]
uops_4_ftq_idx <= io_enq_bits_uop_ftq_idx_0; // @[util.scala:458:7, :505:22]
uops_4_edge_inst <= io_enq_bits_uop_edge_inst_0; // @[util.scala:458:7, :505:22]
uops_4_pc_lob <= io_enq_bits_uop_pc_lob_0; // @[util.scala:458:7, :505:22]
uops_4_taken <= io_enq_bits_uop_taken_0; // @[util.scala:458:7, :505:22]
uops_4_imm_rename <= io_enq_bits_uop_imm_rename_0; // @[util.scala:458:7, :505:22]
uops_4_imm_sel <= io_enq_bits_uop_imm_sel_0; // @[util.scala:458:7, :505:22]
uops_4_pimm <= io_enq_bits_uop_pimm_0; // @[util.scala:458:7, :505:22]
uops_4_imm_packed <= io_enq_bits_uop_imm_packed_0; // @[util.scala:458:7, :505:22]
uops_4_op1_sel <= io_enq_bits_uop_op1_sel_0; // @[util.scala:458:7, :505:22]
uops_4_op2_sel <= io_enq_bits_uop_op2_sel_0; // @[util.scala:458:7, :505:22]
uops_4_fp_ctrl_ldst <= io_enq_bits_uop_fp_ctrl_ldst_0; // @[util.scala:458:7, :505:22]
uops_4_fp_ctrl_wen <= io_enq_bits_uop_fp_ctrl_wen_0; // @[util.scala:458:7, :505:22]
uops_4_fp_ctrl_ren1 <= io_enq_bits_uop_fp_ctrl_ren1_0; // @[util.scala:458:7, :505:22]
uops_4_fp_ctrl_ren2 <= io_enq_bits_uop_fp_ctrl_ren2_0; // @[util.scala:458:7, :505:22]
uops_4_fp_ctrl_ren3 <= io_enq_bits_uop_fp_ctrl_ren3_0; // @[util.scala:458:7, :505:22]
uops_4_fp_ctrl_swap12 <= io_enq_bits_uop_fp_ctrl_swap12_0; // @[util.scala:458:7, :505:22]
uops_4_fp_ctrl_swap23 <= io_enq_bits_uop_fp_ctrl_swap23_0; // @[util.scala:458:7, :505:22]
uops_4_fp_ctrl_typeTagIn <= io_enq_bits_uop_fp_ctrl_typeTagIn_0; // @[util.scala:458:7, :505:22]
uops_4_fp_ctrl_typeTagOut <= io_enq_bits_uop_fp_ctrl_typeTagOut_0; // @[util.scala:458:7, :505:22]
uops_4_fp_ctrl_fromint <= io_enq_bits_uop_fp_ctrl_fromint_0; // @[util.scala:458:7, :505:22]
uops_4_fp_ctrl_toint <= io_enq_bits_uop_fp_ctrl_toint_0; // @[util.scala:458:7, :505:22]
uops_4_fp_ctrl_fastpipe <= io_enq_bits_uop_fp_ctrl_fastpipe_0; // @[util.scala:458:7, :505:22]
uops_4_fp_ctrl_fma <= io_enq_bits_uop_fp_ctrl_fma_0; // @[util.scala:458:7, :505:22]
uops_4_fp_ctrl_div <= io_enq_bits_uop_fp_ctrl_div_0; // @[util.scala:458:7, :505:22]
uops_4_fp_ctrl_sqrt <= io_enq_bits_uop_fp_ctrl_sqrt_0; // @[util.scala:458:7, :505:22]
uops_4_fp_ctrl_wflags <= io_enq_bits_uop_fp_ctrl_wflags_0; // @[util.scala:458:7, :505:22]
uops_4_fp_ctrl_vec <= io_enq_bits_uop_fp_ctrl_vec_0; // @[util.scala:458:7, :505:22]
uops_4_rob_idx <= io_enq_bits_uop_rob_idx_0; // @[util.scala:458:7, :505:22]
uops_4_ldq_idx <= io_enq_bits_uop_ldq_idx_0; // @[util.scala:458:7, :505:22]
uops_4_stq_idx <= io_enq_bits_uop_stq_idx_0; // @[util.scala:458:7, :505:22]
uops_4_rxq_idx <= io_enq_bits_uop_rxq_idx_0; // @[util.scala:458:7, :505:22]
uops_4_pdst <= io_enq_bits_uop_pdst_0; // @[util.scala:458:7, :505:22]
uops_4_prs1 <= io_enq_bits_uop_prs1_0; // @[util.scala:458:7, :505:22]
uops_4_prs2 <= io_enq_bits_uop_prs2_0; // @[util.scala:458:7, :505:22]
uops_4_prs3 <= io_enq_bits_uop_prs3_0; // @[util.scala:458:7, :505:22]
uops_4_ppred <= io_enq_bits_uop_ppred_0; // @[util.scala:458:7, :505:22]
uops_4_prs1_busy <= io_enq_bits_uop_prs1_busy_0; // @[util.scala:458:7, :505:22]
uops_4_prs2_busy <= io_enq_bits_uop_prs2_busy_0; // @[util.scala:458:7, :505:22]
uops_4_prs3_busy <= io_enq_bits_uop_prs3_busy_0; // @[util.scala:458:7, :505:22]
uops_4_ppred_busy <= io_enq_bits_uop_ppred_busy_0; // @[util.scala:458:7, :505:22]
uops_4_stale_pdst <= io_enq_bits_uop_stale_pdst_0; // @[util.scala:458:7, :505:22]
uops_4_exception <= io_enq_bits_uop_exception_0; // @[util.scala:458:7, :505:22]
uops_4_exc_cause <= io_enq_bits_uop_exc_cause_0; // @[util.scala:458:7, :505:22]
uops_4_mem_cmd <= io_enq_bits_uop_mem_cmd_0; // @[util.scala:458:7, :505:22]
uops_4_mem_size <= io_enq_bits_uop_mem_size_0; // @[util.scala:458:7, :505:22]
uops_4_mem_signed <= io_enq_bits_uop_mem_signed_0; // @[util.scala:458:7, :505:22]
uops_4_uses_ldq <= io_enq_bits_uop_uses_ldq_0; // @[util.scala:458:7, :505:22]
uops_4_uses_stq <= io_enq_bits_uop_uses_stq_0; // @[util.scala:458:7, :505:22]
uops_4_is_unique <= io_enq_bits_uop_is_unique_0; // @[util.scala:458:7, :505:22]
uops_4_flush_on_commit <= io_enq_bits_uop_flush_on_commit_0; // @[util.scala:458:7, :505:22]
uops_4_csr_cmd <= io_enq_bits_uop_csr_cmd_0; // @[util.scala:458:7, :505:22]
uops_4_ldst_is_rs1 <= io_enq_bits_uop_ldst_is_rs1_0; // @[util.scala:458:7, :505:22]
uops_4_ldst <= io_enq_bits_uop_ldst_0; // @[util.scala:458:7, :505:22]
uops_4_lrs1 <= io_enq_bits_uop_lrs1_0; // @[util.scala:458:7, :505:22]
uops_4_lrs2 <= io_enq_bits_uop_lrs2_0; // @[util.scala:458:7, :505:22]
uops_4_lrs3 <= io_enq_bits_uop_lrs3_0; // @[util.scala:458:7, :505:22]
uops_4_dst_rtype <= io_enq_bits_uop_dst_rtype_0; // @[util.scala:458:7, :505:22]
uops_4_lrs1_rtype <= io_enq_bits_uop_lrs1_rtype_0; // @[util.scala:458:7, :505:22]
uops_4_lrs2_rtype <= io_enq_bits_uop_lrs2_rtype_0; // @[util.scala:458:7, :505:22]
uops_4_frs3_en <= io_enq_bits_uop_frs3_en_0; // @[util.scala:458:7, :505:22]
uops_4_fcn_dw <= io_enq_bits_uop_fcn_dw_0; // @[util.scala:458:7, :505:22]
uops_4_fcn_op <= io_enq_bits_uop_fcn_op_0; // @[util.scala:458:7, :505:22]
uops_4_fp_val <= io_enq_bits_uop_fp_val_0; // @[util.scala:458:7, :505:22]
uops_4_fp_rm <= io_enq_bits_uop_fp_rm_0; // @[util.scala:458:7, :505:22]
uops_4_fp_typ <= io_enq_bits_uop_fp_typ_0; // @[util.scala:458:7, :505:22]
uops_4_xcpt_pf_if <= io_enq_bits_uop_xcpt_pf_if_0; // @[util.scala:458:7, :505:22]
uops_4_xcpt_ae_if <= io_enq_bits_uop_xcpt_ae_if_0; // @[util.scala:458:7, :505:22]
uops_4_xcpt_ma_if <= io_enq_bits_uop_xcpt_ma_if_0; // @[util.scala:458:7, :505:22]
uops_4_bp_debug_if <= io_enq_bits_uop_bp_debug_if_0; // @[util.scala:458:7, :505:22]
uops_4_bp_xcpt_if <= io_enq_bits_uop_bp_xcpt_if_0; // @[util.scala:458:7, :505:22]
uops_4_debug_fsrc <= io_enq_bits_uop_debug_fsrc_0; // @[util.scala:458:7, :505:22]
uops_4_debug_tsrc <= io_enq_bits_uop_debug_tsrc_0; // @[util.scala:458:7, :505:22]
end
if (do_enq & _GEN_123) // @[util.scala:514:26, :521:24, :526:19, :528:35, :530:35]
uops_4_br_mask <= _uops_br_mask_T_1; // @[util.scala:93:25, :505:22]
else if (valids_4) // @[util.scala:504:26]
uops_4_br_mask <= _uops_4_br_mask_T_1; // @[util.scala:97:21, :505:22]
if (_GEN_126) begin // @[util.scala:520:18, :526:19, :528:35]
uops_5_inst <= io_enq_bits_uop_inst_0; // @[util.scala:458:7, :505:22]
uops_5_debug_inst <= io_enq_bits_uop_debug_inst_0; // @[util.scala:458:7, :505:22]
uops_5_is_rvc <= io_enq_bits_uop_is_rvc_0; // @[util.scala:458:7, :505:22]
uops_5_debug_pc <= io_enq_bits_uop_debug_pc_0; // @[util.scala:458:7, :505:22]
uops_5_iq_type_0 <= io_enq_bits_uop_iq_type_0_0; // @[util.scala:458:7, :505:22]
uops_5_iq_type_1 <= io_enq_bits_uop_iq_type_1_0; // @[util.scala:458:7, :505:22]
uops_5_iq_type_2 <= io_enq_bits_uop_iq_type_2_0; // @[util.scala:458:7, :505:22]
uops_5_iq_type_3 <= io_enq_bits_uop_iq_type_3_0; // @[util.scala:458:7, :505:22]
uops_5_fu_code_0 <= io_enq_bits_uop_fu_code_0_0; // @[util.scala:458:7, :505:22]
uops_5_fu_code_1 <= io_enq_bits_uop_fu_code_1_0; // @[util.scala:458:7, :505:22]
uops_5_fu_code_2 <= io_enq_bits_uop_fu_code_2_0; // @[util.scala:458:7, :505:22]
uops_5_fu_code_3 <= io_enq_bits_uop_fu_code_3_0; // @[util.scala:458:7, :505:22]
uops_5_fu_code_4 <= io_enq_bits_uop_fu_code_4_0; // @[util.scala:458:7, :505:22]
uops_5_fu_code_5 <= io_enq_bits_uop_fu_code_5_0; // @[util.scala:458:7, :505:22]
uops_5_fu_code_6 <= io_enq_bits_uop_fu_code_6_0; // @[util.scala:458:7, :505:22]
uops_5_fu_code_7 <= io_enq_bits_uop_fu_code_7_0; // @[util.scala:458:7, :505:22]
uops_5_fu_code_8 <= io_enq_bits_uop_fu_code_8_0; // @[util.scala:458:7, :505:22]
uops_5_fu_code_9 <= io_enq_bits_uop_fu_code_9_0; // @[util.scala:458:7, :505:22]
uops_5_iw_issued <= io_enq_bits_uop_iw_issued_0; // @[util.scala:458:7, :505:22]
uops_5_iw_issued_partial_agen <= io_enq_bits_uop_iw_issued_partial_agen_0; // @[util.scala:458:7, :505:22]
uops_5_iw_issued_partial_dgen <= io_enq_bits_uop_iw_issued_partial_dgen_0; // @[util.scala:458:7, :505:22]
uops_5_iw_p1_speculative_child <= io_enq_bits_uop_iw_p1_speculative_child_0; // @[util.scala:458:7, :505:22]
uops_5_iw_p2_speculative_child <= io_enq_bits_uop_iw_p2_speculative_child_0; // @[util.scala:458:7, :505:22]
uops_5_iw_p1_bypass_hint <= io_enq_bits_uop_iw_p1_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_5_iw_p2_bypass_hint <= io_enq_bits_uop_iw_p2_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_5_iw_p3_bypass_hint <= io_enq_bits_uop_iw_p3_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_5_dis_col_sel <= io_enq_bits_uop_dis_col_sel_0; // @[util.scala:458:7, :505:22]
uops_5_br_tag <= io_enq_bits_uop_br_tag_0; // @[util.scala:458:7, :505:22]
uops_5_br_type <= io_enq_bits_uop_br_type_0; // @[util.scala:458:7, :505:22]
uops_5_is_sfb <= io_enq_bits_uop_is_sfb_0; // @[util.scala:458:7, :505:22]
uops_5_is_fence <= io_enq_bits_uop_is_fence_0; // @[util.scala:458:7, :505:22]
uops_5_is_fencei <= io_enq_bits_uop_is_fencei_0; // @[util.scala:458:7, :505:22]
uops_5_is_sfence <= io_enq_bits_uop_is_sfence_0; // @[util.scala:458:7, :505:22]
uops_5_is_amo <= io_enq_bits_uop_is_amo_0; // @[util.scala:458:7, :505:22]
uops_5_is_eret <= io_enq_bits_uop_is_eret_0; // @[util.scala:458:7, :505:22]
uops_5_is_sys_pc2epc <= io_enq_bits_uop_is_sys_pc2epc_0; // @[util.scala:458:7, :505:22]
uops_5_is_rocc <= io_enq_bits_uop_is_rocc_0; // @[util.scala:458:7, :505:22]
uops_5_is_mov <= io_enq_bits_uop_is_mov_0; // @[util.scala:458:7, :505:22]
uops_5_ftq_idx <= io_enq_bits_uop_ftq_idx_0; // @[util.scala:458:7, :505:22]
uops_5_edge_inst <= io_enq_bits_uop_edge_inst_0; // @[util.scala:458:7, :505:22]
uops_5_pc_lob <= io_enq_bits_uop_pc_lob_0; // @[util.scala:458:7, :505:22]
uops_5_taken <= io_enq_bits_uop_taken_0; // @[util.scala:458:7, :505:22]
uops_5_imm_rename <= io_enq_bits_uop_imm_rename_0; // @[util.scala:458:7, :505:22]
uops_5_imm_sel <= io_enq_bits_uop_imm_sel_0; // @[util.scala:458:7, :505:22]
uops_5_pimm <= io_enq_bits_uop_pimm_0; // @[util.scala:458:7, :505:22]
uops_5_imm_packed <= io_enq_bits_uop_imm_packed_0; // @[util.scala:458:7, :505:22]
uops_5_op1_sel <= io_enq_bits_uop_op1_sel_0; // @[util.scala:458:7, :505:22]
uops_5_op2_sel <= io_enq_bits_uop_op2_sel_0; // @[util.scala:458:7, :505:22]
uops_5_fp_ctrl_ldst <= io_enq_bits_uop_fp_ctrl_ldst_0; // @[util.scala:458:7, :505:22]
uops_5_fp_ctrl_wen <= io_enq_bits_uop_fp_ctrl_wen_0; // @[util.scala:458:7, :505:22]
uops_5_fp_ctrl_ren1 <= io_enq_bits_uop_fp_ctrl_ren1_0; // @[util.scala:458:7, :505:22]
uops_5_fp_ctrl_ren2 <= io_enq_bits_uop_fp_ctrl_ren2_0; // @[util.scala:458:7, :505:22]
uops_5_fp_ctrl_ren3 <= io_enq_bits_uop_fp_ctrl_ren3_0; // @[util.scala:458:7, :505:22]
uops_5_fp_ctrl_swap12 <= io_enq_bits_uop_fp_ctrl_swap12_0; // @[util.scala:458:7, :505:22]
uops_5_fp_ctrl_swap23 <= io_enq_bits_uop_fp_ctrl_swap23_0; // @[util.scala:458:7, :505:22]
uops_5_fp_ctrl_typeTagIn <= io_enq_bits_uop_fp_ctrl_typeTagIn_0; // @[util.scala:458:7, :505:22]
uops_5_fp_ctrl_typeTagOut <= io_enq_bits_uop_fp_ctrl_typeTagOut_0; // @[util.scala:458:7, :505:22]
uops_5_fp_ctrl_fromint <= io_enq_bits_uop_fp_ctrl_fromint_0; // @[util.scala:458:7, :505:22]
uops_5_fp_ctrl_toint <= io_enq_bits_uop_fp_ctrl_toint_0; // @[util.scala:458:7, :505:22]
uops_5_fp_ctrl_fastpipe <= io_enq_bits_uop_fp_ctrl_fastpipe_0; // @[util.scala:458:7, :505:22]
uops_5_fp_ctrl_fma <= io_enq_bits_uop_fp_ctrl_fma_0; // @[util.scala:458:7, :505:22]
uops_5_fp_ctrl_div <= io_enq_bits_uop_fp_ctrl_div_0; // @[util.scala:458:7, :505:22]
uops_5_fp_ctrl_sqrt <= io_enq_bits_uop_fp_ctrl_sqrt_0; // @[util.scala:458:7, :505:22]
uops_5_fp_ctrl_wflags <= io_enq_bits_uop_fp_ctrl_wflags_0; // @[util.scala:458:7, :505:22]
uops_5_fp_ctrl_vec <= io_enq_bits_uop_fp_ctrl_vec_0; // @[util.scala:458:7, :505:22]
uops_5_rob_idx <= io_enq_bits_uop_rob_idx_0; // @[util.scala:458:7, :505:22]
uops_5_ldq_idx <= io_enq_bits_uop_ldq_idx_0; // @[util.scala:458:7, :505:22]
uops_5_stq_idx <= io_enq_bits_uop_stq_idx_0; // @[util.scala:458:7, :505:22]
uops_5_rxq_idx <= io_enq_bits_uop_rxq_idx_0; // @[util.scala:458:7, :505:22]
uops_5_pdst <= io_enq_bits_uop_pdst_0; // @[util.scala:458:7, :505:22]
uops_5_prs1 <= io_enq_bits_uop_prs1_0; // @[util.scala:458:7, :505:22]
uops_5_prs2 <= io_enq_bits_uop_prs2_0; // @[util.scala:458:7, :505:22]
uops_5_prs3 <= io_enq_bits_uop_prs3_0; // @[util.scala:458:7, :505:22]
uops_5_ppred <= io_enq_bits_uop_ppred_0; // @[util.scala:458:7, :505:22]
uops_5_prs1_busy <= io_enq_bits_uop_prs1_busy_0; // @[util.scala:458:7, :505:22]
uops_5_prs2_busy <= io_enq_bits_uop_prs2_busy_0; // @[util.scala:458:7, :505:22]
uops_5_prs3_busy <= io_enq_bits_uop_prs3_busy_0; // @[util.scala:458:7, :505:22]
uops_5_ppred_busy <= io_enq_bits_uop_ppred_busy_0; // @[util.scala:458:7, :505:22]
uops_5_stale_pdst <= io_enq_bits_uop_stale_pdst_0; // @[util.scala:458:7, :505:22]
uops_5_exception <= io_enq_bits_uop_exception_0; // @[util.scala:458:7, :505:22]
uops_5_exc_cause <= io_enq_bits_uop_exc_cause_0; // @[util.scala:458:7, :505:22]
uops_5_mem_cmd <= io_enq_bits_uop_mem_cmd_0; // @[util.scala:458:7, :505:22]
uops_5_mem_size <= io_enq_bits_uop_mem_size_0; // @[util.scala:458:7, :505:22]
uops_5_mem_signed <= io_enq_bits_uop_mem_signed_0; // @[util.scala:458:7, :505:22]
uops_5_uses_ldq <= io_enq_bits_uop_uses_ldq_0; // @[util.scala:458:7, :505:22]
uops_5_uses_stq <= io_enq_bits_uop_uses_stq_0; // @[util.scala:458:7, :505:22]
uops_5_is_unique <= io_enq_bits_uop_is_unique_0; // @[util.scala:458:7, :505:22]
uops_5_flush_on_commit <= io_enq_bits_uop_flush_on_commit_0; // @[util.scala:458:7, :505:22]
uops_5_csr_cmd <= io_enq_bits_uop_csr_cmd_0; // @[util.scala:458:7, :505:22]
uops_5_ldst_is_rs1 <= io_enq_bits_uop_ldst_is_rs1_0; // @[util.scala:458:7, :505:22]
uops_5_ldst <= io_enq_bits_uop_ldst_0; // @[util.scala:458:7, :505:22]
uops_5_lrs1 <= io_enq_bits_uop_lrs1_0; // @[util.scala:458:7, :505:22]
uops_5_lrs2 <= io_enq_bits_uop_lrs2_0; // @[util.scala:458:7, :505:22]
uops_5_lrs3 <= io_enq_bits_uop_lrs3_0; // @[util.scala:458:7, :505:22]
uops_5_dst_rtype <= io_enq_bits_uop_dst_rtype_0; // @[util.scala:458:7, :505:22]
uops_5_lrs1_rtype <= io_enq_bits_uop_lrs1_rtype_0; // @[util.scala:458:7, :505:22]
uops_5_lrs2_rtype <= io_enq_bits_uop_lrs2_rtype_0; // @[util.scala:458:7, :505:22]
uops_5_frs3_en <= io_enq_bits_uop_frs3_en_0; // @[util.scala:458:7, :505:22]
uops_5_fcn_dw <= io_enq_bits_uop_fcn_dw_0; // @[util.scala:458:7, :505:22]
uops_5_fcn_op <= io_enq_bits_uop_fcn_op_0; // @[util.scala:458:7, :505:22]
uops_5_fp_val <= io_enq_bits_uop_fp_val_0; // @[util.scala:458:7, :505:22]
uops_5_fp_rm <= io_enq_bits_uop_fp_rm_0; // @[util.scala:458:7, :505:22]
uops_5_fp_typ <= io_enq_bits_uop_fp_typ_0; // @[util.scala:458:7, :505:22]
uops_5_xcpt_pf_if <= io_enq_bits_uop_xcpt_pf_if_0; // @[util.scala:458:7, :505:22]
uops_5_xcpt_ae_if <= io_enq_bits_uop_xcpt_ae_if_0; // @[util.scala:458:7, :505:22]
uops_5_xcpt_ma_if <= io_enq_bits_uop_xcpt_ma_if_0; // @[util.scala:458:7, :505:22]
uops_5_bp_debug_if <= io_enq_bits_uop_bp_debug_if_0; // @[util.scala:458:7, :505:22]
uops_5_bp_xcpt_if <= io_enq_bits_uop_bp_xcpt_if_0; // @[util.scala:458:7, :505:22]
uops_5_debug_fsrc <= io_enq_bits_uop_debug_fsrc_0; // @[util.scala:458:7, :505:22]
uops_5_debug_tsrc <= io_enq_bits_uop_debug_tsrc_0; // @[util.scala:458:7, :505:22]
end
if (do_enq & _GEN_125) // @[util.scala:514:26, :521:24, :526:19, :528:35, :530:35]
uops_5_br_mask <= _uops_br_mask_T_1; // @[util.scala:93:25, :505:22]
else if (valids_5) // @[util.scala:504:26]
uops_5_br_mask <= _uops_5_br_mask_T_1; // @[util.scala:97:21, :505:22]
if (_GEN_128) begin // @[util.scala:520:18, :526:19, :528:35]
uops_6_inst <= io_enq_bits_uop_inst_0; // @[util.scala:458:7, :505:22]
uops_6_debug_inst <= io_enq_bits_uop_debug_inst_0; // @[util.scala:458:7, :505:22]
uops_6_is_rvc <= io_enq_bits_uop_is_rvc_0; // @[util.scala:458:7, :505:22]
uops_6_debug_pc <= io_enq_bits_uop_debug_pc_0; // @[util.scala:458:7, :505:22]
uops_6_iq_type_0 <= io_enq_bits_uop_iq_type_0_0; // @[util.scala:458:7, :505:22]
uops_6_iq_type_1 <= io_enq_bits_uop_iq_type_1_0; // @[util.scala:458:7, :505:22]
uops_6_iq_type_2 <= io_enq_bits_uop_iq_type_2_0; // @[util.scala:458:7, :505:22]
uops_6_iq_type_3 <= io_enq_bits_uop_iq_type_3_0; // @[util.scala:458:7, :505:22]
uops_6_fu_code_0 <= io_enq_bits_uop_fu_code_0_0; // @[util.scala:458:7, :505:22]
uops_6_fu_code_1 <= io_enq_bits_uop_fu_code_1_0; // @[util.scala:458:7, :505:22]
uops_6_fu_code_2 <= io_enq_bits_uop_fu_code_2_0; // @[util.scala:458:7, :505:22]
uops_6_fu_code_3 <= io_enq_bits_uop_fu_code_3_0; // @[util.scala:458:7, :505:22]
uops_6_fu_code_4 <= io_enq_bits_uop_fu_code_4_0; // @[util.scala:458:7, :505:22]
uops_6_fu_code_5 <= io_enq_bits_uop_fu_code_5_0; // @[util.scala:458:7, :505:22]
uops_6_fu_code_6 <= io_enq_bits_uop_fu_code_6_0; // @[util.scala:458:7, :505:22]
uops_6_fu_code_7 <= io_enq_bits_uop_fu_code_7_0; // @[util.scala:458:7, :505:22]
uops_6_fu_code_8 <= io_enq_bits_uop_fu_code_8_0; // @[util.scala:458:7, :505:22]
uops_6_fu_code_9 <= io_enq_bits_uop_fu_code_9_0; // @[util.scala:458:7, :505:22]
uops_6_iw_issued <= io_enq_bits_uop_iw_issued_0; // @[util.scala:458:7, :505:22]
uops_6_iw_issued_partial_agen <= io_enq_bits_uop_iw_issued_partial_agen_0; // @[util.scala:458:7, :505:22]
uops_6_iw_issued_partial_dgen <= io_enq_bits_uop_iw_issued_partial_dgen_0; // @[util.scala:458:7, :505:22]
uops_6_iw_p1_speculative_child <= io_enq_bits_uop_iw_p1_speculative_child_0; // @[util.scala:458:7, :505:22]
uops_6_iw_p2_speculative_child <= io_enq_bits_uop_iw_p2_speculative_child_0; // @[util.scala:458:7, :505:22]
uops_6_iw_p1_bypass_hint <= io_enq_bits_uop_iw_p1_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_6_iw_p2_bypass_hint <= io_enq_bits_uop_iw_p2_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_6_iw_p3_bypass_hint <= io_enq_bits_uop_iw_p3_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_6_dis_col_sel <= io_enq_bits_uop_dis_col_sel_0; // @[util.scala:458:7, :505:22]
uops_6_br_tag <= io_enq_bits_uop_br_tag_0; // @[util.scala:458:7, :505:22]
uops_6_br_type <= io_enq_bits_uop_br_type_0; // @[util.scala:458:7, :505:22]
uops_6_is_sfb <= io_enq_bits_uop_is_sfb_0; // @[util.scala:458:7, :505:22]
uops_6_is_fence <= io_enq_bits_uop_is_fence_0; // @[util.scala:458:7, :505:22]
uops_6_is_fencei <= io_enq_bits_uop_is_fencei_0; // @[util.scala:458:7, :505:22]
uops_6_is_sfence <= io_enq_bits_uop_is_sfence_0; // @[util.scala:458:7, :505:22]
uops_6_is_amo <= io_enq_bits_uop_is_amo_0; // @[util.scala:458:7, :505:22]
uops_6_is_eret <= io_enq_bits_uop_is_eret_0; // @[util.scala:458:7, :505:22]
uops_6_is_sys_pc2epc <= io_enq_bits_uop_is_sys_pc2epc_0; // @[util.scala:458:7, :505:22]
uops_6_is_rocc <= io_enq_bits_uop_is_rocc_0; // @[util.scala:458:7, :505:22]
uops_6_is_mov <= io_enq_bits_uop_is_mov_0; // @[util.scala:458:7, :505:22]
uops_6_ftq_idx <= io_enq_bits_uop_ftq_idx_0; // @[util.scala:458:7, :505:22]
uops_6_edge_inst <= io_enq_bits_uop_edge_inst_0; // @[util.scala:458:7, :505:22]
uops_6_pc_lob <= io_enq_bits_uop_pc_lob_0; // @[util.scala:458:7, :505:22]
uops_6_taken <= io_enq_bits_uop_taken_0; // @[util.scala:458:7, :505:22]
uops_6_imm_rename <= io_enq_bits_uop_imm_rename_0; // @[util.scala:458:7, :505:22]
uops_6_imm_sel <= io_enq_bits_uop_imm_sel_0; // @[util.scala:458:7, :505:22]
uops_6_pimm <= io_enq_bits_uop_pimm_0; // @[util.scala:458:7, :505:22]
uops_6_imm_packed <= io_enq_bits_uop_imm_packed_0; // @[util.scala:458:7, :505:22]
uops_6_op1_sel <= io_enq_bits_uop_op1_sel_0; // @[util.scala:458:7, :505:22]
uops_6_op2_sel <= io_enq_bits_uop_op2_sel_0; // @[util.scala:458:7, :505:22]
uops_6_fp_ctrl_ldst <= io_enq_bits_uop_fp_ctrl_ldst_0; // @[util.scala:458:7, :505:22]
uops_6_fp_ctrl_wen <= io_enq_bits_uop_fp_ctrl_wen_0; // @[util.scala:458:7, :505:22]
uops_6_fp_ctrl_ren1 <= io_enq_bits_uop_fp_ctrl_ren1_0; // @[util.scala:458:7, :505:22]
uops_6_fp_ctrl_ren2 <= io_enq_bits_uop_fp_ctrl_ren2_0; // @[util.scala:458:7, :505:22]
uops_6_fp_ctrl_ren3 <= io_enq_bits_uop_fp_ctrl_ren3_0; // @[util.scala:458:7, :505:22]
uops_6_fp_ctrl_swap12 <= io_enq_bits_uop_fp_ctrl_swap12_0; // @[util.scala:458:7, :505:22]
uops_6_fp_ctrl_swap23 <= io_enq_bits_uop_fp_ctrl_swap23_0; // @[util.scala:458:7, :505:22]
uops_6_fp_ctrl_typeTagIn <= io_enq_bits_uop_fp_ctrl_typeTagIn_0; // @[util.scala:458:7, :505:22]
uops_6_fp_ctrl_typeTagOut <= io_enq_bits_uop_fp_ctrl_typeTagOut_0; // @[util.scala:458:7, :505:22]
uops_6_fp_ctrl_fromint <= io_enq_bits_uop_fp_ctrl_fromint_0; // @[util.scala:458:7, :505:22]
uops_6_fp_ctrl_toint <= io_enq_bits_uop_fp_ctrl_toint_0; // @[util.scala:458:7, :505:22]
uops_6_fp_ctrl_fastpipe <= io_enq_bits_uop_fp_ctrl_fastpipe_0; // @[util.scala:458:7, :505:22]
uops_6_fp_ctrl_fma <= io_enq_bits_uop_fp_ctrl_fma_0; // @[util.scala:458:7, :505:22]
uops_6_fp_ctrl_div <= io_enq_bits_uop_fp_ctrl_div_0; // @[util.scala:458:7, :505:22]
uops_6_fp_ctrl_sqrt <= io_enq_bits_uop_fp_ctrl_sqrt_0; // @[util.scala:458:7, :505:22]
uops_6_fp_ctrl_wflags <= io_enq_bits_uop_fp_ctrl_wflags_0; // @[util.scala:458:7, :505:22]
uops_6_fp_ctrl_vec <= io_enq_bits_uop_fp_ctrl_vec_0; // @[util.scala:458:7, :505:22]
uops_6_rob_idx <= io_enq_bits_uop_rob_idx_0; // @[util.scala:458:7, :505:22]
uops_6_ldq_idx <= io_enq_bits_uop_ldq_idx_0; // @[util.scala:458:7, :505:22]
uops_6_stq_idx <= io_enq_bits_uop_stq_idx_0; // @[util.scala:458:7, :505:22]
uops_6_rxq_idx <= io_enq_bits_uop_rxq_idx_0; // @[util.scala:458:7, :505:22]
uops_6_pdst <= io_enq_bits_uop_pdst_0; // @[util.scala:458:7, :505:22]
uops_6_prs1 <= io_enq_bits_uop_prs1_0; // @[util.scala:458:7, :505:22]
uops_6_prs2 <= io_enq_bits_uop_prs2_0; // @[util.scala:458:7, :505:22]
uops_6_prs3 <= io_enq_bits_uop_prs3_0; // @[util.scala:458:7, :505:22]
uops_6_ppred <= io_enq_bits_uop_ppred_0; // @[util.scala:458:7, :505:22]
uops_6_prs1_busy <= io_enq_bits_uop_prs1_busy_0; // @[util.scala:458:7, :505:22]
uops_6_prs2_busy <= io_enq_bits_uop_prs2_busy_0; // @[util.scala:458:7, :505:22]
uops_6_prs3_busy <= io_enq_bits_uop_prs3_busy_0; // @[util.scala:458:7, :505:22]
uops_6_ppred_busy <= io_enq_bits_uop_ppred_busy_0; // @[util.scala:458:7, :505:22]
uops_6_stale_pdst <= io_enq_bits_uop_stale_pdst_0; // @[util.scala:458:7, :505:22]
uops_6_exception <= io_enq_bits_uop_exception_0; // @[util.scala:458:7, :505:22]
uops_6_exc_cause <= io_enq_bits_uop_exc_cause_0; // @[util.scala:458:7, :505:22]
uops_6_mem_cmd <= io_enq_bits_uop_mem_cmd_0; // @[util.scala:458:7, :505:22]
uops_6_mem_size <= io_enq_bits_uop_mem_size_0; // @[util.scala:458:7, :505:22]
uops_6_mem_signed <= io_enq_bits_uop_mem_signed_0; // @[util.scala:458:7, :505:22]
uops_6_uses_ldq <= io_enq_bits_uop_uses_ldq_0; // @[util.scala:458:7, :505:22]
uops_6_uses_stq <= io_enq_bits_uop_uses_stq_0; // @[util.scala:458:7, :505:22]
uops_6_is_unique <= io_enq_bits_uop_is_unique_0; // @[util.scala:458:7, :505:22]
uops_6_flush_on_commit <= io_enq_bits_uop_flush_on_commit_0; // @[util.scala:458:7, :505:22]
uops_6_csr_cmd <= io_enq_bits_uop_csr_cmd_0; // @[util.scala:458:7, :505:22]
uops_6_ldst_is_rs1 <= io_enq_bits_uop_ldst_is_rs1_0; // @[util.scala:458:7, :505:22]
uops_6_ldst <= io_enq_bits_uop_ldst_0; // @[util.scala:458:7, :505:22]
uops_6_lrs1 <= io_enq_bits_uop_lrs1_0; // @[util.scala:458:7, :505:22]
uops_6_lrs2 <= io_enq_bits_uop_lrs2_0; // @[util.scala:458:7, :505:22]
uops_6_lrs3 <= io_enq_bits_uop_lrs3_0; // @[util.scala:458:7, :505:22]
uops_6_dst_rtype <= io_enq_bits_uop_dst_rtype_0; // @[util.scala:458:7, :505:22]
uops_6_lrs1_rtype <= io_enq_bits_uop_lrs1_rtype_0; // @[util.scala:458:7, :505:22]
uops_6_lrs2_rtype <= io_enq_bits_uop_lrs2_rtype_0; // @[util.scala:458:7, :505:22]
uops_6_frs3_en <= io_enq_bits_uop_frs3_en_0; // @[util.scala:458:7, :505:22]
uops_6_fcn_dw <= io_enq_bits_uop_fcn_dw_0; // @[util.scala:458:7, :505:22]
uops_6_fcn_op <= io_enq_bits_uop_fcn_op_0; // @[util.scala:458:7, :505:22]
uops_6_fp_val <= io_enq_bits_uop_fp_val_0; // @[util.scala:458:7, :505:22]
uops_6_fp_rm <= io_enq_bits_uop_fp_rm_0; // @[util.scala:458:7, :505:22]
uops_6_fp_typ <= io_enq_bits_uop_fp_typ_0; // @[util.scala:458:7, :505:22]
uops_6_xcpt_pf_if <= io_enq_bits_uop_xcpt_pf_if_0; // @[util.scala:458:7, :505:22]
uops_6_xcpt_ae_if <= io_enq_bits_uop_xcpt_ae_if_0; // @[util.scala:458:7, :505:22]
uops_6_xcpt_ma_if <= io_enq_bits_uop_xcpt_ma_if_0; // @[util.scala:458:7, :505:22]
uops_6_bp_debug_if <= io_enq_bits_uop_bp_debug_if_0; // @[util.scala:458:7, :505:22]
uops_6_bp_xcpt_if <= io_enq_bits_uop_bp_xcpt_if_0; // @[util.scala:458:7, :505:22]
uops_6_debug_fsrc <= io_enq_bits_uop_debug_fsrc_0; // @[util.scala:458:7, :505:22]
uops_6_debug_tsrc <= io_enq_bits_uop_debug_tsrc_0; // @[util.scala:458:7, :505:22]
end
if (do_enq & _GEN_127) // @[util.scala:514:26, :521:24, :526:19, :528:35, :530:35]
uops_6_br_mask <= _uops_br_mask_T_1; // @[util.scala:93:25, :505:22]
else if (valids_6) // @[util.scala:504:26]
uops_6_br_mask <= _uops_6_br_mask_T_1; // @[util.scala:97:21, :505:22]
if (_GEN_129) begin // @[util.scala:520:18, :526:19, :528:35]
uops_7_inst <= io_enq_bits_uop_inst_0; // @[util.scala:458:7, :505:22]
uops_7_debug_inst <= io_enq_bits_uop_debug_inst_0; // @[util.scala:458:7, :505:22]
uops_7_is_rvc <= io_enq_bits_uop_is_rvc_0; // @[util.scala:458:7, :505:22]
uops_7_debug_pc <= io_enq_bits_uop_debug_pc_0; // @[util.scala:458:7, :505:22]
uops_7_iq_type_0 <= io_enq_bits_uop_iq_type_0_0; // @[util.scala:458:7, :505:22]
uops_7_iq_type_1 <= io_enq_bits_uop_iq_type_1_0; // @[util.scala:458:7, :505:22]
uops_7_iq_type_2 <= io_enq_bits_uop_iq_type_2_0; // @[util.scala:458:7, :505:22]
uops_7_iq_type_3 <= io_enq_bits_uop_iq_type_3_0; // @[util.scala:458:7, :505:22]
uops_7_fu_code_0 <= io_enq_bits_uop_fu_code_0_0; // @[util.scala:458:7, :505:22]
uops_7_fu_code_1 <= io_enq_bits_uop_fu_code_1_0; // @[util.scala:458:7, :505:22]
uops_7_fu_code_2 <= io_enq_bits_uop_fu_code_2_0; // @[util.scala:458:7, :505:22]
uops_7_fu_code_3 <= io_enq_bits_uop_fu_code_3_0; // @[util.scala:458:7, :505:22]
uops_7_fu_code_4 <= io_enq_bits_uop_fu_code_4_0; // @[util.scala:458:7, :505:22]
uops_7_fu_code_5 <= io_enq_bits_uop_fu_code_5_0; // @[util.scala:458:7, :505:22]
uops_7_fu_code_6 <= io_enq_bits_uop_fu_code_6_0; // @[util.scala:458:7, :505:22]
uops_7_fu_code_7 <= io_enq_bits_uop_fu_code_7_0; // @[util.scala:458:7, :505:22]
uops_7_fu_code_8 <= io_enq_bits_uop_fu_code_8_0; // @[util.scala:458:7, :505:22]
uops_7_fu_code_9 <= io_enq_bits_uop_fu_code_9_0; // @[util.scala:458:7, :505:22]
uops_7_iw_issued <= io_enq_bits_uop_iw_issued_0; // @[util.scala:458:7, :505:22]
uops_7_iw_issued_partial_agen <= io_enq_bits_uop_iw_issued_partial_agen_0; // @[util.scala:458:7, :505:22]
uops_7_iw_issued_partial_dgen <= io_enq_bits_uop_iw_issued_partial_dgen_0; // @[util.scala:458:7, :505:22]
uops_7_iw_p1_speculative_child <= io_enq_bits_uop_iw_p1_speculative_child_0; // @[util.scala:458:7, :505:22]
uops_7_iw_p2_speculative_child <= io_enq_bits_uop_iw_p2_speculative_child_0; // @[util.scala:458:7, :505:22]
uops_7_iw_p1_bypass_hint <= io_enq_bits_uop_iw_p1_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_7_iw_p2_bypass_hint <= io_enq_bits_uop_iw_p2_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_7_iw_p3_bypass_hint <= io_enq_bits_uop_iw_p3_bypass_hint_0; // @[util.scala:458:7, :505:22]
uops_7_dis_col_sel <= io_enq_bits_uop_dis_col_sel_0; // @[util.scala:458:7, :505:22]
uops_7_br_tag <= io_enq_bits_uop_br_tag_0; // @[util.scala:458:7, :505:22]
uops_7_br_type <= io_enq_bits_uop_br_type_0; // @[util.scala:458:7, :505:22]
uops_7_is_sfb <= io_enq_bits_uop_is_sfb_0; // @[util.scala:458:7, :505:22]
uops_7_is_fence <= io_enq_bits_uop_is_fence_0; // @[util.scala:458:7, :505:22]
uops_7_is_fencei <= io_enq_bits_uop_is_fencei_0; // @[util.scala:458:7, :505:22]
uops_7_is_sfence <= io_enq_bits_uop_is_sfence_0; // @[util.scala:458:7, :505:22]
uops_7_is_amo <= io_enq_bits_uop_is_amo_0; // @[util.scala:458:7, :505:22]
uops_7_is_eret <= io_enq_bits_uop_is_eret_0; // @[util.scala:458:7, :505:22]
uops_7_is_sys_pc2epc <= io_enq_bits_uop_is_sys_pc2epc_0; // @[util.scala:458:7, :505:22]
uops_7_is_rocc <= io_enq_bits_uop_is_rocc_0; // @[util.scala:458:7, :505:22]
uops_7_is_mov <= io_enq_bits_uop_is_mov_0; // @[util.scala:458:7, :505:22]
uops_7_ftq_idx <= io_enq_bits_uop_ftq_idx_0; // @[util.scala:458:7, :505:22]
uops_7_edge_inst <= io_enq_bits_uop_edge_inst_0; // @[util.scala:458:7, :505:22]
uops_7_pc_lob <= io_enq_bits_uop_pc_lob_0; // @[util.scala:458:7, :505:22]
uops_7_taken <= io_enq_bits_uop_taken_0; // @[util.scala:458:7, :505:22]
uops_7_imm_rename <= io_enq_bits_uop_imm_rename_0; // @[util.scala:458:7, :505:22]
uops_7_imm_sel <= io_enq_bits_uop_imm_sel_0; // @[util.scala:458:7, :505:22]
uops_7_pimm <= io_enq_bits_uop_pimm_0; // @[util.scala:458:7, :505:22]
uops_7_imm_packed <= io_enq_bits_uop_imm_packed_0; // @[util.scala:458:7, :505:22]
uops_7_op1_sel <= io_enq_bits_uop_op1_sel_0; // @[util.scala:458:7, :505:22]
uops_7_op2_sel <= io_enq_bits_uop_op2_sel_0; // @[util.scala:458:7, :505:22]
uops_7_fp_ctrl_ldst <= io_enq_bits_uop_fp_ctrl_ldst_0; // @[util.scala:458:7, :505:22]
uops_7_fp_ctrl_wen <= io_enq_bits_uop_fp_ctrl_wen_0; // @[util.scala:458:7, :505:22]
uops_7_fp_ctrl_ren1 <= io_enq_bits_uop_fp_ctrl_ren1_0; // @[util.scala:458:7, :505:22]
uops_7_fp_ctrl_ren2 <= io_enq_bits_uop_fp_ctrl_ren2_0; // @[util.scala:458:7, :505:22]
uops_7_fp_ctrl_ren3 <= io_enq_bits_uop_fp_ctrl_ren3_0; // @[util.scala:458:7, :505:22]
uops_7_fp_ctrl_swap12 <= io_enq_bits_uop_fp_ctrl_swap12_0; // @[util.scala:458:7, :505:22]
uops_7_fp_ctrl_swap23 <= io_enq_bits_uop_fp_ctrl_swap23_0; // @[util.scala:458:7, :505:22]
uops_7_fp_ctrl_typeTagIn <= io_enq_bits_uop_fp_ctrl_typeTagIn_0; // @[util.scala:458:7, :505:22]
uops_7_fp_ctrl_typeTagOut <= io_enq_bits_uop_fp_ctrl_typeTagOut_0; // @[util.scala:458:7, :505:22]
uops_7_fp_ctrl_fromint <= io_enq_bits_uop_fp_ctrl_fromint_0; // @[util.scala:458:7, :505:22]
uops_7_fp_ctrl_toint <= io_enq_bits_uop_fp_ctrl_toint_0; // @[util.scala:458:7, :505:22]
uops_7_fp_ctrl_fastpipe <= io_enq_bits_uop_fp_ctrl_fastpipe_0; // @[util.scala:458:7, :505:22]
uops_7_fp_ctrl_fma <= io_enq_bits_uop_fp_ctrl_fma_0; // @[util.scala:458:7, :505:22]
uops_7_fp_ctrl_div <= io_enq_bits_uop_fp_ctrl_div_0; // @[util.scala:458:7, :505:22]
uops_7_fp_ctrl_sqrt <= io_enq_bits_uop_fp_ctrl_sqrt_0; // @[util.scala:458:7, :505:22]
uops_7_fp_ctrl_wflags <= io_enq_bits_uop_fp_ctrl_wflags_0; // @[util.scala:458:7, :505:22]
uops_7_fp_ctrl_vec <= io_enq_bits_uop_fp_ctrl_vec_0; // @[util.scala:458:7, :505:22]
uops_7_rob_idx <= io_enq_bits_uop_rob_idx_0; // @[util.scala:458:7, :505:22]
uops_7_ldq_idx <= io_enq_bits_uop_ldq_idx_0; // @[util.scala:458:7, :505:22]
uops_7_stq_idx <= io_enq_bits_uop_stq_idx_0; // @[util.scala:458:7, :505:22]
uops_7_rxq_idx <= io_enq_bits_uop_rxq_idx_0; // @[util.scala:458:7, :505:22]
uops_7_pdst <= io_enq_bits_uop_pdst_0; // @[util.scala:458:7, :505:22]
uops_7_prs1 <= io_enq_bits_uop_prs1_0; // @[util.scala:458:7, :505:22]
uops_7_prs2 <= io_enq_bits_uop_prs2_0; // @[util.scala:458:7, :505:22]
uops_7_prs3 <= io_enq_bits_uop_prs3_0; // @[util.scala:458:7, :505:22]
uops_7_ppred <= io_enq_bits_uop_ppred_0; // @[util.scala:458:7, :505:22]
uops_7_prs1_busy <= io_enq_bits_uop_prs1_busy_0; // @[util.scala:458:7, :505:22]
uops_7_prs2_busy <= io_enq_bits_uop_prs2_busy_0; // @[util.scala:458:7, :505:22]
uops_7_prs3_busy <= io_enq_bits_uop_prs3_busy_0; // @[util.scala:458:7, :505:22]
uops_7_ppred_busy <= io_enq_bits_uop_ppred_busy_0; // @[util.scala:458:7, :505:22]
uops_7_stale_pdst <= io_enq_bits_uop_stale_pdst_0; // @[util.scala:458:7, :505:22]
uops_7_exception <= io_enq_bits_uop_exception_0; // @[util.scala:458:7, :505:22]
uops_7_exc_cause <= io_enq_bits_uop_exc_cause_0; // @[util.scala:458:7, :505:22]
uops_7_mem_cmd <= io_enq_bits_uop_mem_cmd_0; // @[util.scala:458:7, :505:22]
uops_7_mem_size <= io_enq_bits_uop_mem_size_0; // @[util.scala:458:7, :505:22]
uops_7_mem_signed <= io_enq_bits_uop_mem_signed_0; // @[util.scala:458:7, :505:22]
uops_7_uses_ldq <= io_enq_bits_uop_uses_ldq_0; // @[util.scala:458:7, :505:22]
uops_7_uses_stq <= io_enq_bits_uop_uses_stq_0; // @[util.scala:458:7, :505:22]
uops_7_is_unique <= io_enq_bits_uop_is_unique_0; // @[util.scala:458:7, :505:22]
uops_7_flush_on_commit <= io_enq_bits_uop_flush_on_commit_0; // @[util.scala:458:7, :505:22]
uops_7_csr_cmd <= io_enq_bits_uop_csr_cmd_0; // @[util.scala:458:7, :505:22]
uops_7_ldst_is_rs1 <= io_enq_bits_uop_ldst_is_rs1_0; // @[util.scala:458:7, :505:22]
uops_7_ldst <= io_enq_bits_uop_ldst_0; // @[util.scala:458:7, :505:22]
uops_7_lrs1 <= io_enq_bits_uop_lrs1_0; // @[util.scala:458:7, :505:22]
uops_7_lrs2 <= io_enq_bits_uop_lrs2_0; // @[util.scala:458:7, :505:22]
uops_7_lrs3 <= io_enq_bits_uop_lrs3_0; // @[util.scala:458:7, :505:22]
uops_7_dst_rtype <= io_enq_bits_uop_dst_rtype_0; // @[util.scala:458:7, :505:22]
uops_7_lrs1_rtype <= io_enq_bits_uop_lrs1_rtype_0; // @[util.scala:458:7, :505:22]
uops_7_lrs2_rtype <= io_enq_bits_uop_lrs2_rtype_0; // @[util.scala:458:7, :505:22]
uops_7_frs3_en <= io_enq_bits_uop_frs3_en_0; // @[util.scala:458:7, :505:22]
uops_7_fcn_dw <= io_enq_bits_uop_fcn_dw_0; // @[util.scala:458:7, :505:22]
uops_7_fcn_op <= io_enq_bits_uop_fcn_op_0; // @[util.scala:458:7, :505:22]
uops_7_fp_val <= io_enq_bits_uop_fp_val_0; // @[util.scala:458:7, :505:22]
uops_7_fp_rm <= io_enq_bits_uop_fp_rm_0; // @[util.scala:458:7, :505:22]
uops_7_fp_typ <= io_enq_bits_uop_fp_typ_0; // @[util.scala:458:7, :505:22]
uops_7_xcpt_pf_if <= io_enq_bits_uop_xcpt_pf_if_0; // @[util.scala:458:7, :505:22]
uops_7_xcpt_ae_if <= io_enq_bits_uop_xcpt_ae_if_0; // @[util.scala:458:7, :505:22]
uops_7_xcpt_ma_if <= io_enq_bits_uop_xcpt_ma_if_0; // @[util.scala:458:7, :505:22]
uops_7_bp_debug_if <= io_enq_bits_uop_bp_debug_if_0; // @[util.scala:458:7, :505:22]
uops_7_bp_xcpt_if <= io_enq_bits_uop_bp_xcpt_if_0; // @[util.scala:458:7, :505:22]
uops_7_debug_fsrc <= io_enq_bits_uop_debug_fsrc_0; // @[util.scala:458:7, :505:22]
uops_7_debug_tsrc <= io_enq_bits_uop_debug_tsrc_0; // @[util.scala:458:7, :505:22]
end
if (do_enq & (&enq_ptr_value)) // @[Counter.scala:61:40]
uops_7_br_mask <= _uops_br_mask_T_1; // @[util.scala:93:25, :505:22]
else if (valids_7) // @[util.scala:504:26]
uops_7_br_mask <= _uops_7_br_mask_T_1; // @[util.scala:97:21, :505:22]
  end // always @(posedge)
ram_8x72 ram_ext ( // @[util.scala:503:22]
.R0_addr (deq_ptr_value), // @[Counter.scala:61:40]
.R0_en (1'h1),
.R0_clk (clock),
.R0_data (_ram_ext_R0_data),
.W0_addr (enq_ptr_value), // @[Counter.scala:61:40]
.W0_en (do_enq), // @[util.scala:514:26]
.W0_clk (clock),
.W0_data ({io_enq_bits_fflags_bits_0, io_enq_bits_fflags_valid_0, 1'h0, io_enq_bits_data_0}) // @[util.scala:458:7, :503:22]
); // @[util.scala:503:22]
assign io_enq_ready = io_enq_ready_0; // @[util.scala:458:7]
assign io_deq_valid = io_deq_valid_0; // @[util.scala:458:7]
assign io_deq_bits_uop_inst = io_deq_bits_uop_inst_0; // @[util.scala:458:7]
assign io_deq_bits_uop_debug_inst = io_deq_bits_uop_debug_inst_0; // @[util.scala:458:7]
assign io_deq_bits_uop_is_rvc = io_deq_bits_uop_is_rvc_0; // @[util.scala:458:7]
assign io_deq_bits_uop_debug_pc = io_deq_bits_uop_debug_pc_0; // @[util.scala:458:7]
assign io_deq_bits_uop_iq_type_0 = io_deq_bits_uop_iq_type_0_0; // @[util.scala:458:7]
assign io_deq_bits_uop_iq_type_1 = io_deq_bits_uop_iq_type_1_0; // @[util.scala:458:7]
assign io_deq_bits_uop_iq_type_2 = io_deq_bits_uop_iq_type_2_0; // @[util.scala:458:7]
assign io_deq_bits_uop_iq_type_3 = io_deq_bits_uop_iq_type_3_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fu_code_0 = io_deq_bits_uop_fu_code_0_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fu_code_1 = io_deq_bits_uop_fu_code_1_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fu_code_2 = io_deq_bits_uop_fu_code_2_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fu_code_3 = io_deq_bits_uop_fu_code_3_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fu_code_4 = io_deq_bits_uop_fu_code_4_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fu_code_5 = io_deq_bits_uop_fu_code_5_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fu_code_6 = io_deq_bits_uop_fu_code_6_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fu_code_7 = io_deq_bits_uop_fu_code_7_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fu_code_8 = io_deq_bits_uop_fu_code_8_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fu_code_9 = io_deq_bits_uop_fu_code_9_0; // @[util.scala:458:7]
assign io_deq_bits_uop_iw_issued = io_deq_bits_uop_iw_issued_0; // @[util.scala:458:7]
assign io_deq_bits_uop_iw_issued_partial_agen = io_deq_bits_uop_iw_issued_partial_agen_0; // @[util.scala:458:7]
assign io_deq_bits_uop_iw_issued_partial_dgen = io_deq_bits_uop_iw_issued_partial_dgen_0; // @[util.scala:458:7]
assign io_deq_bits_uop_iw_p1_speculative_child = io_deq_bits_uop_iw_p1_speculative_child_0; // @[util.scala:458:7]
assign io_deq_bits_uop_iw_p2_speculative_child = io_deq_bits_uop_iw_p2_speculative_child_0; // @[util.scala:458:7]
assign io_deq_bits_uop_iw_p1_bypass_hint = io_deq_bits_uop_iw_p1_bypass_hint_0; // @[util.scala:458:7]
assign io_deq_bits_uop_iw_p2_bypass_hint = io_deq_bits_uop_iw_p2_bypass_hint_0; // @[util.scala:458:7]
assign io_deq_bits_uop_iw_p3_bypass_hint = io_deq_bits_uop_iw_p3_bypass_hint_0; // @[util.scala:458:7]
assign io_deq_bits_uop_dis_col_sel = io_deq_bits_uop_dis_col_sel_0; // @[util.scala:458:7]
assign io_deq_bits_uop_br_mask = io_deq_bits_uop_br_mask_0; // @[util.scala:458:7]
assign io_deq_bits_uop_br_tag = io_deq_bits_uop_br_tag_0; // @[util.scala:458:7]
assign io_deq_bits_uop_br_type = io_deq_bits_uop_br_type_0; // @[util.scala:458:7]
assign io_deq_bits_uop_is_sfb = io_deq_bits_uop_is_sfb_0; // @[util.scala:458:7]
assign io_deq_bits_uop_is_fence = io_deq_bits_uop_is_fence_0; // @[util.scala:458:7]
assign io_deq_bits_uop_is_fencei = io_deq_bits_uop_is_fencei_0; // @[util.scala:458:7]
assign io_deq_bits_uop_is_sfence = io_deq_bits_uop_is_sfence_0; // @[util.scala:458:7]
assign io_deq_bits_uop_is_amo = io_deq_bits_uop_is_amo_0; // @[util.scala:458:7]
assign io_deq_bits_uop_is_eret = io_deq_bits_uop_is_eret_0; // @[util.scala:458:7]
assign io_deq_bits_uop_is_sys_pc2epc = io_deq_bits_uop_is_sys_pc2epc_0; // @[util.scala:458:7]
assign io_deq_bits_uop_is_rocc = io_deq_bits_uop_is_rocc_0; // @[util.scala:458:7]
assign io_deq_bits_uop_is_mov = io_deq_bits_uop_is_mov_0; // @[util.scala:458:7]
assign io_deq_bits_uop_ftq_idx = io_deq_bits_uop_ftq_idx_0; // @[util.scala:458:7]
assign io_deq_bits_uop_edge_inst = io_deq_bits_uop_edge_inst_0; // @[util.scala:458:7]
assign io_deq_bits_uop_pc_lob = io_deq_bits_uop_pc_lob_0; // @[util.scala:458:7]
assign io_deq_bits_uop_taken = io_deq_bits_uop_taken_0; // @[util.scala:458:7]
assign io_deq_bits_uop_imm_rename = io_deq_bits_uop_imm_rename_0; // @[util.scala:458:7]
assign io_deq_bits_uop_imm_sel = io_deq_bits_uop_imm_sel_0; // @[util.scala:458:7]
assign io_deq_bits_uop_pimm = io_deq_bits_uop_pimm_0; // @[util.scala:458:7]
assign io_deq_bits_uop_imm_packed = io_deq_bits_uop_imm_packed_0; // @[util.scala:458:7]
assign io_deq_bits_uop_op1_sel = io_deq_bits_uop_op1_sel_0; // @[util.scala:458:7]
assign io_deq_bits_uop_op2_sel = io_deq_bits_uop_op2_sel_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fp_ctrl_ldst = io_deq_bits_uop_fp_ctrl_ldst_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fp_ctrl_wen = io_deq_bits_uop_fp_ctrl_wen_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fp_ctrl_ren1 = io_deq_bits_uop_fp_ctrl_ren1_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fp_ctrl_ren2 = io_deq_bits_uop_fp_ctrl_ren2_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fp_ctrl_ren3 = io_deq_bits_uop_fp_ctrl_ren3_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fp_ctrl_swap12 = io_deq_bits_uop_fp_ctrl_swap12_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fp_ctrl_swap23 = io_deq_bits_uop_fp_ctrl_swap23_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fp_ctrl_typeTagIn = io_deq_bits_uop_fp_ctrl_typeTagIn_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fp_ctrl_typeTagOut = io_deq_bits_uop_fp_ctrl_typeTagOut_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fp_ctrl_fromint = io_deq_bits_uop_fp_ctrl_fromint_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fp_ctrl_toint = io_deq_bits_uop_fp_ctrl_toint_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fp_ctrl_fastpipe = io_deq_bits_uop_fp_ctrl_fastpipe_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fp_ctrl_fma = io_deq_bits_uop_fp_ctrl_fma_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fp_ctrl_div = io_deq_bits_uop_fp_ctrl_div_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fp_ctrl_sqrt = io_deq_bits_uop_fp_ctrl_sqrt_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fp_ctrl_wflags = io_deq_bits_uop_fp_ctrl_wflags_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fp_ctrl_vec = io_deq_bits_uop_fp_ctrl_vec_0; // @[util.scala:458:7]
assign io_deq_bits_uop_rob_idx = io_deq_bits_uop_rob_idx_0; // @[util.scala:458:7]
assign io_deq_bits_uop_ldq_idx = io_deq_bits_uop_ldq_idx_0; // @[util.scala:458:7]
assign io_deq_bits_uop_stq_idx = io_deq_bits_uop_stq_idx_0; // @[util.scala:458:7]
assign io_deq_bits_uop_rxq_idx = io_deq_bits_uop_rxq_idx_0; // @[util.scala:458:7]
assign io_deq_bits_uop_pdst = io_deq_bits_uop_pdst_0; // @[util.scala:458:7]
assign io_deq_bits_uop_prs1 = io_deq_bits_uop_prs1_0; // @[util.scala:458:7]
assign io_deq_bits_uop_prs2 = io_deq_bits_uop_prs2_0; // @[util.scala:458:7]
assign io_deq_bits_uop_prs3 = io_deq_bits_uop_prs3_0; // @[util.scala:458:7]
assign io_deq_bits_uop_ppred = io_deq_bits_uop_ppred_0; // @[util.scala:458:7]
assign io_deq_bits_uop_prs1_busy = io_deq_bits_uop_prs1_busy_0; // @[util.scala:458:7]
assign io_deq_bits_uop_prs2_busy = io_deq_bits_uop_prs2_busy_0; // @[util.scala:458:7]
assign io_deq_bits_uop_prs3_busy = io_deq_bits_uop_prs3_busy_0; // @[util.scala:458:7]
assign io_deq_bits_uop_ppred_busy = io_deq_bits_uop_ppred_busy_0; // @[util.scala:458:7]
assign io_deq_bits_uop_stale_pdst = io_deq_bits_uop_stale_pdst_0; // @[util.scala:458:7]
assign io_deq_bits_uop_exception = io_deq_bits_uop_exception_0; // @[util.scala:458:7]
assign io_deq_bits_uop_exc_cause = io_deq_bits_uop_exc_cause_0; // @[util.scala:458:7]
assign io_deq_bits_uop_mem_cmd = io_deq_bits_uop_mem_cmd_0; // @[util.scala:458:7]
assign io_deq_bits_uop_mem_size = io_deq_bits_uop_mem_size_0; // @[util.scala:458:7]
assign io_deq_bits_uop_mem_signed = io_deq_bits_uop_mem_signed_0; // @[util.scala:458:7]
assign io_deq_bits_uop_uses_ldq = io_deq_bits_uop_uses_ldq_0; // @[util.scala:458:7]
assign io_deq_bits_uop_uses_stq = io_deq_bits_uop_uses_stq_0; // @[util.scala:458:7]
assign io_deq_bits_uop_is_unique = io_deq_bits_uop_is_unique_0; // @[util.scala:458:7]
assign io_deq_bits_uop_flush_on_commit = io_deq_bits_uop_flush_on_commit_0; // @[util.scala:458:7]
assign io_deq_bits_uop_csr_cmd = io_deq_bits_uop_csr_cmd_0; // @[util.scala:458:7]
assign io_deq_bits_uop_ldst_is_rs1 = io_deq_bits_uop_ldst_is_rs1_0; // @[util.scala:458:7]
assign io_deq_bits_uop_ldst = io_deq_bits_uop_ldst_0; // @[util.scala:458:7]
assign io_deq_bits_uop_lrs1 = io_deq_bits_uop_lrs1_0; // @[util.scala:458:7]
assign io_deq_bits_uop_lrs2 = io_deq_bits_uop_lrs2_0; // @[util.scala:458:7]
assign io_deq_bits_uop_lrs3 = io_deq_bits_uop_lrs3_0; // @[util.scala:458:7]
assign io_deq_bits_uop_dst_rtype = io_deq_bits_uop_dst_rtype_0; // @[util.scala:458:7]
assign io_deq_bits_uop_lrs1_rtype = io_deq_bits_uop_lrs1_rtype_0; // @[util.scala:458:7]
assign io_deq_bits_uop_lrs2_rtype = io_deq_bits_uop_lrs2_rtype_0; // @[util.scala:458:7]
assign io_deq_bits_uop_frs3_en = io_deq_bits_uop_frs3_en_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fcn_dw = io_deq_bits_uop_fcn_dw_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fcn_op = io_deq_bits_uop_fcn_op_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fp_val = io_deq_bits_uop_fp_val_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fp_rm = io_deq_bits_uop_fp_rm_0; // @[util.scala:458:7]
assign io_deq_bits_uop_fp_typ = io_deq_bits_uop_fp_typ_0; // @[util.scala:458:7]
assign io_deq_bits_uop_xcpt_pf_if = io_deq_bits_uop_xcpt_pf_if_0; // @[util.scala:458:7]
assign io_deq_bits_uop_xcpt_ae_if = io_deq_bits_uop_xcpt_ae_if_0; // @[util.scala:458:7]
assign io_deq_bits_uop_xcpt_ma_if = io_deq_bits_uop_xcpt_ma_if_0; // @[util.scala:458:7]
assign io_deq_bits_uop_bp_debug_if = io_deq_bits_uop_bp_debug_if_0; // @[util.scala:458:7]
assign io_deq_bits_uop_bp_xcpt_if = io_deq_bits_uop_bp_xcpt_if_0; // @[util.scala:458:7]
assign io_deq_bits_uop_debug_fsrc = io_deq_bits_uop_debug_fsrc_0; // @[util.scala:458:7]
assign io_deq_bits_uop_debug_tsrc = io_deq_bits_uop_debug_tsrc_0; // @[util.scala:458:7]
assign io_deq_bits_data = io_deq_bits_data_0; // @[util.scala:458:7]
assign io_deq_bits_predicated = io_deq_bits_predicated_0; // @[util.scala:458:7]
assign io_deq_bits_fflags_valid = io_deq_bits_fflags_valid_0; // @[util.scala:458:7]
assign io_deq_bits_fflags_bits = io_deq_bits_fflags_bits_0; // @[util.scala:458:7]
assign io_count = io_count_0; // @[util.scala:458:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File IterativeTrapCheck.scala:
package saturn.frontend
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config._
import freechips.rocketchip.rocket._
import freechips.rocketchip.util._
import freechips.rocketchip.tile._
import freechips.rocketchip.tilelink._
import freechips.rocketchip.diplomacy._
import saturn.common._
class IndexMaskAccess(implicit p: Parameters) extends CoreModule()(p) with HasVectorParams {
val io = IO(new Bundle {
val in = Input(Bool())
val inst = Input(new VectorIssueInst)
val index_access = Flipped(new VectorIndexAccessIO)
val mask_access = Flipped(new VectorMaskAccessIO)
val access = new Bundle {
val ready = Output(Bool())
val eidx = Input(UInt(log2Ceil(maxVLMax).W))
val index = Output(UInt(64.W))
val mask = Output(Bool())
}
val pop = Input(Valid(UInt(log2Ceil(maxVLMax).W)))
val flush = Input(Bool())
})
val valid = RegInit(false.B)
val eidx = Reg(UInt(log2Ceil(maxVLMax).W))
// This all works only with pow2 buffers and eidx starting at 0
val valids = Reg(Vec(4, Bool()))
val indices = Reg(Vec(4, UInt(64.W)))
val masks = Reg(Vec(4, Bool()))
when (io.in) {
assert(!valid)
valid := true.B
eidx := 0.U
valids.foreach(_ := false.B)
}
val needs_index = io.inst.mop.isOneOf(mopOrdered, mopUnordered)
val needs_mask = !io.inst.vm
val index_ready = io.index_access.ready || !needs_index
val mask_ready = io.mask_access.ready || !needs_mask
io.index_access.valid := valid && needs_index && !valids(eidx(1,0))
io.mask_access.valid := valid && needs_mask && !valids(eidx(1,0))
io.index_access.vrs := io.inst.rs2
io.index_access.eidx := eidx
io.index_access.eew := io.inst.mem_idx_size
io.mask_access.eidx := eidx
when (valid && index_ready && mask_ready && !valids(eidx(1,0))) {
val next_eidx = eidx +& 1.U
eidx := eidx + 1.U
when (next_eidx === io.inst.vconfig.vl) {
valid := false.B
}
valids(eidx(1,0)) := true.B
indices(eidx(1,0)) := io.index_access.idx
masks(eidx(1,0)) := io.mask_access.mask
}
io.access.ready := valids(io.access.eidx(1,0))
io.access.index := indices(io.access.eidx(1,0))
io.access.mask := masks(io.access.eidx(1,0))
when (io.pop.fire) {
valids(io.pop.bits(1,0)) := false.B
}
when (io.flush) {
valid := false.B
}
}
class IterativeTrapCheck(implicit p: Parameters) extends CoreModule()(p) with HasVectorParams {
val io = IO(new Bundle {
val status = Input(new MStatus)
val in = Input(Valid(new VectorIssueInst))
val busy = Output(Bool())
val s0_tlb_req = Valid(new TLBReq(3))
val s1_tlb_req = Valid(new TLBReq(3))
val tlb_resp = Input(new TLBResp)
val retire = Output(Bool())
val pc = Output(UInt(vaddrBitsExtended.W))
val vstart = Valid(UInt(log2Ceil(maxVLMax).W))
val vconfig = Valid(new VConfig)
val xcpt = Valid(new Bundle {
val cause = UInt(xLen.W)
val tval = UInt(coreMaxAddrBits.W)
})
val inst = Output(new VectorIssueInst)
val issue = Decoupled(new VectorIssueInst)
val index_access = Flipped(new VectorIndexAccessIO)
val mask_access = Flipped(new VectorMaskAccessIO)
})
val replay_kill = WireInit(false.B)
def nextPage(addr: UInt) = ((addr + (1 << pgIdxBits).U) >> pgIdxBits) << pgIdxBits
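  // Worked example (illustrative, not in the original source; assumes pgIdxBits = 12):
  // nextPage(0x1234.U) = 0x2000, i.e. the address is advanced to the base of the
  // following 4 KiB page regardless of its offset within the current page.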
val valid = RegInit(false.B)
val seg_hi = Reg(Bool())
val inst = Reg(new VectorIssueInst)
val eidx = Reg(UInt(log2Ceil(maxVLMax).W))
val addr = Reg(UInt(vaddrBitsExtended.W))
val tlb_backoff = RegInit(0.U(2.W))
when (tlb_backoff =/= 0.U) { tlb_backoff := tlb_backoff - 1.U }
val im_access = Module(new IndexMaskAccess)
im_access.io.in := io.in.valid
im_access.io.inst := inst
im_access.io.index_access <> io.index_access
im_access.io.mask_access <> io.mask_access
when (io.in.valid) {
assert(!valid)
valid := true.B
seg_hi := false.B
inst := io.in.bits
eidx := 0.U
addr := io.in.bits.rs1_data
}
val stride = MuxLookup(inst.mop, 0.U)(Seq(
(mopUnit -> ((inst.seg_nf +& 1.U) << inst.mem_elem_size)),
(mopStrided -> inst.rs2_data)
))
val indexed = inst.mop.isOneOf(mopOrdered, mopUnordered)
val index_ready = !indexed || im_access.io.access.ready
val mask_ready = inst.vm || im_access.io.access.ready
val index = Mux(indexed, im_access.io.access.index & eewBitMask(inst.mem_idx_size), 0.U)
val base = Mux(indexed, inst.rs1_data, addr)
val indexaddr = base + index
val tlb_addr = Mux(seg_hi, nextPage(indexaddr), indexaddr)
val seg_nf_consumed = ((1 << pgIdxBits).U - Mux(seg_hi, indexaddr, tlb_addr)(pgIdxBits-1,0)) >> inst.mem_elem_size
val seg_single_page = seg_nf_consumed >= (inst.seg_nf +& 1.U)
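  // Worked example (illustrative, not in the original source; assumes pgIdxBits = 12):
  // for an access 8 bytes below a page boundary (low address bits 0xFF8) with 8-byte
  // elements (mem_elem_size = 3), seg_nf_consumed = (0x1000 - 0xFF8) >> 3 = 1, so a
  // segmented access with seg_nf > 0 straddles the page and needs the seg_hi pass.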
val masked = !im_access.io.access.mask && !inst.vm
val tlb_valid = eidx < inst.vconfig.vl && eidx >= inst.vstart && !masked
val ff = inst.umop === lumopFF && inst.mop === mopUnit
io.busy := valid
io.inst := inst
im_access.io.access.eidx := eidx
io.s0_tlb_req.valid := tlb_valid && tlb_backoff === 0.U && index_ready && mask_ready
io.s0_tlb_req.bits.vaddr := tlb_addr
io.s0_tlb_req.bits.passthrough := false.B
io.s0_tlb_req.bits.size := inst.mem_elem_size
io.s0_tlb_req.bits.cmd := Mux(inst.opcode(5), M_XWR, M_XRD)
io.s0_tlb_req.bits.prv := io.status.prv
io.s0_tlb_req.bits.v := io.status.v
io.s1_tlb_req.valid := RegEnable(io.s0_tlb_req.valid, false.B, valid)
io.s1_tlb_req.bits := RegEnable(io.s0_tlb_req.bits, valid)
val replay_fire = valid && eidx < inst.vconfig.vl && tlb_backoff === 0.U && index_ready && mask_ready
when (replay_fire) {
when (seg_hi || seg_single_page || inst.seg_nf === 0.U) {
eidx := eidx + 1.U
addr := addr + stride
seg_hi := false.B
} .otherwise {
seg_hi := true.B
}
}
val s1_valid = RegNext(replay_fire && !replay_kill, false.B)
val s1_eidx = RegEnable(eidx, valid)
val s1_masked = RegEnable(masked, valid)
val s1_seg_hi = RegEnable(seg_hi, valid)
val s1_base = RegEnable(base, valid)
val s1_tlb_valid = RegEnable(tlb_valid, valid)
val s1_tlb_addr = RegEnable(tlb_addr, valid)
val s1_seg_nf_consumed = RegEnable(seg_nf_consumed, valid)
val s1_seg_single_page = RegEnable(seg_single_page, valid)
when (io.tlb_resp.miss && s1_valid && tlb_backoff === 0.U) { tlb_backoff := 3.U }
val tlb_resp = WireInit(io.tlb_resp)
when (!s1_tlb_valid) {
tlb_resp.miss := false.B
}
val xcpts = Seq(
(tlb_resp.pf.st, Causes.store_page_fault.U),
(tlb_resp.pf.ld, Causes.load_page_fault.U),
(tlb_resp.gf.st, Causes.store_guest_page_fault.U),
(tlb_resp.gf.ld, Causes.load_guest_page_fault.U),
(tlb_resp.ae.st, Causes.store_access.U),
(tlb_resp.ae.ld, Causes.load_access.U),
(tlb_resp.ma.st, Causes.misaligned_store.U),
(tlb_resp.ma.ld, Causes.misaligned_load.U)
)
val xcpt = xcpts.map(_._1).orR && s1_eidx >= inst.vstart && !s1_masked
val cause = PriorityMux(xcpts)
io.issue.valid := false.B
io.issue.bits := inst
io.issue.bits.vstart := s1_eidx
io.issue.bits.vconfig.vl := s1_eidx +& 1.U
io.issue.bits.segend := inst.seg_nf
io.issue.bits.segstart := 0.U
io.issue.bits.page := tlb_resp.paddr >> pgIdxBits
io.xcpt.valid := false.B
io.pc := inst.pc
io.xcpt.bits.cause := cause
io.xcpt.bits.tval := s1_tlb_addr
io.vstart.valid := false.B
io.vstart.bits := s1_eidx
io.retire := false.B
io.vconfig.valid := false.B
io.vconfig.bits := inst.vconfig
io.vconfig.bits.vl := s1_eidx
im_access.io.pop.valid := false.B
im_access.io.pop.bits := s1_eidx
im_access.io.flush := false.B
when (s1_valid) {
io.issue.valid := !tlb_resp.miss && !xcpt && s1_eidx >= inst.vstart && !s1_masked
when (inst.seg_nf =/= 0.U && !s1_seg_single_page) {
when (!s1_seg_hi) {
io.issue.bits.segend := s1_seg_nf_consumed - 1.U
} .otherwise {
io.issue.bits.segstart := s1_seg_nf_consumed
}
}
when (s1_seg_hi || s1_seg_single_page || inst.seg_nf === 0.U) {
im_access.io.pop.valid := true.B
}
when (tlb_resp.miss || !io.issue.ready) {
tlb_backoff := 3.U
replay_kill := true.B
eidx := s1_eidx
addr := s1_base
seg_hi := s1_seg_hi
im_access.io.pop.valid := false.B
} .elsewhen (xcpt) {
val ff_nofault = ff && s1_eidx =/= 0.U
valid := false.B
replay_kill := true.B
io.retire := ff_nofault
io.xcpt.valid := !ff_nofault
io.vstart.valid := !ff_nofault
io.vconfig.valid := ff_nofault
im_access.io.flush := true.B
} .elsewhen ((s1_eidx +& 1.U) === inst.vconfig.vl && (s1_seg_hi || s1_seg_single_page || inst.seg_nf === 0.U)) {
valid := false.B
replay_kill := true.B
io.retire := true.B
io.vstart.valid := true.B
io.vstart.bits := 0.U
im_access.io.flush := true.B
}
}
}
| module IterativeTrapCheck( // @[IterativeTrapCheck.scala:83:7]
input clock, // @[IterativeTrapCheck.scala:83:7]
input reset, // @[IterativeTrapCheck.scala:83:7]
input [1:0] io_status_prv, // @[IterativeTrapCheck.scala:84:14]
input io_in_valid, // @[IterativeTrapCheck.scala:84:14]
input [39:0] io_in_bits_pc, // @[IterativeTrapCheck.scala:84:14]
input [31:0] io_in_bits_bits, // @[IterativeTrapCheck.scala:84:14]
input [8:0] io_in_bits_vconfig_vl, // @[IterativeTrapCheck.scala:84:14]
input io_in_bits_vconfig_vtype_vill, // @[IterativeTrapCheck.scala:84:14]
input [54:0] io_in_bits_vconfig_vtype_reserved, // @[IterativeTrapCheck.scala:84:14]
input io_in_bits_vconfig_vtype_vma, // @[IterativeTrapCheck.scala:84:14]
input io_in_bits_vconfig_vtype_vta, // @[IterativeTrapCheck.scala:84:14]
input [2:0] io_in_bits_vconfig_vtype_vsew, // @[IterativeTrapCheck.scala:84:14]
input io_in_bits_vconfig_vtype_vlmul_sign, // @[IterativeTrapCheck.scala:84:14]
input [1:0] io_in_bits_vconfig_vtype_vlmul_mag, // @[IterativeTrapCheck.scala:84:14]
input [7:0] io_in_bits_vstart, // @[IterativeTrapCheck.scala:84:14]
input [63:0] io_in_bits_rs1_data, // @[IterativeTrapCheck.scala:84:14]
input [63:0] io_in_bits_rs2_data, // @[IterativeTrapCheck.scala:84:14]
input [2:0] io_in_bits_rm, // @[IterativeTrapCheck.scala:84:14]
input [1:0] io_in_bits_emul, // @[IterativeTrapCheck.scala:84:14]
input [1:0] io_in_bits_mop, // @[IterativeTrapCheck.scala:84:14]
output io_busy, // @[IterativeTrapCheck.scala:84:14]
output io_s1_tlb_req_valid, // @[IterativeTrapCheck.scala:84:14]
output [39:0] io_s1_tlb_req_bits_vaddr, // @[IterativeTrapCheck.scala:84:14]
output [1:0] io_s1_tlb_req_bits_size, // @[IterativeTrapCheck.scala:84:14]
output [4:0] io_s1_tlb_req_bits_cmd, // @[IterativeTrapCheck.scala:84:14]
output [1:0] io_s1_tlb_req_bits_prv, // @[IterativeTrapCheck.scala:84:14]
input io_tlb_resp_miss, // @[IterativeTrapCheck.scala:84:14]
input [31:0] io_tlb_resp_paddr, // @[IterativeTrapCheck.scala:84:14]
input io_tlb_resp_pf_ld, // @[IterativeTrapCheck.scala:84:14]
input io_tlb_resp_pf_st, // @[IterativeTrapCheck.scala:84:14]
input io_tlb_resp_ae_ld, // @[IterativeTrapCheck.scala:84:14]
input io_tlb_resp_ae_st, // @[IterativeTrapCheck.scala:84:14]
input io_tlb_resp_ma_ld, // @[IterativeTrapCheck.scala:84:14]
input io_tlb_resp_ma_st, // @[IterativeTrapCheck.scala:84:14]
output io_retire, // @[IterativeTrapCheck.scala:84:14]
output [39:0] io_pc, // @[IterativeTrapCheck.scala:84:14]
output io_vstart_valid, // @[IterativeTrapCheck.scala:84:14]
output [7:0] io_vstart_bits, // @[IterativeTrapCheck.scala:84:14]
output io_vconfig_valid, // @[IterativeTrapCheck.scala:84:14]
output [8:0] io_vconfig_bits_vl, // @[IterativeTrapCheck.scala:84:14]
output io_vconfig_bits_vtype_vill, // @[IterativeTrapCheck.scala:84:14]
output [54:0] io_vconfig_bits_vtype_reserved, // @[IterativeTrapCheck.scala:84:14]
output io_vconfig_bits_vtype_vma, // @[IterativeTrapCheck.scala:84:14]
output io_vconfig_bits_vtype_vta, // @[IterativeTrapCheck.scala:84:14]
output [2:0] io_vconfig_bits_vtype_vsew, // @[IterativeTrapCheck.scala:84:14]
output io_vconfig_bits_vtype_vlmul_sign, // @[IterativeTrapCheck.scala:84:14]
output [1:0] io_vconfig_bits_vtype_vlmul_mag, // @[IterativeTrapCheck.scala:84:14]
output io_xcpt_valid, // @[IterativeTrapCheck.scala:84:14]
output [63:0] io_xcpt_bits_cause, // @[IterativeTrapCheck.scala:84:14]
output [39:0] io_xcpt_bits_tval, // @[IterativeTrapCheck.scala:84:14]
input io_issue_ready, // @[IterativeTrapCheck.scala:84:14]
output io_issue_valid, // @[IterativeTrapCheck.scala:84:14]
output [31:0] io_issue_bits_bits, // @[IterativeTrapCheck.scala:84:14]
output [8:0] io_issue_bits_vconfig_vl, // @[IterativeTrapCheck.scala:84:14]
output [2:0] io_issue_bits_vconfig_vtype_vsew, // @[IterativeTrapCheck.scala:84:14]
output io_issue_bits_vconfig_vtype_vlmul_sign, // @[IterativeTrapCheck.scala:84:14]
output [1:0] io_issue_bits_vconfig_vtype_vlmul_mag, // @[IterativeTrapCheck.scala:84:14]
output [7:0] io_issue_bits_vstart, // @[IterativeTrapCheck.scala:84:14]
output [2:0] io_issue_bits_segstart, // @[IterativeTrapCheck.scala:84:14]
output [2:0] io_issue_bits_segend, // @[IterativeTrapCheck.scala:84:14]
output [63:0] io_issue_bits_rs1_data, // @[IterativeTrapCheck.scala:84:14]
output [63:0] io_issue_bits_rs2_data, // @[IterativeTrapCheck.scala:84:14]
output [19:0] io_issue_bits_page, // @[IterativeTrapCheck.scala:84:14]
output [2:0] io_issue_bits_rm, // @[IterativeTrapCheck.scala:84:14]
output [1:0] io_issue_bits_emul, // @[IterativeTrapCheck.scala:84:14]
output [1:0] io_issue_bits_mop, // @[IterativeTrapCheck.scala:84:14]
input io_index_access_ready, // @[IterativeTrapCheck.scala:84:14]
output io_index_access_valid, // @[IterativeTrapCheck.scala:84:14]
output [4:0] io_index_access_vrs, // @[IterativeTrapCheck.scala:84:14]
output [8:0] io_index_access_eidx, // @[IterativeTrapCheck.scala:84:14]
output [1:0] io_index_access_eew, // @[IterativeTrapCheck.scala:84:14]
input [63:0] io_index_access_idx, // @[IterativeTrapCheck.scala:84:14]
input io_mask_access_ready, // @[IterativeTrapCheck.scala:84:14]
output io_mask_access_valid, // @[IterativeTrapCheck.scala:84:14]
output [8:0] io_mask_access_eidx, // @[IterativeTrapCheck.scala:84:14]
input io_mask_access_mask // @[IterativeTrapCheck.scala:84:14]
);
wire _im_access_io_access_ready; // @[IterativeTrapCheck.scala:118:25]
wire [63:0] _im_access_io_access_index; // @[IterativeTrapCheck.scala:118:25]
wire _im_access_io_access_mask; // @[IterativeTrapCheck.scala:118:25]
reg valid; // @[IterativeTrapCheck.scala:110:23]
reg seg_hi; // @[IterativeTrapCheck.scala:111:19]
reg [39:0] inst_pc; // @[IterativeTrapCheck.scala:112:19]
reg [31:0] inst_bits; // @[IterativeTrapCheck.scala:112:19]
reg [8:0] inst_vconfig_vl; // @[IterativeTrapCheck.scala:112:19]
reg inst_vconfig_vtype_vill; // @[IterativeTrapCheck.scala:112:19]
reg [54:0] inst_vconfig_vtype_reserved; // @[IterativeTrapCheck.scala:112:19]
reg inst_vconfig_vtype_vma; // @[IterativeTrapCheck.scala:112:19]
reg inst_vconfig_vtype_vta; // @[IterativeTrapCheck.scala:112:19]
reg [2:0] inst_vconfig_vtype_vsew; // @[IterativeTrapCheck.scala:112:19]
reg inst_vconfig_vtype_vlmul_sign; // @[IterativeTrapCheck.scala:112:19]
reg [1:0] inst_vconfig_vtype_vlmul_mag; // @[IterativeTrapCheck.scala:112:19]
reg [7:0] inst_vstart; // @[IterativeTrapCheck.scala:112:19]
reg [63:0] inst_rs1_data; // @[IterativeTrapCheck.scala:112:19]
reg [63:0] inst_rs2_data; // @[IterativeTrapCheck.scala:112:19]
reg [2:0] inst_rm; // @[IterativeTrapCheck.scala:112:19]
reg [1:0] inst_emul; // @[IterativeTrapCheck.scala:112:19]
reg [1:0] inst_mop; // @[IterativeTrapCheck.scala:112:19]
reg [7:0] eidx; // @[IterativeTrapCheck.scala:113:19]
reg [39:0] addr; // @[IterativeTrapCheck.scala:114:19]
reg [1:0] tlb_backoff; // @[IterativeTrapCheck.scala:115:28] |
Generate the Verilog code corresponding to the following Chisel files.
File ShiftReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
// Similar to the Chisel ShiftRegister but allows the user to suggest a
// name to the registers that get instantiated, and
// to provide a reset value.
object ShiftRegInit {
def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T =
(0 until n).foldRight(in) {
case (i, next) => {
val r = RegNext(next, init)
name.foreach { na => r.suggestName(s"${na}_${i}") }
r
}
}
}
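// Usage sketch (not part of the original file; `flag` is a hypothetical Bool signal):
// a 2-deep, reset-initialized delay chain whose registers get suggested names
// sync_0 and sync_1.
//   val delayed = ShiftRegInit(flag, n = 2, init = false.B, name = Some("sync"))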
/** These wrap behavioral
* shift registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
* The different types vary in their reset behavior:
* AsyncResetShiftReg -- Asynchronously reset register array
* A W(width) x D(depth) sized array is constructed from D instantiations of a
* W-wide register vector. Functionally identical to AsyncResetSyncrhonizerShiftReg,
* but only used for timing applications
*/
abstract class AbstractPipelineReg(w: Int = 1) extends Module {
val io = IO(new Bundle {
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
}
)
}
object AbstractPipelineReg {
def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = {
val chain = Module(gen)
name.foreach{ chain.suggestName(_) }
chain.io.d := in.asUInt
chain.io.q.asTypeOf(in)
}
}
class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) {
require(depth > 0, "Depth must be greater than 0.")
override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}"
val chain = List.tabulate(depth) { i =>
Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}")
}
chain.last.io.d := io.d
chain.last.io.en := true.B
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink.io.d := source.io.q
sink.io.en := true.B
}
io.q := chain.head.io.q
}
object AsyncResetShiftReg {
def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name)
def apply [T <: Data](in: T, depth: Int, name: Option[String]): T =
apply(in, depth, 0, name)
def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T =
apply(in, depth, init.litValue.toInt, name)
def apply [T <: Data](in: T, depth: Int, init: T): T =
apply (in, depth, init.litValue.toInt, None)
}
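// Usage sketch (not part of the original file; `bit_in` is a hypothetical signal):
// a 3-deep, asynchronously reset pipeline initialized to zero.
//   val bit_out = AsyncResetShiftReg(bit_in, depth = 3, name = Some("pipe_sync"))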
File AsyncQueue.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util._
case class AsyncQueueParams(
depth: Int = 8,
sync: Int = 3,
safe: Boolean = true,
// If safe is true, then effort is made to resynchronize the crossing indices when either side is reset.
// This makes it safe/possible to reset one side of the crossing (but not the other) when the queue is empty.
narrow: Boolean = false)
// If narrow is true then the read mux is moved to the source side of the crossing.
// This reduces the number of level shifters in the case where the clock crossing is also a voltage crossing,
// at the expense of a combinational path from the sink to the source and back to the sink.
{
require (depth > 0 && isPow2(depth))
require (sync >= 2)
val bits = log2Ceil(depth)
val wires = if (narrow) 1 else depth
}
object AsyncQueueParams {
// When there is only one entry, we don't need narrow.
def singleton(sync: Int = 3, safe: Boolean = true) = AsyncQueueParams(1, sync, safe, false)
}
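// Illustrative note (not in the original file): the default
// AsyncQueueParams(depth = 8, sync = 3, safe = true, narrow = false) gives
// bits = log2Ceil(8) = 3 and wires = 8, so every entry crosses the clock domain;
// with narrow = true only one entry (wires = 1) crosses and the read mux moves to
// the source side.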
class AsyncBundleSafety extends Bundle {
val ridx_valid = Input (Bool())
val widx_valid = Output(Bool())
val source_reset_n = Output(Bool())
val sink_reset_n = Input (Bool())
}
class AsyncBundle[T <: Data](private val gen: T, val params: AsyncQueueParams = AsyncQueueParams()) extends Bundle {
// Data-path synchronization
val mem = Output(Vec(params.wires, gen))
val ridx = Input (UInt((params.bits+1).W))
val widx = Output(UInt((params.bits+1).W))
val index = params.narrow.option(Input(UInt(params.bits.W)))
// Signals used to self-stabilize a safe AsyncQueue
val safe = params.safe.option(new AsyncBundleSafety)
}
object GrayCounter {
def apply(bits: Int, increment: Bool = true.B, clear: Bool = false.B, name: String = "binary"): UInt = {
val incremented = Wire(UInt(bits.W))
val binary = RegNext(next=incremented, init=0.U).suggestName(name)
incremented := Mux(clear, 0.U, binary + increment.asUInt)
incremented ^ (incremented >> 1)
}
}
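// Worked example (illustrative, not in the original file): the binary-to-Gray step
// `x ^ (x >> 1)` maps 3'b101 (5) to 3'b111 and 3'b110 (6) to 3'b101, so consecutive
// counter values differ in exactly one bit, which keeps the pointer glitch-free when
// it is sampled by the other clock domain's synchronizer.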
class AsyncValidSync(sync: Int, desc: String) extends RawModule {
val io = IO(new Bundle {
val in = Input(Bool())
val out = Output(Bool())
})
val clock = IO(Input(Clock()))
val reset = IO(Input(AsyncReset()))
withClockAndReset(clock, reset){
io.out := AsyncResetSynchronizerShiftReg(io.in, sync, Some(desc))
}
}
class AsyncQueueSource[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module {
override def desiredName = s"AsyncQueueSource_${gen.typeName}"
val io = IO(new Bundle {
// These come from the source domain
val enq = Flipped(Decoupled(gen))
// These cross to the sink clock domain
val async = new AsyncBundle(gen, params)
})
val bits = params.bits
val sink_ready = WireInit(true.B)
val mem = Reg(Vec(params.depth, gen)) // This does NOT need to be reset at all.
val widx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.enq.fire, !sink_ready, "widx_bin"))
val ridx = AsyncResetSynchronizerShiftReg(io.async.ridx, params.sync, Some("ridx_gray"))
val ready = sink_ready && widx =/= (ridx ^ (params.depth | params.depth >> 1).U)
val index = if (bits == 0) 0.U else io.async.widx(bits-1, 0) ^ (io.async.widx(bits, bits) << (bits-1))
when (io.enq.fire) { mem(index) := io.enq.bits }
val ready_reg = withReset(reset.asAsyncReset)(RegNext(next=ready, init=false.B).suggestName("ready_reg"))
io.enq.ready := ready_reg && sink_ready
val widx_reg = withReset(reset.asAsyncReset)(RegNext(next=widx, init=0.U).suggestName("widx_gray"))
io.async.widx := widx_reg
io.async.index match {
case Some(index) => io.async.mem(0) := mem(index)
case None => io.async.mem := mem
}
io.async.safe.foreach { sio =>
val source_valid_0 = Module(new AsyncValidSync(params.sync, "source_valid_0"))
val source_valid_1 = Module(new AsyncValidSync(params.sync, "source_valid_1"))
val sink_extend = Module(new AsyncValidSync(params.sync, "sink_extend"))
val sink_valid = Module(new AsyncValidSync(params.sync, "sink_valid"))
source_valid_0.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset
source_valid_1.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset
sink_extend .reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset
sink_valid .reset := reset.asAsyncReset
source_valid_0.clock := clock
source_valid_1.clock := clock
sink_extend .clock := clock
sink_valid .clock := clock
source_valid_0.io.in := true.B
source_valid_1.io.in := source_valid_0.io.out
sio.widx_valid := source_valid_1.io.out
sink_extend.io.in := sio.ridx_valid
sink_valid.io.in := sink_extend.io.out
sink_ready := sink_valid.io.out
sio.source_reset_n := !reset.asBool
// Assert that if there is stuff in the queue, then reset cannot happen
// Impossible to write because dequeue can occur on the receiving side,
// then reset allowed to happen, but write side cannot know that dequeue
// occurred.
// TODO: write some sort of sanity check assertion for users
// that denote don't reset when there is activity
// assert (!(reset || !sio.sink_reset_n) || !io.enq.valid, "Enqueue while sink is reset and AsyncQueueSource is unprotected")
// assert (!reset_rise || prev_idx_match.asBool, "Sink reset while AsyncQueueSource not empty")
}
}
class AsyncQueueSink[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module {
override def desiredName = s"AsyncQueueSink_${gen.typeName}"
val io = IO(new Bundle {
// These come from the sink domain
val deq = Decoupled(gen)
// These cross to the source clock domain
val async = Flipped(new AsyncBundle(gen, params))
})
val bits = params.bits
val source_ready = WireInit(true.B)
val ridx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.deq.fire, !source_ready, "ridx_bin"))
val widx = AsyncResetSynchronizerShiftReg(io.async.widx, params.sync, Some("widx_gray"))
val valid = source_ready && ridx =/= widx
// The mux is safe because timing analysis ensures ridx has reached the register
// On an ASIC, changes to the unread location cannot affect the selected value
// On an FPGA, only one input changes at a time => mem updates don't cause glitches
// The register only latches when the selected valued is not being written
val index = if (bits == 0) 0.U else ridx(bits-1, 0) ^ (ridx(bits, bits) << (bits-1))
io.async.index.foreach { _ := index }
// This register does not NEED to be reset, as its contents will not
// be considered unless the asynchronously reset deq valid register is set.
// It is possible that bits latches when the source domain is reset / has power cut
// This is safe, because isolation gates brought mem low before the zeroed widx reached us
val deq_bits_nxt = io.async.mem(if (params.narrow) 0.U else index)
io.deq.bits := ClockCrossingReg(deq_bits_nxt, en = valid, doInit = false, name = Some("deq_bits_reg"))
val valid_reg = withReset(reset.asAsyncReset)(RegNext(next=valid, init=false.B).suggestName("valid_reg"))
io.deq.valid := valid_reg && source_ready
val ridx_reg = withReset(reset.asAsyncReset)(RegNext(next=ridx, init=0.U).suggestName("ridx_gray"))
io.async.ridx := ridx_reg
io.async.safe.foreach { sio =>
val sink_valid_0 = Module(new AsyncValidSync(params.sync, "sink_valid_0"))
val sink_valid_1 = Module(new AsyncValidSync(params.sync, "sink_valid_1"))
val source_extend = Module(new AsyncValidSync(params.sync, "source_extend"))
val source_valid = Module(new AsyncValidSync(params.sync, "source_valid"))
sink_valid_0 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset
sink_valid_1 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset
source_extend.reset := (reset.asBool || !sio.source_reset_n).asAsyncReset
source_valid .reset := reset.asAsyncReset
sink_valid_0 .clock := clock
sink_valid_1 .clock := clock
source_extend.clock := clock
source_valid .clock := clock
sink_valid_0.io.in := true.B
sink_valid_1.io.in := sink_valid_0.io.out
sio.ridx_valid := sink_valid_1.io.out
source_extend.io.in := sio.widx_valid
source_valid.io.in := source_extend.io.out
source_ready := source_valid.io.out
sio.sink_reset_n := !reset.asBool
// TODO: write some sort of sanity check assertion for users
// that denote don't reset when there is activity
//
// val reset_and_extend = !source_ready || !sio.source_reset_n || reset.asBool
// val reset_and_extend_prev = RegNext(reset_and_extend, true.B)
// val reset_rise = !reset_and_extend_prev && reset_and_extend
// val prev_idx_match = AsyncResetReg(updateData=(io.async.widx===io.async.ridx), resetData=0)
// assert (!reset_rise || prev_idx_match.asBool, "Source reset while AsyncQueueSink not empty")
}
}
object FromAsyncBundle
{
// Sometimes it makes sense for the sink to have different sync than the source
def apply[T <: Data](x: AsyncBundle[T]): DecoupledIO[T] = apply(x, x.params.sync)
def apply[T <: Data](x: AsyncBundle[T], sync: Int): DecoupledIO[T] = {
val sink = Module(new AsyncQueueSink(chiselTypeOf(x.mem(0)), x.params.copy(sync = sync)))
sink.io.async <> x
sink.io.deq
}
}
object ToAsyncBundle
{
def apply[T <: Data](x: ReadyValidIO[T], params: AsyncQueueParams = AsyncQueueParams()): AsyncBundle[T] = {
val source = Module(new AsyncQueueSource(chiselTypeOf(x.bits), params))
source.io.enq <> x
source.io.async
}
}
class AsyncQueue[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Crossing[T] {
val io = IO(new CrossingIO(gen))
val source = withClockAndReset(io.enq_clock, io.enq_reset) { Module(new AsyncQueueSource(gen, params)) }
val sink = withClockAndReset(io.deq_clock, io.deq_reset) { Module(new AsyncQueueSink (gen, params)) }
source.io.enq <> io.enq
io.deq <> sink.io.deq
sink.io.async <> source.io.async
}
| module AsyncValidSync_48( // @[AsyncQueue.scala:58:7]
output io_out, // @[AsyncQueue.scala:59:14]
input clock, // @[AsyncQueue.scala:63:17]
input reset // @[AsyncQueue.scala:64:17]
);
wire io_in = 1'h1; // @[ShiftReg.scala:45:23]
wire _io_out_WIRE; // @[ShiftReg.scala:48:24]
wire io_out_0; // @[AsyncQueue.scala:58:7]
assign io_out_0 = _io_out_WIRE; // @[ShiftReg.scala:48:24]
AsyncResetSynchronizerShiftReg_w1_d3_i0_48 io_out_sink_valid_0 ( // @[ShiftReg.scala:45:23]
.clock (clock),
.reset (reset),
.io_q (_io_out_WIRE)
); // @[ShiftReg.scala:45:23]
assign io_out = io_out_0; // @[AsyncQueue.scala:58:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File Monitor.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceLine
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import freechips.rocketchip.diplomacy.EnableMonitors
import freechips.rocketchip.formal.{MonitorDirection, IfThen, Property, PropertyClass, TestplanTestType, TLMonitorStrictMode}
import freechips.rocketchip.util.PlusArg
case class TLMonitorArgs(edge: TLEdge)
abstract class TLMonitorBase(args: TLMonitorArgs) extends Module
{
val io = IO(new Bundle {
val in = Input(new TLBundle(args.edge.bundle))
})
def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit
legalize(io.in, args.edge, reset)
}
object TLMonitor {
def apply(enable: Boolean, node: TLNode)(implicit p: Parameters): TLNode = {
if (enable) {
EnableMonitors { implicit p => node := TLEphemeralNode()(ValName("monitor")) }
} else { node }
}
}
class TLMonitor(args: TLMonitorArgs, monitorDir: MonitorDirection = MonitorDirection.Monitor) extends TLMonitorBase(args)
{
require (args.edge.params(TLMonitorStrictMode) || (! args.edge.params(TestplanTestType).formal))
val cover_prop_class = PropertyClass.Default
//Like assert but can flip to being an assumption for formal verification
def monAssert(cond: Bool, message: String): Unit =
if (monitorDir == MonitorDirection.Monitor) {
assert(cond, message)
} else {
Property(monitorDir, cond, message, PropertyClass.Default)
}
def assume(cond: Bool, message: String): Unit =
if (monitorDir == MonitorDirection.Monitor) {
assert(cond, message)
} else {
Property(monitorDir.flip, cond, message, PropertyClass.Default)
}
def extra = {
args.edge.sourceInfo match {
case SourceLine(filename, line, col) => s" (connected at $filename:$line:$col)"
case _ => ""
}
}
def visible(address: UInt, source: UInt, edge: TLEdge) =
edge.client.clients.map { c =>
!c.sourceId.contains(source) ||
c.visibility.map(_.contains(address)).reduce(_ || _)
}.reduce(_ && _)
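  // Descriptive note (not in the original file): a transaction is "visible" when every
  // client either does not own the source ID or declares an address range containing
  // the address, so the checks below flag requests that stray outside the issuing
  // client's declared visibility.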
def legalizeFormatA(bundle: TLBundleA, edge: TLEdge): Unit = {
//switch this flag to turn on diplomacy in error messages
def diplomacyInfo = if (true) "" else "\nThe diplomacy information for the edge is as follows:\n" + edge.formatEdge + "\n"
monAssert (TLMessages.isA(bundle.opcode), "'A' channel has invalid opcode" + extra)
// Reuse these subexpressions to save some firrtl lines
val source_ok = edge.client.contains(bundle.source)
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val mask = edge.full_mask(bundle)
monAssert (visible(edge.address(bundle), bundle.source, edge), "'A' channel carries an address illegal for the specified bank visibility")
//The monitor doesn’t check for acquire T vs acquire B, it assumes that acquire B implies acquire T and only checks for acquire B
//TODO: check for acquireT?
when (bundle.opcode === TLMessages.AcquireBlock) {
monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock from a client which does not support Probe" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel AcquireBlock carries invalid source ID" + diplomacyInfo + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquireBlock smaller than a beat" + extra)
monAssert (is_aligned, "'A' channel AcquireBlock address not aligned to size" + extra)
monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquireBlock carries invalid grow param" + extra)
monAssert (~bundle.mask === 0.U, "'A' channel AcquireBlock contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel AcquireBlock is corrupt" + extra)
}
when (bundle.opcode === TLMessages.AcquirePerm) {
monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm from a client which does not support Probe" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel AcquirePerm carries invalid source ID" + diplomacyInfo + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquirePerm smaller than a beat" + extra)
monAssert (is_aligned, "'A' channel AcquirePerm address not aligned to size" + extra)
monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquirePerm carries invalid grow param" + extra)
monAssert (bundle.param =/= TLPermissions.NtoB, "'A' channel AcquirePerm requests NtoB" + extra)
monAssert (~bundle.mask === 0.U, "'A' channel AcquirePerm contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel AcquirePerm is corrupt" + extra)
}
when (bundle.opcode === TLMessages.Get) {
monAssert (edge.master.emitsGet(bundle.source, bundle.size), "'A' channel carries Get type which master claims it can't emit" + diplomacyInfo + extra)
monAssert (edge.slave.supportsGetSafe(edge.address(bundle), bundle.size, None), "'A' channel carries Get type which slave claims it can't support" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel Get carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Get address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel Get carries invalid param" + extra)
monAssert (bundle.mask === mask, "'A' channel Get contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel Get is corrupt" + extra)
}
when (bundle.opcode === TLMessages.PutFullData) {
monAssert (edge.master.emitsPutFull(bundle.source, bundle.size) && edge.slave.supportsPutFullSafe(edge.address(bundle), bundle.size), "'A' channel carries PutFull type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel PutFull carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel PutFull address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel PutFull carries invalid param" + extra)
monAssert (bundle.mask === mask, "'A' channel PutFull contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.PutPartialData) {
monAssert (edge.master.emitsPutPartial(bundle.source, bundle.size) && edge.slave.supportsPutPartialSafe(edge.address(bundle), bundle.size), "'A' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel PutPartial carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel PutPartial address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel PutPartial carries invalid param" + extra)
monAssert ((bundle.mask & ~mask) === 0.U, "'A' channel PutPartial contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.ArithmeticData) {
monAssert (edge.master.emitsArithmetic(bundle.source, bundle.size) && edge.slave.supportsArithmeticSafe(edge.address(bundle), bundle.size), "'A' channel carries Arithmetic type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Arithmetic carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Arithmetic address not aligned to size" + extra)
monAssert (TLAtomics.isArithmetic(bundle.param), "'A' channel Arithmetic carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Arithmetic contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.LogicalData) {
monAssert (edge.master.emitsLogical(bundle.source, bundle.size) && edge.slave.supportsLogicalSafe(edge.address(bundle), bundle.size), "'A' channel carries Logical type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Logical carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Logical address not aligned to size" + extra)
monAssert (TLAtomics.isLogical(bundle.param), "'A' channel Logical carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Logical contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.Hint) {
monAssert (edge.master.emitsHint(bundle.source, bundle.size) && edge.slave.supportsHintSafe(edge.address(bundle), bundle.size), "'A' channel carries Hint type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Hint carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Hint address not aligned to size" + extra)
monAssert (TLHints.isHints(bundle.param), "'A' channel Hint carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Hint contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel Hint is corrupt" + extra)
}
}
def legalizeFormatB(bundle: TLBundleB, edge: TLEdge): Unit = {
monAssert (TLMessages.isB(bundle.opcode), "'B' channel has invalid opcode" + extra)
monAssert (visible(edge.address(bundle), bundle.source, edge), "'B' channel carries an address illegal for the specified bank visibility")
// Reuse these subexpressions to save some firrtl lines
val address_ok = edge.manager.containsSafe(edge.address(bundle))
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val mask = edge.full_mask(bundle)
val legal_source = Mux1H(edge.client.find(bundle.source), edge.client.clients.map(c => c.sourceId.start.U)) === bundle.source
when (bundle.opcode === TLMessages.Probe) {
assume (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'B' channel carries Probe type which is unexpected using diplomatic parameters" + extra)
assume (address_ok, "'B' channel Probe carries unmanaged address" + extra)
assume (legal_source, "'B' channel Probe carries source that is not first source" + extra)
assume (is_aligned, "'B' channel Probe address not aligned to size" + extra)
assume (TLPermissions.isCap(bundle.param), "'B' channel Probe carries invalid cap param" + extra)
assume (bundle.mask === mask, "'B' channel Probe contains invalid mask" + extra)
assume (!bundle.corrupt, "'B' channel Probe is corrupt" + extra)
}
when (bundle.opcode === TLMessages.Get) {
monAssert (edge.master.supportsGet(edge.source(bundle), bundle.size) && edge.slave.emitsGetSafe(edge.address(bundle), bundle.size), "'B' channel carries Get type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel Get carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Get carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Get address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel Get carries invalid param" + extra)
monAssert (bundle.mask === mask, "'B' channel Get contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'B' channel Get is corrupt" + extra)
}
when (bundle.opcode === TLMessages.PutFullData) {
monAssert (edge.master.supportsPutFull(edge.source(bundle), bundle.size) && edge.slave.emitsPutFullSafe(edge.address(bundle), bundle.size), "'B' channel carries PutFull type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel PutFull carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel PutFull carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel PutFull address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel PutFull carries invalid param" + extra)
monAssert (bundle.mask === mask, "'B' channel PutFull contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.PutPartialData) {
monAssert (edge.master.supportsPutPartial(edge.source(bundle), bundle.size) && edge.slave.emitsPutPartialSafe(edge.address(bundle), bundle.size), "'B' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel PutPartial carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel PutPartial carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel PutPartial address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel PutPartial carries invalid param" + extra)
monAssert ((bundle.mask & ~mask) === 0.U, "'B' channel PutPartial contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.ArithmeticData) {
monAssert (edge.master.supportsArithmetic(edge.source(bundle), bundle.size) && edge.slave.emitsArithmeticSafe(edge.address(bundle), bundle.size), "'B' channel carries Arithmetic type unsupported by master" + extra)
monAssert (address_ok, "'B' channel Arithmetic carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Arithmetic carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Arithmetic address not aligned to size" + extra)
monAssert (TLAtomics.isArithmetic(bundle.param), "'B' channel Arithmetic carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'B' channel Arithmetic contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.LogicalData) {
monAssert (edge.master.supportsLogical(edge.source(bundle), bundle.size) && edge.slave.emitsLogicalSafe(edge.address(bundle), bundle.size), "'B' channel carries Logical type unsupported by client" + extra)
monAssert (address_ok, "'B' channel Logical carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Logical carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Logical address not aligned to size" + extra)
monAssert (TLAtomics.isLogical(bundle.param), "'B' channel Logical carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'B' channel Logical contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.Hint) {
monAssert (edge.master.supportsHint(edge.source(bundle), bundle.size) && edge.slave.emitsHintSafe(edge.address(bundle), bundle.size), "'B' channel carries Hint type unsupported by client" + extra)
monAssert (address_ok, "'B' channel Hint carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Hint carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Hint address not aligned to size" + extra)
monAssert (bundle.mask === mask, "'B' channel Hint contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'B' channel Hint is corrupt" + extra)
}
}
def legalizeFormatC(bundle: TLBundleC, edge: TLEdge): Unit = {
monAssert (TLMessages.isC(bundle.opcode), "'C' channel has invalid opcode" + extra)
val source_ok = edge.client.contains(bundle.source)
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val address_ok = edge.manager.containsSafe(edge.address(bundle))
monAssert (visible(edge.address(bundle), bundle.source, edge), "'C' channel carries an address illegal for the specified bank visibility")
when (bundle.opcode === TLMessages.ProbeAck) {
monAssert (address_ok, "'C' channel ProbeAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel ProbeAck carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAck smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ProbeAck address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAck carries invalid report param" + extra)
monAssert (!bundle.corrupt, "'C' channel ProbeAck is corrupt" + extra)
}
when (bundle.opcode === TLMessages.ProbeAckData) {
monAssert (address_ok, "'C' channel ProbeAckData carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel ProbeAckData carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAckData smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ProbeAckData address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAckData carries invalid report param" + extra)
}
when (bundle.opcode === TLMessages.Release) {
monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries Release type unsupported by manager" + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra)
monAssert (source_ok, "'C' channel Release carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel Release smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel Release address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel Release carries invalid report param" + extra)
monAssert (!bundle.corrupt, "'C' channel Release is corrupt" + extra)
}
when (bundle.opcode === TLMessages.ReleaseData) {
monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData type unsupported by manager" + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData from a client which does not support Probe" + extra)
monAssert (source_ok, "'C' channel ReleaseData carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ReleaseData smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ReleaseData address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ReleaseData carries invalid report param" + extra)
}
when (bundle.opcode === TLMessages.AccessAck) {
monAssert (address_ok, "'C' channel AccessAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel AccessAck carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel AccessAck address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel AccessAck carries invalid param" + extra)
monAssert (!bundle.corrupt, "'C' channel AccessAck is corrupt" + extra)
}
when (bundle.opcode === TLMessages.AccessAckData) {
monAssert (address_ok, "'C' channel AccessAckData carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel AccessAckData carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel AccessAckData address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel AccessAckData carries invalid param" + extra)
}
when (bundle.opcode === TLMessages.HintAck) {
monAssert (address_ok, "'C' channel HintAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel HintAck carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel HintAck address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel HintAck carries invalid param" + extra)
monAssert (!bundle.corrupt, "'C' channel HintAck is corrupt" + extra)
}
}
def legalizeFormatD(bundle: TLBundleD, edge: TLEdge): Unit = {
assume (TLMessages.isD(bundle.opcode), "'D' channel has invalid opcode" + extra)
val source_ok = edge.client.contains(bundle.source)
val sink_ok = bundle.sink < edge.manager.endSinkId.U
val deny_put_ok = edge.manager.mayDenyPut.B
val deny_get_ok = edge.manager.mayDenyGet.B
when (bundle.opcode === TLMessages.ReleaseAck) {
assume (source_ok, "'D' channel ReleaseAck carries invalid source ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel ReleaseAck smaller than a beat" + extra)
assume (bundle.param === 0.U, "'D' channel ReleaseAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel ReleaseAck is corrupt" + extra)
assume (!bundle.denied, "'D' channel ReleaseAck is denied" + extra)
}
when (bundle.opcode === TLMessages.Grant) {
assume (source_ok, "'D' channel Grant carries invalid source ID" + extra)
assume (sink_ok, "'D' channel Grant carries invalid sink ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel Grant smaller than a beat" + extra)
assume (TLPermissions.isCap(bundle.param), "'D' channel Grant carries invalid cap param" + extra)
assume (bundle.param =/= TLPermissions.toN, "'D' channel Grant carries toN param" + extra)
assume (!bundle.corrupt, "'D' channel Grant is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel Grant is denied" + extra)
}
when (bundle.opcode === TLMessages.GrantData) {
assume (source_ok, "'D' channel GrantData carries invalid source ID" + extra)
assume (sink_ok, "'D' channel GrantData carries invalid sink ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel GrantData smaller than a beat" + extra)
assume (TLPermissions.isCap(bundle.param), "'D' channel GrantData carries invalid cap param" + extra)
assume (bundle.param =/= TLPermissions.toN, "'D' channel GrantData carries toN param" + extra)
assume (!bundle.denied || bundle.corrupt, "'D' channel GrantData is denied but not corrupt" + extra)
assume (deny_get_ok || !bundle.denied, "'D' channel GrantData is denied" + extra)
}
when (bundle.opcode === TLMessages.AccessAck) {
assume (source_ok, "'D' channel AccessAck carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel AccessAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel AccessAck is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel AccessAck is denied" + extra)
}
when (bundle.opcode === TLMessages.AccessAckData) {
assume (source_ok, "'D' channel AccessAckData carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel AccessAckData carries invalid param" + extra)
assume (!bundle.denied || bundle.corrupt, "'D' channel AccessAckData is denied but not corrupt" + extra)
assume (deny_get_ok || !bundle.denied, "'D' channel AccessAckData is denied" + extra)
}
when (bundle.opcode === TLMessages.HintAck) {
assume (source_ok, "'D' channel HintAck carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel HintAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel HintAck is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel HintAck is denied" + extra)
}
}
def legalizeFormatE(bundle: TLBundleE, edge: TLEdge): Unit = {
val sink_ok = bundle.sink < edge.manager.endSinkId.U
monAssert (sink_ok, "'E' channel carries invalid sink ID" + extra)
}
def legalizeFormat(bundle: TLBundle, edge: TLEdge) = {
when (bundle.a.valid) { legalizeFormatA(bundle.a.bits, edge) }
when (bundle.d.valid) { legalizeFormatD(bundle.d.bits, edge) }
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
when (bundle.b.valid) { legalizeFormatB(bundle.b.bits, edge) }
when (bundle.c.valid) { legalizeFormatC(bundle.c.bits, edge) }
when (bundle.e.valid) { legalizeFormatE(bundle.e.bits, edge) }
} else {
monAssert (!bundle.b.valid, "'B' channel valid and not TL-C" + extra)
monAssert (!bundle.c.valid, "'C' channel valid and not TL-C" + extra)
monAssert (!bundle.e.valid, "'E' channel valid and not TL-C" + extra)
}
}
def legalizeMultibeatA(a: DecoupledIO[TLBundleA], edge: TLEdge): Unit = {
val a_first = edge.first(a.bits, a.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (a.valid && !a_first) {
monAssert (a.bits.opcode === opcode, "'A' channel opcode changed within multibeat operation" + extra)
monAssert (a.bits.param === param, "'A' channel param changed within multibeat operation" + extra)
monAssert (a.bits.size === size, "'A' channel size changed within multibeat operation" + extra)
monAssert (a.bits.source === source, "'A' channel source changed within multibeat operation" + extra)
monAssert (a.bits.address === address, "'A' channel address changed within multibeat operation" + extra)
}
when (a.fire && a_first) {
opcode := a.bits.opcode
param := a.bits.param
size := a.bits.size
source := a.bits.source
address := a.bits.address
}
}
def legalizeMultibeatB(b: DecoupledIO[TLBundleB], edge: TLEdge): Unit = {
val b_first = edge.first(b.bits, b.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (b.valid && !b_first) {
monAssert (b.bits.opcode === opcode, "'B' channel opcode changed within multibeat operation" + extra)
monAssert (b.bits.param === param, "'B' channel param changed within multibeat operation" + extra)
monAssert (b.bits.size === size, "'B' channel size changed within multibeat operation" + extra)
monAssert (b.bits.source === source, "'B' channel source changed within multibeat operation" + extra)
monAssert (b.bits.address === address, "'B' channel address changed within multibeat operation" + extra)
}
when (b.fire && b_first) {
opcode := b.bits.opcode
param := b.bits.param
size := b.bits.size
source := b.bits.source
address := b.bits.address
}
}
def legalizeADSourceFormal(bundle: TLBundle, edge: TLEdge): Unit = {
// Symbolic variable
val sym_source = Wire(UInt(edge.client.endSourceId.W))
// TODO: Connect sym_source to a fixed value for simulation and to a
// free wire in formal
sym_source := 0.U
// Type casting Int to UInt
val maxSourceId = Wire(UInt(edge.client.endSourceId.W))
maxSourceId := edge.client.endSourceId.U
// Delayed version of sym_source
val sym_source_d = Reg(UInt(edge.client.endSourceId.W))
sym_source_d := sym_source
// These will be constraints for FV setup
Property(
MonitorDirection.Monitor,
(sym_source === sym_source_d),
"sym_source should remain stable",
PropertyClass.Default)
Property(
MonitorDirection.Monitor,
(sym_source <= maxSourceId),
"sym_source should take legal value",
PropertyClass.Default)
val my_resp_pend = RegInit(false.B)
val my_opcode = Reg(UInt())
val my_size = Reg(UInt())
val a_first = bundle.a.valid && edge.first(bundle.a.bits, bundle.a.fire)
val d_first = bundle.d.valid && edge.first(bundle.d.bits, bundle.d.fire)
val my_a_first_beat = a_first && (bundle.a.bits.source === sym_source)
val my_d_first_beat = d_first && (bundle.d.bits.source === sym_source)
val my_clr_resp_pend = (bundle.d.fire && my_d_first_beat)
val my_set_resp_pend = (bundle.a.fire && my_a_first_beat && !my_clr_resp_pend)
when (my_set_resp_pend) {
my_resp_pend := true.B
} .elsewhen (my_clr_resp_pend) {
my_resp_pend := false.B
}
when (my_a_first_beat) {
my_opcode := bundle.a.bits.opcode
my_size := bundle.a.bits.size
}
val my_resp_size = Mux(my_a_first_beat, bundle.a.bits.size, my_size)
val my_resp_opcode = Mux(my_a_first_beat, bundle.a.bits.opcode, my_opcode)
val my_resp_opcode_legal = Wire(Bool())
when ((my_resp_opcode === TLMessages.Get) || (my_resp_opcode === TLMessages.ArithmeticData) ||
(my_resp_opcode === TLMessages.LogicalData)) {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAckData)
} .elsewhen ((my_resp_opcode === TLMessages.PutFullData) || (my_resp_opcode === TLMessages.PutPartialData)) {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAck)
} .otherwise {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.HintAck)
}
monAssert (IfThen(my_resp_pend, !my_a_first_beat),
"Request message should not be sent with a source ID, for which a response message" +
"is already pending (not received until current cycle) for a prior request message" +
"with the same source ID" + extra)
assume (IfThen(my_clr_resp_pend, (my_set_resp_pend || my_resp_pend)),
"Response message should be accepted with a source ID only if a request message with the" +
"same source ID has been accepted or is being accepted in the current cycle" + extra)
assume (IfThen(my_d_first_beat, (my_a_first_beat || my_resp_pend)),
"Response message should be sent with a source ID only if a request message with the" +
"same source ID has been accepted or is being sent in the current cycle" + extra)
assume (IfThen(my_d_first_beat, (bundle.d.bits.size === my_resp_size)),
"If d_valid is 1, then d_size should be same as a_size of the corresponding request" +
"message" + extra)
assume (IfThen(my_d_first_beat, my_resp_opcode_legal),
"If d_valid is 1, then d_opcode should correspond with a_opcode of the corresponding" +
"request message" + extra)
}
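// Sketch of the intent behind the formal helper above: only one (symbolic) source ID is
// tracked, but because a formal solver may choose sym_source freely, the properties cover
// every source ID without per-source state. A sample trace:
//   cycle 0: A fires with a.source === sym_source -> my_resp_pend is set, opcode/size captured
//   cycle k: D fires with d.source === sym_source -> d.size/d.opcode must match the captured
//            request, and my_resp_pend is cleared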
def legalizeMultibeatC(c: DecoupledIO[TLBundleC], edge: TLEdge): Unit = {
val c_first = edge.first(c.bits, c.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (c.valid && !c_first) {
monAssert (c.bits.opcode === opcode, "'C' channel opcode changed within multibeat operation" + extra)
monAssert (c.bits.param === param, "'C' channel param changed within multibeat operation" + extra)
monAssert (c.bits.size === size, "'C' channel size changed within multibeat operation" + extra)
monAssert (c.bits.source === source, "'C' channel source changed within multibeat operation" + extra)
monAssert (c.bits.address === address, "'C' channel address changed within multibeat operation" + extra)
}
when (c.fire && c_first) {
opcode := c.bits.opcode
param := c.bits.param
size := c.bits.size
source := c.bits.source
address := c.bits.address
}
}
def legalizeMultibeatD(d: DecoupledIO[TLBundleD], edge: TLEdge): Unit = {
val d_first = edge.first(d.bits, d.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val sink = Reg(UInt())
val denied = Reg(Bool())
when (d.valid && !d_first) {
assume (d.bits.opcode === opcode, "'D' channel opcode changed within multibeat operation" + extra)
assume (d.bits.param === param, "'D' channel param changed within multibeat operation" + extra)
assume (d.bits.size === size, "'D' channel size changed within multibeat operation" + extra)
assume (d.bits.source === source, "'D' channel source changed within multibeat operation" + extra)
assume (d.bits.sink === sink, "'D' channel sink changed within multibeat operation" + extra)
assume (d.bits.denied === denied, "'D' channel denied changed within multibeat operation" + extra)
}
when (d.fire && d_first) {
opcode := d.bits.opcode
param := d.bits.param
size := d.bits.size
source := d.bits.source
sink := d.bits.sink
denied := d.bits.denied
}
}
def legalizeMultibeat(bundle: TLBundle, edge: TLEdge): Unit = {
legalizeMultibeatA(bundle.a, edge)
legalizeMultibeatD(bundle.d, edge)
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
legalizeMultibeatB(bundle.b, edge)
legalizeMultibeatC(bundle.c, edge)
}
}
// This is left in for almond, which doesn't adhere to the TileLink protocol
@deprecated("Use legalizeADSource instead if possible","")
def legalizeADSourceOld(bundle: TLBundle, edge: TLEdge): Unit = {
val inflight = RegInit(0.U(edge.client.endSourceId.W))
val a_first = edge.first(bundle.a.bits, bundle.a.fire)
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
val a_set = WireInit(0.U(edge.client.endSourceId.W))
when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) {
a_set := UIntToOH(bundle.a.bits.source)
assert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra)
}
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
assume((a_set | inflight)(bundle.d.bits.source), "'D' channel acknowledged for nothing inflight" + extra)
}
if (edge.manager.minLatency > 0) {
assume(a_set =/= d_clr || !a_set.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra)
}
inflight := (inflight | a_set) & ~d_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
assert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeADSource(bundle: TLBundle, edge: TLEdge): Unit = {
val a_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset)
val a_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything
val log_a_opcode_bus_size = log2Ceil(a_opcode_bus_size)
val log_a_size_bus_size = log2Ceil(a_size_bus_size)
def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits
val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) // size up to avoid width error
inflight.suggestName("inflight")
val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
inflight_opcodes.suggestName("inflight_opcodes")
val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
inflight_sizes.suggestName("inflight_sizes")
val a_first = edge.first(bundle.a.bits, bundle.a.fire)
a_first.suggestName("a_first")
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
d_first.suggestName("d_first")
val a_set = WireInit(0.U(edge.client.endSourceId.W))
val a_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
a_set.suggestName("a_set")
a_set_wo_ready.suggestName("a_set_wo_ready")
val a_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
a_opcodes_set.suggestName("a_opcodes_set")
val a_sizes_set = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
a_sizes_set.suggestName("a_sizes_set")
val a_opcode_lookup = WireInit(0.U((a_opcode_bus_size - 1).W))
a_opcode_lookup.suggestName("a_opcode_lookup")
a_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_a_opcode_bus_size.U) & size_to_numfullbits(1.U << log_a_opcode_bus_size.U)) >> 1.U
val a_size_lookup = WireInit(0.U((1 << log_a_size_bus_size).W))
a_size_lookup.suggestName("a_size_lookup")
a_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_a_size_bus_size.U) & size_to_numfullbits(1.U << log_a_size_bus_size.U)) >> 1.U
val responseMap = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.Grant, TLMessages.Grant))
val responseMapSecondOption = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.GrantData, TLMessages.Grant))
val a_opcodes_set_interm = WireInit(0.U(a_opcode_bus_size.W))
a_opcodes_set_interm.suggestName("a_opcodes_set_interm")
val a_sizes_set_interm = WireInit(0.U(a_size_bus_size.W))
a_sizes_set_interm.suggestName("a_sizes_set_interm")
when (bundle.a.valid && a_first && edge.isRequest(bundle.a.bits)) {
a_set_wo_ready := UIntToOH(bundle.a.bits.source)
}
when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) {
a_set := UIntToOH(bundle.a.bits.source)
a_opcodes_set_interm := (bundle.a.bits.opcode << 1.U) | 1.U
a_sizes_set_interm := (bundle.a.bits.size << 1.U) | 1.U
a_opcodes_set := (a_opcodes_set_interm) << (bundle.a.bits.source << log_a_opcode_bus_size.U)
a_sizes_set := (a_sizes_set_interm) << (bundle.a.bits.source << log_a_size_bus_size.U)
monAssert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra)
}
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
d_clr.suggestName("d_clr")
d_clr_wo_ready.suggestName("d_clr_wo_ready")
val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
d_opcodes_clr.suggestName("d_opcodes_clr")
val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
d_sizes_clr.suggestName("d_sizes_clr")
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr_wo_ready := UIntToOH(bundle.d.bits.source)
}
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
d_opcodes_clr := size_to_numfullbits(1.U << log_a_opcode_bus_size.U) << (bundle.d.bits.source << log_a_opcode_bus_size.U)
d_sizes_clr := size_to_numfullbits(1.U << log_a_size_bus_size.U) << (bundle.d.bits.source << log_a_size_bus_size.U)
}
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
val same_cycle_resp = bundle.a.valid && a_first && edge.isRequest(bundle.a.bits) && (bundle.a.bits.source === bundle.d.bits.source)
assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra)
when (same_cycle_resp) {
assume((bundle.d.bits.opcode === responseMap(bundle.a.bits.opcode)) ||
(bundle.d.bits.opcode === responseMapSecondOption(bundle.a.bits.opcode)), "'D' channel contains improper opcode response" + extra)
assume((bundle.a.bits.size === bundle.d.bits.size), "'D' channel contains improper response size" + extra)
} .otherwise {
assume((bundle.d.bits.opcode === responseMap(a_opcode_lookup)) ||
(bundle.d.bits.opcode === responseMapSecondOption(a_opcode_lookup)), "'D' channel contains improper opcode response" + extra)
assume((bundle.d.bits.size === a_size_lookup), "'D' channel contains improper response size" + extra)
}
}
when(bundle.d.valid && d_first && a_first && bundle.a.valid && (bundle.a.bits.source === bundle.d.bits.source) && !d_release_ack) {
assume((!bundle.d.ready) || bundle.a.ready, "ready check")
}
if (edge.manager.minLatency > 0) {
assume(a_set_wo_ready =/= d_clr_wo_ready || !a_set_wo_ready.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra)
}
inflight := (inflight | a_set) & ~d_clr
inflight_opcodes := (inflight_opcodes | a_opcodes_set) & ~d_opcodes_clr
inflight_sizes := (inflight_sizes | a_sizes_set) & ~d_sizes_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U }
}
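// Worked example of the packed bookkeeping above (a_opcode_bus_size = 3 + 1 = 4, log2 = 2):
// an accepted Get (opcode 4) from source 2 stores (4 << 1) | 1 = 9 into inflight_opcodes at
// bit offset 2 << 2 = 8, the low bit marking the entry as in use. On the matching D beat,
// a_opcode_lookup = ((inflight_opcodes >> 8) & 0xF) >> 1 = 4, so responseMap(4) = AccessAckData
// (or responseMapSecondOption(4) = AccessAckData) is the only legal d.opcode.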
def legalizeCDSource(bundle: TLBundle, edge: TLEdge): Unit = {
val c_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset)
val c_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything
val log_c_opcode_bus_size = log2Ceil(c_opcode_bus_size)
val log_c_size_bus_size = log2Ceil(c_size_bus_size)
def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits
val inflight = RegInit(0.U((2 max edge.client.endSourceId).W))
val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
inflight.suggestName("inflight")
inflight_opcodes.suggestName("inflight_opcodes")
inflight_sizes.suggestName("inflight_sizes")
val c_first = edge.first(bundle.c.bits, bundle.c.fire)
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
c_first.suggestName("c_first")
d_first.suggestName("d_first")
val c_set = WireInit(0.U(edge.client.endSourceId.W))
val c_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
val c_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val c_sizes_set = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
c_set.suggestName("c_set")
c_set_wo_ready.suggestName("c_set_wo_ready")
c_opcodes_set.suggestName("c_opcodes_set")
c_sizes_set.suggestName("c_sizes_set")
val c_opcode_lookup = WireInit(0.U((1 << log_c_opcode_bus_size).W))
val c_size_lookup = WireInit(0.U((1 << log_c_size_bus_size).W))
c_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_c_opcode_bus_size.U) & size_to_numfullbits(1.U << log_c_opcode_bus_size.U)) >> 1.U
c_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_c_size_bus_size.U) & size_to_numfullbits(1.U << log_c_size_bus_size.U)) >> 1.U
c_opcode_lookup.suggestName("c_opcode_lookup")
c_size_lookup.suggestName("c_size_lookup")
val c_opcodes_set_interm = WireInit(0.U(c_opcode_bus_size.W))
val c_sizes_set_interm = WireInit(0.U(c_size_bus_size.W))
c_opcodes_set_interm.suggestName("c_opcodes_set_interm")
c_sizes_set_interm.suggestName("c_sizes_set_interm")
when (bundle.c.valid && c_first && edge.isRequest(bundle.c.bits)) {
c_set_wo_ready := UIntToOH(bundle.c.bits.source)
}
when (bundle.c.fire && c_first && edge.isRequest(bundle.c.bits)) {
c_set := UIntToOH(bundle.c.bits.source)
c_opcodes_set_interm := (bundle.c.bits.opcode << 1.U) | 1.U
c_sizes_set_interm := (bundle.c.bits.size << 1.U) | 1.U
c_opcodes_set := (c_opcodes_set_interm) << (bundle.c.bits.source << log_c_opcode_bus_size.U)
c_sizes_set := (c_sizes_set_interm) << (bundle.c.bits.source << log_c_size_bus_size.U)
monAssert(!inflight(bundle.c.bits.source), "'C' channel re-used a source ID" + extra)
}
val c_probe_ack = bundle.c.bits.opcode === TLMessages.ProbeAck || bundle.c.bits.opcode === TLMessages.ProbeAckData
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
d_clr.suggestName("d_clr")
d_clr_wo_ready.suggestName("d_clr_wo_ready")
d_opcodes_clr.suggestName("d_opcodes_clr")
d_sizes_clr.suggestName("d_sizes_clr")
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
d_clr_wo_ready := UIntToOH(bundle.d.bits.source)
}
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
d_opcodes_clr := size_to_numfullbits(1.U << log_c_opcode_bus_size.U) << (bundle.d.bits.source << log_c_opcode_bus_size.U)
d_sizes_clr := size_to_numfullbits(1.U << log_c_size_bus_size.U) << (bundle.d.bits.source << log_c_size_bus_size.U)
}
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
val same_cycle_resp = bundle.c.valid && c_first && edge.isRequest(bundle.c.bits) && (bundle.c.bits.source === bundle.d.bits.source)
assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra)
when (same_cycle_resp) {
assume((bundle.d.bits.size === bundle.c.bits.size), "'D' channel contains improper response size" + extra)
} .otherwise {
assume((bundle.d.bits.size === c_size_lookup), "'D' channel contains improper response size" + extra)
}
}
when(bundle.d.valid && d_first && c_first && bundle.c.valid && (bundle.c.bits.source === bundle.d.bits.source) && d_release_ack && !c_probe_ack) {
assume((!bundle.d.ready) || bundle.c.ready, "ready check")
}
if (edge.manager.minLatency > 0) {
when (c_set_wo_ready.orR) {
assume(c_set_wo_ready =/= d_clr_wo_ready, s"'C' and 'D' concurrent, despite minlatency > 0" + extra)
}
}
inflight := (inflight | c_set) & ~d_clr
inflight_opcodes := (inflight_opcodes | c_opcodes_set) & ~d_opcodes_clr
inflight_sizes := (inflight_sizes | c_sizes_set) & ~d_sizes_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.c.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeDESink(bundle: TLBundle, edge: TLEdge): Unit = {
val inflight = RegInit(0.U(edge.manager.endSinkId.W))
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
val e_first = true.B
val d_set = WireInit(0.U(edge.manager.endSinkId.W))
when (bundle.d.fire && d_first && edge.isRequest(bundle.d.bits)) {
d_set := UIntToOH(bundle.d.bits.sink)
assume(!inflight(bundle.d.bits.sink), "'D' channel re-used a sink ID" + extra)
}
val e_clr = WireInit(0.U(edge.manager.endSinkId.W))
when (bundle.e.fire && e_first && edge.isResponse(bundle.e.bits)) {
e_clr := UIntToOH(bundle.e.bits.sink)
monAssert((d_set | inflight)(bundle.e.bits.sink), "'E' channel acknowledged for nothing inflight" + extra)
}
// edge.client.minLatency applies to BC, not DE
inflight := (inflight | d_set) & ~e_clr
}
def legalizeUnique(bundle: TLBundle, edge: TLEdge): Unit = {
val sourceBits = log2Ceil(edge.client.endSourceId)
val tooBig = 14 // >16kB worth of flight information gets to be too much
if (sourceBits > tooBig) {
println(s"WARNING: TLMonitor instantiated on a bus with source bits (${sourceBits}) > ${tooBig}; A=>D transaction flight will not be checked")
} else {
if (args.edge.params(TestplanTestType).simulation) {
if (args.edge.params(TLMonitorStrictMode)) {
legalizeADSource(bundle, edge)
legalizeCDSource(bundle, edge)
} else {
legalizeADSourceOld(bundle, edge)
}
}
if (args.edge.params(TestplanTestType).formal) {
legalizeADSourceFormal(bundle, edge)
}
}
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
// legalizeBCSourceAddress(bundle, edge) // too much state needed to synthesize...
val sinkBits = log2Ceil(edge.manager.endSinkId)
if (sinkBits > tooBig) {
println(s"WARNING: TLMonitor instantiated on a bus with sink bits (${sinkBits}) > ${tooBig}; D=>E transaction flight will not be checked")
} else {
legalizeDESink(bundle, edge)
}
}
}
def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit = {
legalizeFormat (bundle, edge)
legalizeMultibeat (bundle, edge)
legalizeUnique (bundle, edge)
}
}
File Misc.scala:
// See LICENSE.Berkeley for license details.
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util._
import chisel3.util.random.LFSR
import org.chipsalliance.cde.config.Parameters
import scala.math._
class ParameterizedBundle(implicit p: Parameters) extends Bundle
trait Clocked extends Bundle {
val clock = Clock()
val reset = Bool()
}
object DecoupledHelper {
def apply(rvs: Bool*) = new DecoupledHelper(rvs)
}
class DecoupledHelper(val rvs: Seq[Bool]) {
def fire(exclude: Bool, includes: Bool*) = {
require(rvs.contains(exclude), "Excluded Bool not present in DecoupledHelper! Note that DecoupledHelper uses referential equality for exclusion! If you don't want to exclude anything, use fire()!")
(rvs.filter(_ ne exclude) ++ includes).reduce(_ && _)
}
def fire() = {
rvs.reduce(_ && _)
}
}
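// Usage sketch (the signal names below are hypothetical): DecoupledHelper ANDs all of its
// conditions, and fire(exclude) drops exactly one of them, so each handshake output can be
// driven without depending on itself:
//   val helper = DecoupledHelper(in.valid, out.ready, tokenAvailable)
//   out.valid := helper.fire(out.ready)   // in.valid && tokenAvailable
//   in.ready  := helper.fire(in.valid)    // out.ready && tokenAvailable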
object MuxT {
def apply[T <: Data, U <: Data](cond: Bool, con: (T, U), alt: (T, U)): (T, U) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2))
def apply[T <: Data, U <: Data, W <: Data](cond: Bool, con: (T, U, W), alt: (T, U, W)): (T, U, W) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3))
def apply[T <: Data, U <: Data, W <: Data, X <: Data](cond: Bool, con: (T, U, W, X), alt: (T, U, W, X)): (T, U, W, X) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3), Mux(cond, con._4, alt._4))
}
/** Creates a cascade of n MuxTs to search for a key value. */
object MuxTLookup {
def apply[S <: UInt, T <: Data, U <: Data](key: S, default: (T, U), mapping: Seq[(S, (T, U))]): (T, U) = {
var res = default
for ((k, v) <- mapping.reverse)
res = MuxT(k === key, v, res)
res
}
def apply[S <: UInt, T <: Data, U <: Data, W <: Data](key: S, default: (T, U, W), mapping: Seq[(S, (T, U, W))]): (T, U, W) = {
var res = default
for ((k, v) <- mapping.reverse)
res = MuxT(k === key, v, res)
res
}
}
object ValidMux {
def apply[T <: Data](v1: ValidIO[T], v2: ValidIO[T]*): ValidIO[T] = {
apply(v1 +: v2.toSeq)
}
def apply[T <: Data](valids: Seq[ValidIO[T]]): ValidIO[T] = {
val out = Wire(Valid(valids.head.bits.cloneType))
out.valid := valids.map(_.valid).reduce(_ || _)
out.bits := MuxCase(valids.head.bits,
valids.map(v => (v.valid -> v.bits)))
out
}
}
object Str
{
def apply(s: String): UInt = {
var i = BigInt(0)
require(s.forall(validChar _))
for (c <- s)
i = (i << 8) | c
i.U((s.length*8).W)
}
def apply(x: Char): UInt = {
require(validChar(x))
x.U(8.W)
}
def apply(x: UInt): UInt = apply(x, 10)
def apply(x: UInt, radix: Int): UInt = {
val rad = radix.U
val w = x.getWidth
require(w > 0)
var q = x
var s = digit(q % rad)
for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
q = q / rad
s = Cat(Mux((radix == 10).B && q === 0.U, Str(' '), digit(q % rad)), s)
}
s
}
def apply(x: SInt): UInt = apply(x, 10)
def apply(x: SInt, radix: Int): UInt = {
val neg = x < 0.S
val abs = x.abs.asUInt
if (radix != 10) {
Cat(Mux(neg, Str('-'), Str(' ')), Str(abs, radix))
} else {
val rad = radix.U
val w = abs.getWidth
require(w > 0)
var q = abs
var s = digit(q % rad)
var needSign = neg
for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
q = q / rad
val placeSpace = q === 0.U
val space = Mux(needSign, Str('-'), Str(' '))
needSign = needSign && !placeSpace
s = Cat(Mux(placeSpace, space, digit(q % rad)), s)
}
Cat(Mux(needSign, Str('-'), Str(' ')), s)
}
}
private def digit(d: UInt): UInt = Mux(d < 10.U, Str('0')+d, Str(('a'-10).toChar)+d)(7,0)
private def validChar(x: Char) = x == (x & 0xFF)
}
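// Examples: Str packs ASCII characters MSB-first at elaboration time, so Str("ab") is
// 0x6162.U(16.W) and Str('!') is 0x21.U(8.W); Str(x: UInt) instead builds hardware that
// renders x as ASCII digits (space-padded when radix is 10).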
object Split
{
def apply(x: UInt, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n0), x.extract(n0-1,0))
}
def apply(x: UInt, n1: Int, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0))
}
def apply(x: UInt, n2: Int, n1: Int, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n2), x.extract(n2-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0))
}
}
object Random
{
def apply(mod: Int, random: UInt): UInt = {
if (isPow2(mod)) random.extract(log2Ceil(mod)-1,0)
else PriorityEncoder(partition(apply(1 << log2Up(mod*8), random), mod))
}
def apply(mod: Int): UInt = apply(mod, randomizer)
def oneHot(mod: Int, random: UInt): UInt = {
if (isPow2(mod)) UIntToOH(random(log2Up(mod)-1,0))
else PriorityEncoderOH(partition(apply(1 << log2Up(mod*8), random), mod)).asUInt
}
def oneHot(mod: Int): UInt = oneHot(mod, randomizer)
private def randomizer = LFSR(16)
private def partition(value: UInt, slices: Int) =
Seq.tabulate(slices)(i => value < (((i + 1) << value.getWidth) / slices).U)
}
object Majority {
def apply(in: Set[Bool]): Bool = {
val n = (in.size >> 1) + 1
val clauses = in.subsets(n).map(_.reduce(_ && _))
clauses.reduce(_ || _)
}
def apply(in: Seq[Bool]): Bool = apply(in.toSet)
def apply(in: UInt): Bool = apply(in.asBools.toSet)
}
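// Example: for three inputs the OR-of-AND-subsets form reduces to the familiar two-of-three
// majority, i.e. Majority(Seq(a, b, c)) === (a && b) || (a && c) || (b && c).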
object PopCountAtLeast {
private def two(x: UInt): (Bool, Bool) = x.getWidth match {
case 1 => (x.asBool, false.B)
case n =>
val half = x.getWidth / 2
val (leftOne, leftTwo) = two(x(half - 1, 0))
val (rightOne, rightTwo) = two(x(x.getWidth - 1, half))
(leftOne || rightOne, leftTwo || rightTwo || (leftOne && rightOne))
}
def apply(x: UInt, n: Int): Bool = n match {
case 0 => true.B
case 1 => x.orR
case 2 => two(x)._2
case 3 => PopCount(x) >= n.U
}
}
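// Example: PopCountAtLeast(x, 2) is true when at least two bits of x are set, built from a
// divide-and-conquer of "saw one"/"saw two" flags rather than a full PopCount comparator
// (PopCount is only instantiated for the n == 3 case).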
// This gets used everywhere, so make the smallest circuit possible ...
// Given an address and size, create a mask of beatBytes size
// eg: (0x3, 0, 4) => 0001, (0x3, 1, 4) => 0011, (0x3, 2, 4) => 1111
// groupBy applies an interleaved OR reduction; groupBy=2 take 0010 => 01
object MaskGen {
def apply(addr_lo: UInt, lgSize: UInt, beatBytes: Int, groupBy: Int = 1): UInt = {
require (groupBy >= 1 && beatBytes >= groupBy)
require (isPow2(beatBytes) && isPow2(groupBy))
val lgBytes = log2Ceil(beatBytes)
val sizeOH = UIntToOH(lgSize | 0.U(log2Up(beatBytes).W), log2Up(beatBytes)) | (groupBy*2 - 1).U
def helper(i: Int): Seq[(Bool, Bool)] = {
if (i == 0) {
Seq((lgSize >= lgBytes.asUInt, true.B))
} else {
val sub = helper(i-1)
val size = sizeOH(lgBytes - i)
val bit = addr_lo(lgBytes - i)
val nbit = !bit
Seq.tabulate (1 << i) { j =>
val (sub_acc, sub_eq) = sub(j/2)
val eq = sub_eq && (if (j % 2 == 1) bit else nbit)
val acc = sub_acc || (size && eq)
(acc, eq)
}
}
}
if (groupBy == beatBytes) 1.U else
Cat(helper(lgBytes-log2Ceil(groupBy)).map(_._1).reverse)
}
}
File PlusArg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.experimental._
import chisel3.util.HasBlackBoxResource
@deprecated("This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05")
case class PlusArgInfo(default: BigInt, docstring: String)
/** Case class for PlusArg information
*
* @tparam A scala type of the PlusArg value
* @param default optional default value
* @param docstring text to include in the help
* @param doctype description of the Verilog type of the PlusArg value (e.g. STRING, INT)
*/
private case class PlusArgContainer[A](default: Option[A], docstring: String, doctype: String)
/** Typeclass for converting a type to a doctype string
* @tparam A some type
*/
trait Doctypeable[A] {
/** Return the doctype string for some option */
def toDoctype(a: Option[A]): String
}
/** Object containing implementations of the Doctypeable typeclass */
object Doctypes {
/** Converts an Int => "INT" */
implicit val intToDoctype = new Doctypeable[Int] { def toDoctype(a: Option[Int]) = "INT" }
/** Converts a BigInt => "INT" */
implicit val bigIntToDoctype = new Doctypeable[BigInt] { def toDoctype(a: Option[BigInt]) = "INT" }
/** Converts a String => "STRING" */
implicit val stringToDoctype = new Doctypeable[String] { def toDoctype(a: Option[String]) = "STRING" }
}
class plusarg_reader(val format: String, val default: BigInt, val docstring: String, val width: Int) extends BlackBox(Map(
"FORMAT" -> StringParam(format),
"DEFAULT" -> IntParam(default),
"WIDTH" -> IntParam(width)
)) with HasBlackBoxResource {
val io = IO(new Bundle {
val out = Output(UInt(width.W))
})
addResource("/vsrc/plusarg_reader.v")
}
/* This wrapper class has no outputs, making it clear it is a simulation-only construct */
class PlusArgTimeout(val format: String, val default: BigInt, val docstring: String, val width: Int) extends Module {
val io = IO(new Bundle {
val count = Input(UInt(width.W))
})
val max = Module(new plusarg_reader(format, default, docstring, width)).io.out
when (max > 0.U) {
assert (io.count < max, s"Timeout exceeded: $docstring")
}
}
import Doctypes._
object PlusArg
{
/** PlusArg("foo") will return 42.U if the simulation is run with +foo=42
* Do not use this as an initial register value. The value is set in an
* initial block and thus accessing it from another initial is racy.
* Add a docstring to document the arg, which can be dumped in an elaboration
* pass.
*/
def apply(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32): UInt = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new plusarg_reader(name + "=%d", default, docstring, width)).io.out
}
/** PlusArg.timeout(name, default, docstring)(count) will use chisel.assert
* to kill the simulation when count exceeds the specified integer argument.
* Default 0 will never assert.
*/
def timeout(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32)(count: UInt): Unit = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new PlusArgTimeout(name + "=%d", default, docstring, width)).io.count := count
}
}
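// Usage sketch ("max_inflight" and count are hypothetical names):
//   val limit = PlusArg("max_inflight", default = 8, docstring = "Maximum in-flight requests")
//   assert(count <= limit, "too many in-flight requests")
// Running the simulation with +max_inflight=4 overrides the default of 8; PlusArg.timeout
// wraps the same reader with an assertion that the supplied count stays below the threshold.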
object PlusArgArtefacts {
private var artefacts: Map[String, PlusArgContainer[_]] = Map.empty
/* Add a new PlusArg */
@deprecated(
"Use `Some(BigInt)` to specify a `default` value. This will be removed in Rocket Chip 2020.08",
"Rocket Chip 2020.05"
)
def append(name: String, default: BigInt, docstring: String): Unit = append(name, Some(default), docstring)
/** Add a new PlusArg
*
* @tparam A scala type of the PlusArg value
* @param name name for the PlusArg
* @param default optional default value
* @param docstring text to include in the help
*/
def append[A : Doctypeable](name: String, default: Option[A], docstring: String): Unit =
artefacts = artefacts ++
Map(name -> PlusArgContainer(default, docstring, implicitly[Doctypeable[A]].toDoctype(default)))
/* From plus args, generate help text */
private def serializeHelp_cHeader(tab: String = ""): String = artefacts
.map{ case(arg, info) =>
s"""|$tab+$arg=${info.doctype}\\n\\
|$tab${" "*20}${info.docstring}\\n\\
|""".stripMargin ++ info.default.map{ case default =>
s"$tab${" "*22}(default=${default})\\n\\\n"}.getOrElse("")
}.toSeq.mkString("\\n\\\n") ++ "\""
/* From plus args, generate a char array of their names */
private def serializeArray_cHeader(tab: String = ""): String = {
val prettyTab = tab + " " * 44 // Length of 'static const ...'
s"${tab}static const char * verilog_plusargs [] = {\\\n" ++
artefacts
.map{ case(arg, _) => s"""$prettyTab"$arg",\\\n""" }
.mkString("")++
s"${prettyTab}0};"
}
/* Generate C code to be included in emulator.cc that helps with
* argument parsing based on available Verilog PlusArgs */
def serialize_cHeader(): String =
s"""|#define PLUSARG_USAGE_OPTIONS \"EMULATOR VERILOG PLUSARGS\\n\\
|${serializeHelp_cHeader(" "*7)}
|${serializeArray_cHeader()}
|""".stripMargin
}
File package.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip
import chisel3._
import chisel3.util._
import scala.math.min
import scala.collection.{immutable, mutable}
package object util {
implicit class UnzippableOption[S, T](val x: Option[(S, T)]) {
def unzip = (x.map(_._1), x.map(_._2))
}
implicit class UIntIsOneOf(private val x: UInt) extends AnyVal {
def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR
def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq)
}
implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal {
/** Like Vec.apply(idx), but tolerates indices of mismatched width */
def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0))
}
implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal {
def apply(idx: UInt): T = {
if (x.size <= 1) {
x.head
} else if (!isPow2(x.size)) {
// For non-power-of-2 seqs, reflect elements to simplify decoder
(x ++ x.takeRight(x.size & -x.size)).toSeq(idx)
} else {
// Ignore MSBs of idx
val truncIdx =
if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx
else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0)
x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) }
}
}
def extract(idx: UInt): T = VecInit(x).extract(idx)
def asUInt: UInt = Cat(x.map(_.asUInt).reverse)
def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n)
def rotate(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n)
def rotateRight(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
}
// allow bitwise ops on Seq[Bool] just like UInt
implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal {
def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b }
def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b }
def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b }
def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x
def >> (n: Int): Seq[Bool] = x drop n
def unary_~ : Seq[Bool] = x.map(!_)
def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_)
def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_)
def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_)
private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B)
}
implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal {
def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable))
def getElements: Seq[Element] = x match {
case e: Element => Seq(e)
case a: Aggregate => a.getElements.flatMap(_.getElements)
}
}
/** Any Data subtype that has a Bool member named valid. */
type DataCanBeValid = Data { val valid: Bool }
implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal {
def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable)
}
implicit class StringToAugmentedString(private val x: String) extends AnyVal {
/** converts from camel case to underscores, also removing all spaces */
def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") {
case (acc, c) if c.isUpper => acc + "_" + c.toLower
case (acc, c) if c == ' ' => acc
case (acc, c) => acc + c
}
/** converts spaces or underscores to hyphens, also lowering case */
def kebab: String = x.toLowerCase map {
case ' ' => '-'
case '_' => '-'
case c => c
}
def named(name: Option[String]): String = {
x + name.map("_named_" + _ ).getOrElse("_with_no_name")
}
def named(name: String): String = named(Some(name))
}
implicit def uintToBitPat(x: UInt): BitPat = BitPat(x)
implicit def wcToUInt(c: WideCounter): UInt = c.value
implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal {
def sextTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x)
}
def padTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(0.U((n - x.getWidth).W), x)
}
// shifts left by n if n >= 0, or right by -n if n < 0
def << (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << n(w-1, 0)
Mux(n(w), shifted >> (1 << w), shifted)
}
// shifts right by n if n >= 0, or left by -n if n < 0
def >> (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << (1 << w) >> n(w-1, 0)
Mux(n(w), shifted, shifted >> (1 << w))
}
// Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts
def extract(hi: Int, lo: Int): UInt = {
require(hi >= lo-1)
if (hi == lo-1) 0.U
else x(hi, lo)
}
// Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts
def extractOption(hi: Int, lo: Int): Option[UInt] = {
require(hi >= lo-1)
if (hi == lo-1) None
else Some(x(hi, lo))
}
// like x & ~y, but first truncate or zero-extend y to x's width
def andNot(y: UInt): UInt = x & ~(y | (x & 0.U))
def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n)
def rotateRight(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r))
}
}
def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n))
def rotateLeft(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r))
}
}
// compute (this + y) % n, given (this < n) and (y < n)
def addWrap(y: UInt, n: Int): UInt = {
val z = x +& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0)
}
// compute (this - y) % n, given (this < n) and (y < n)
def subWrap(y: UInt, n: Int): UInt = {
val z = x -& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0)
}
def grouped(width: Int): Seq[UInt] =
(0 until x.getWidth by width).map(base => x(base + width - 1, base))
def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds
def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x)
// Like >=, but prevents x-prop for ('x >= 0)
def >== (y: UInt): Bool = x >= y || y === 0.U
}
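// Examples for the helpers above:
//   "b0110".U.rotateRight(1)        // -> "b0011".U
//   5.U(3.W).addWrap(4.U(3.W), 7)   // -> 2.U  (5 + 4 = 9, 9 - 7 = 2)
//   "b0100".U.extract(2, 3)         // -> 0.U  (zero-width extracts are allowed)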
implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal {
def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y)
def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y)
}
implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal {
def toInt: Int = if (x) 1 else 0
// this one's snagged from scalaz
def option[T](z: => T): Option[T] = if (x) Some(z) else None
}
implicit class IntToAugmentedInt(private val x: Int) extends AnyVal {
// exact log2
def log2: Int = {
require(isPow2(x))
log2Ceil(x)
}
}
def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x)
def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x))
def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0)
def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1)
def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None
// Fill 1s from low bits to high bits
def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth)
def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0))
helper(1, x)(width-1, 0)
}
// Fill 1s from high bits to low bits
def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth)
def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x >> s))
helper(1, x)(width-1, 0)
}
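// Examples:
//   leftOR("b00100".U)   // -> "b11100".U  (1s propagate toward the MSB)
//   rightOR("b00100".U)  // -> "b00111".U  (1s propagate toward the LSB)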
def OptimizationBarrier[T <: Data](in: T): T = {
val barrier = Module(new Module {
val io = IO(new Bundle {
val x = Input(chiselTypeOf(in))
val y = Output(chiselTypeOf(in))
})
io.y := io.x
override def desiredName = s"OptimizationBarrier_${in.typeName}"
})
barrier.io.x := in
barrier.io.y
}
/** Similar to Seq.groupBy except this returns a Seq instead of a Map
* Useful for deterministic code generation
*/
def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = {
val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]]
for (x <- xs) {
val key = f(x)
val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A])
l += x
}
map.view.map({ case (k, vs) => k -> vs.toList }).toList
}
def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match {
case 1 => List.fill(n)(in.head)
case x if x == n => in
case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in")
}
// HeterogeneousBag moved to standalone diplomacy
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts)
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag
}
File Bundles.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import freechips.rocketchip.util._
import scala.collection.immutable.ListMap
import chisel3.util.Decoupled
import chisel3.util.DecoupledIO
import chisel3.reflect.DataMirror
abstract class TLBundleBase(val params: TLBundleParameters) extends Bundle
// common combos in lazy policy:
// Put + Acquire
// Release + AccessAck
object TLMessages
{
// A B C D E
def PutFullData = 0.U // . . => AccessAck
def PutPartialData = 1.U // . . => AccessAck
def ArithmeticData = 2.U // . . => AccessAckData
def LogicalData = 3.U // . . => AccessAckData
def Get = 4.U // . . => AccessAckData
def Hint = 5.U // . . => HintAck
def AcquireBlock = 6.U // . => Grant[Data]
def AcquirePerm = 7.U // . => Grant[Data]
def Probe = 6.U // . => ProbeAck[Data]
def AccessAck = 0.U // . .
def AccessAckData = 1.U // . .
def HintAck = 2.U // . .
def ProbeAck = 4.U // .
def ProbeAckData = 5.U // .
def Release = 6.U // . => ReleaseAck
def ReleaseData = 7.U // . => ReleaseAck
def Grant = 4.U // . => GrantAck
def GrantData = 5.U // . => GrantAck
def ReleaseAck = 6.U // .
def GrantAck = 0.U // .
def isA(x: UInt) = x <= AcquirePerm
def isB(x: UInt) = x <= Probe
def isC(x: UInt) = x <= ReleaseData
def isD(x: UInt) = x <= ReleaseAck
def adResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, Grant, Grant)
def bcResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, ProbeAck, ProbeAck)
def a = Seq( ("PutFullData",TLPermissions.PermMsgReserved),
("PutPartialData",TLPermissions.PermMsgReserved),
("ArithmeticData",TLAtomics.ArithMsg),
("LogicalData",TLAtomics.LogicMsg),
("Get",TLPermissions.PermMsgReserved),
("Hint",TLHints.HintsMsg),
("AcquireBlock",TLPermissions.PermMsgGrow),
("AcquirePerm",TLPermissions.PermMsgGrow))
def b = Seq( ("PutFullData",TLPermissions.PermMsgReserved),
("PutPartialData",TLPermissions.PermMsgReserved),
("ArithmeticData",TLAtomics.ArithMsg),
("LogicalData",TLAtomics.LogicMsg),
("Get",TLPermissions.PermMsgReserved),
("Hint",TLHints.HintsMsg),
("Probe",TLPermissions.PermMsgCap))
def c = Seq( ("AccessAck",TLPermissions.PermMsgReserved),
("AccessAckData",TLPermissions.PermMsgReserved),
("HintAck",TLPermissions.PermMsgReserved),
("Invalid Opcode",TLPermissions.PermMsgReserved),
("ProbeAck",TLPermissions.PermMsgReport),
("ProbeAckData",TLPermissions.PermMsgReport),
("Release",TLPermissions.PermMsgReport),
("ReleaseData",TLPermissions.PermMsgReport))
def d = Seq( ("AccessAck",TLPermissions.PermMsgReserved),
("AccessAckData",TLPermissions.PermMsgReserved),
("HintAck",TLPermissions.PermMsgReserved),
("Invalid Opcode",TLPermissions.PermMsgReserved),
("Grant",TLPermissions.PermMsgCap),
("GrantData",TLPermissions.PermMsgCap),
("ReleaseAck",TLPermissions.PermMsgReserved))
}
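// Example: adResponse maps an A-channel opcode to the D-channel opcode expected in reply,
// e.g. adResponse(Get) is AccessAckData and adResponse(PutFullData) is AccessAck; bcResponse
// does the same for B-channel requests answered on C (e.g. Probe -> ProbeAck).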
/**
* The three primary TileLink permissions are:
* (T)runk: the agent is (or is on inwards path to) the global point of serialization.
* (B)ranch: the agent is on an outwards path from the global point of serialization and holds a read-only copy.
* (N)one: the agent holds no permissions on (and no valid copy of) the block.
* These permissions are permuted by transfer operations in various ways.
* Operations can cap permissions, request for them to be grown or shrunk,
* or for a report on their current status.
*/
object TLPermissions
{
val aWidth = 2
val bdWidth = 2
val cWidth = 3
// Cap types (Grant = new permissions, Probe = permissions <= target)
def toT = 0.U(bdWidth.W)
def toB = 1.U(bdWidth.W)
def toN = 2.U(bdWidth.W)
def isCap(x: UInt) = x <= toN
// Grow types (Acquire = permissions >= target)
def NtoB = 0.U(aWidth.W)
def NtoT = 1.U(aWidth.W)
def BtoT = 2.U(aWidth.W)
def isGrow(x: UInt) = x <= BtoT
// Shrink types (ProbeAck, Release)
def TtoB = 0.U(cWidth.W)
def TtoN = 1.U(cWidth.W)
def BtoN = 2.U(cWidth.W)
def isShrink(x: UInt) = x <= BtoN
// Report types (ProbeAck, Release)
def TtoT = 3.U(cWidth.W)
def BtoB = 4.U(cWidth.W)
def NtoN = 5.U(cWidth.W)
def isReport(x: UInt) = x <= NtoN
def PermMsgGrow:Seq[String] = Seq("Grow NtoB", "Grow NtoT", "Grow BtoT")
def PermMsgCap:Seq[String] = Seq("Cap toT", "Cap toB", "Cap toN")
def PermMsgReport:Seq[String] = Seq("Shrink TtoB", "Shrink TtoN", "Shrink BtoN", "Report TtoT", "Report BtoB", "Report NtoN")
def PermMsgReserved:Seq[String] = Seq("Reserved")
}
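// Example: a Probe that demotes a client to a read-only copy carries param = toB (a cap type);
// the client answers with a shrink such as TtoB if it held Trunk, or a report such as BtoB if
// it was already a Branch. isCap/isGrow/isShrink/isReport classify these encodings per channel.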
object TLAtomics
{
val width = 3
// Arithmetic types
def MIN = 0.U(width.W)
def MAX = 1.U(width.W)
def MINU = 2.U(width.W)
def MAXU = 3.U(width.W)
def ADD = 4.U(width.W)
def isArithmetic(x: UInt) = x <= ADD
// Logical types
def XOR = 0.U(width.W)
def OR = 1.U(width.W)
def AND = 2.U(width.W)
def SWAP = 3.U(width.W)
def isLogical(x: UInt) = x <= SWAP
def ArithMsg:Seq[String] = Seq("MIN", "MAX", "MINU", "MAXU", "ADD")
def LogicMsg:Seq[String] = Seq("XOR", "OR", "AND", "SWAP")
}
object TLHints
{
val width = 1
def PREFETCH_READ = 0.U(width.W)
def PREFETCH_WRITE = 1.U(width.W)
def isHints(x: UInt) = x <= PREFETCH_WRITE
def HintsMsg:Seq[String] = Seq("PrefetchRead", "PrefetchWrite")
}
sealed trait TLChannel extends TLBundleBase {
val channelName: String
}
sealed trait TLDataChannel extends TLChannel
sealed trait TLAddrChannel extends TLDataChannel
final class TLBundleA(params: TLBundleParameters)
extends TLBundleBase(params) with TLAddrChannel
{
override def typeName = s"TLBundleA_${params.shortName}"
val channelName = "'A' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(List(TLAtomics.width, TLPermissions.aWidth, TLHints.width).max.W) // amo_opcode || grow perms || hint
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // from
val address = UInt(params.addressBits.W) // to
val user = BundleMap(params.requestFields)
val echo = BundleMap(params.echoFields)
// variable fields during multibeat:
val mask = UInt((params.dataBits/8).W)
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleB(params: TLBundleParameters)
extends TLBundleBase(params) with TLAddrChannel
{
override def typeName = s"TLBundleB_${params.shortName}"
val channelName = "'B' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(TLPermissions.bdWidth.W) // cap perms
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // to
val address = UInt(params.addressBits.W) // from
// variable fields during multibeat:
val mask = UInt((params.dataBits/8).W)
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleC(params: TLBundleParameters)
extends TLBundleBase(params) with TLAddrChannel
{
override def typeName = s"TLBundleC_${params.shortName}"
val channelName = "'C' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(TLPermissions.cWidth.W) // shrink or report perms
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // from
val address = UInt(params.addressBits.W) // to
val user = BundleMap(params.requestFields)
val echo = BundleMap(params.echoFields)
// variable fields during multibeat:
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleD(params: TLBundleParameters)
extends TLBundleBase(params) with TLDataChannel
{
override def typeName = s"TLBundleD_${params.shortName}"
val channelName = "'D' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(TLPermissions.bdWidth.W) // cap perms
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // to
val sink = UInt(params.sinkBits.W) // from
val denied = Bool() // implies corrupt iff *Data
val user = BundleMap(params.responseFields)
val echo = BundleMap(params.echoFields)
// variable fields during multibeat:
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleE(params: TLBundleParameters)
extends TLBundleBase(params) with TLChannel
{
override def typeName = s"TLBundleE_${params.shortName}"
val channelName = "'E' channel"
val sink = UInt(params.sinkBits.W) // to
}
class TLBundle(val params: TLBundleParameters) extends Record
{
// Emulate a Bundle with elements abcde or ad depending on params.hasBCE
private val optA = Some (Decoupled(new TLBundleA(params)))
private val optB = params.hasBCE.option(Flipped(Decoupled(new TLBundleB(params))))
private val optC = params.hasBCE.option(Decoupled(new TLBundleC(params)))
private val optD = Some (Flipped(Decoupled(new TLBundleD(params))))
private val optE = params.hasBCE.option(Decoupled(new TLBundleE(params)))
def a: DecoupledIO[TLBundleA] = optA.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleA(params)))))
def b: DecoupledIO[TLBundleB] = optB.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleB(params)))))
def c: DecoupledIO[TLBundleC] = optC.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleC(params)))))
def d: DecoupledIO[TLBundleD] = optD.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleD(params)))))
def e: DecoupledIO[TLBundleE] = optE.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleE(params)))))
val elements =
if (params.hasBCE) ListMap("e" -> e, "d" -> d, "c" -> c, "b" -> b, "a" -> a)
else ListMap("d" -> d, "a" -> a)
def tieoff(): Unit = {
DataMirror.specifiedDirectionOf(a.ready) match {
case SpecifiedDirection.Input =>
a.ready := false.B
c.ready := false.B
e.ready := false.B
b.valid := false.B
d.valid := false.B
case SpecifiedDirection.Output =>
a.valid := false.B
c.valid := false.B
e.valid := false.B
b.ready := false.B
d.ready := false.B
case _ =>
}
}
}
object TLBundle
{
def apply(params: TLBundleParameters) = new TLBundle(params)
}
class TLAsyncBundleBase(val params: TLAsyncBundleParameters) extends Bundle
class TLAsyncBundle(params: TLAsyncBundleParameters) extends TLAsyncBundleBase(params)
{
val a = new AsyncBundle(new TLBundleA(params.base), params.async)
val b = Flipped(new AsyncBundle(new TLBundleB(params.base), params.async))
val c = new AsyncBundle(new TLBundleC(params.base), params.async)
val d = Flipped(new AsyncBundle(new TLBundleD(params.base), params.async))
val e = new AsyncBundle(new TLBundleE(params.base), params.async)
}
class TLRationalBundle(params: TLBundleParameters) extends TLBundleBase(params)
{
val a = RationalIO(new TLBundleA(params))
val b = Flipped(RationalIO(new TLBundleB(params)))
val c = RationalIO(new TLBundleC(params))
val d = Flipped(RationalIO(new TLBundleD(params)))
val e = RationalIO(new TLBundleE(params))
}
class TLCreditedBundle(params: TLBundleParameters) extends TLBundleBase(params)
{
val a = CreditedIO(new TLBundleA(params))
val b = Flipped(CreditedIO(new TLBundleB(params)))
val c = CreditedIO(new TLBundleC(params))
val d = Flipped(CreditedIO(new TLBundleD(params)))
val e = CreditedIO(new TLBundleE(params))
}
File Parameters.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.diplomacy
import chisel3._
import chisel3.util.{DecoupledIO, Queue, ReadyValidIO, isPow2, log2Ceil, log2Floor}
import freechips.rocketchip.util.ShiftQueue
/** Options for describing the attributes of memory regions */
object RegionType {
// Define the 'more relaxed than' ordering
val cases = Seq(CACHED, TRACKED, UNCACHED, IDEMPOTENT, VOLATILE, PUT_EFFECTS, GET_EFFECTS)
sealed trait T extends Ordered[T] {
def compare(that: T): Int = cases.indexOf(that) compare cases.indexOf(this)
}
case object CACHED extends T // an intermediate agent may have cached a copy of the region for you
case object TRACKED extends T // the region may have been cached by another master, but coherence is being provided
case object UNCACHED extends T // the region has not been cached yet, but should be cached when possible
case object IDEMPOTENT extends T // gets return most recently put content, but content should not be cached
case object VOLATILE extends T // content may change without a put, but puts and gets have no side effects
case object PUT_EFFECTS extends T // puts produce side effects and so must not be combined/delayed
case object GET_EFFECTS extends T // gets produce side effects and so must not be issued speculatively
}
// A half-open range [start, end); may be empty when start == end
case class IdRange(start: Int, end: Int) extends Ordered[IdRange]
{
require (start >= 0, s"Ids cannot be negative, but got: $start.")
require (start <= end, "Id ranges cannot be negative.")
def compare(x: IdRange) = {
val primary = (this.start - x.start).signum
val secondary = (x.end - this.end).signum
if (primary != 0) primary else secondary
}
def overlaps(x: IdRange) = start < x.end && x.start < end
def contains(x: IdRange) = start <= x.start && x.end <= end
def contains(x: Int) = start <= x && x < end
def contains(x: UInt) =
if (size == 0) {
false.B
} else if (size == 1) { // simple comparison
x === start.U
} else {
// find index of largest different bit
val largestDeltaBit = log2Floor(start ^ (end-1))
val smallestCommonBit = largestDeltaBit + 1 // may not exist in x
val uncommonMask = (1 << smallestCommonBit) - 1
val uncommonBits = (x | 0.U(smallestCommonBit.W))(largestDeltaBit, 0)
// the prefix must match exactly (note: may shift ALL bits away)
(x >> smallestCommonBit) === (start >> smallestCommonBit).U &&
// firrtl constant prop range analysis can eliminate these two:
(start & uncommonMask).U <= uncommonBits &&
uncommonBits <= ((end-1) & uncommonMask).U
}
def shift(x: Int) = IdRange(start+x, end+x)
def size = end - start
def isEmpty = end == start
def range = start until end
}
object IdRange
{
def overlaps(s: Seq[IdRange]) = if (s.isEmpty) None else {
val ranges = s.sorted
(ranges.tail zip ranges.init) find { case (a, b) => a overlaps b }
}
}
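// Illustrative sketch (not part of the original file): the containment and
// overlap rules above in action.  The hardware contains(x: UInt) performs the
// same half-open range check by matching the common high-order prefix of
// `start` and `end-1` and bounding only the low-order bits, so no subtractor
// is needed.  Object and value names below are hypothetical.
object IdRangeExample {
  def main(args: Array[String]): Unit = {
    val a = IdRange(0, 4) // sources 0,1,2,3
    val b = IdRange(2, 6) // sources 2,3,4,5
    require(a overlaps b)             // they share sources 2 and 3
    require(!(a contains b))          // but neither contains the other
    require(IdRange(0, 8) contains a)
    require(a.shift(4) == IdRange(4, 8) && a.size == 4 && !a.isEmpty)
  }
}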
// A potentially empty inclusive range of 2-powers [min, max] (in bytes)
case class TransferSizes(min: Int, max: Int)
{
def this(x: Int) = this(x, x)
require (min <= max, s"Min transfer $min > max transfer $max")
require (min >= 0 && max >= 0, s"TransferSizes must be positive, got: ($min, $max)")
require (max == 0 || isPow2(max), s"TransferSizes must be a power of 2, got: $max")
require (min == 0 || isPow2(min), s"TransferSizes must be a power of 2, got: $min")
require (max == 0 || min != 0, s"TransferSize 0 is forbidden unless (0,0), got: ($min, $max)")
def none = min == 0
def contains(x: Int) = isPow2(x) && min <= x && x <= max
def containsLg(x: Int) = contains(1 << x)
def containsLg(x: UInt) =
if (none) false.B
else if (min == max) { log2Ceil(min).U === x }
else { log2Ceil(min).U <= x && x <= log2Ceil(max).U }
def contains(x: TransferSizes) = x.none || (min <= x.min && x.max <= max)
def intersect(x: TransferSizes) =
if (x.max < min || max < x.min) TransferSizes.none
else TransferSizes(scala.math.max(min, x.min), scala.math.min(max, x.max))
// Not a union, because the result may contain sizes contained by neither term
// NOT TO BE CONFUSED WITH COVERPOINTS
def mincover(x: TransferSizes) = {
if (none) {
x
} else if (x.none) {
this
} else {
TransferSizes(scala.math.min(min, x.min), scala.math.max(max, x.max))
}
}
override def toString() = "TransferSizes[%d, %d]".format(min, max)
}
object TransferSizes {
def apply(x: Int) = new TransferSizes(x)
val none = new TransferSizes(0)
def mincover(seq: Seq[TransferSizes]) = seq.foldLeft(none)(_ mincover _)
def intersect(seq: Seq[TransferSizes]) = seq.reduce(_ intersect _)
implicit def asBool(x: TransferSizes) = !x.none
}
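// Illustrative sketch (not part of the original file): how intersect narrows
// and mincover widens the supported transfer sizes.  As the comment above
// notes, mincover is not a union; it may admit sizes supported by neither
// operand.  Names below are hypothetical.
object TransferSizesExample {
  def main(args: Array[String]): Unit = {
    val a = TransferSizes(4, 16)     // 4..16 byte power-of-two transfers
    val b = TransferSizes(64, 256)
    require(a.contains(8) && !a.contains(12) && !a.contains(32))
    require((a intersect b) == TransferSizes.none)   // disjoint size ranges
    require((a mincover b) == TransferSizes(4, 256)) // admits 32, which neither term supports
  }
}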
// AddressSets specify the address space managed by the manager
// Base is the base address, and mask are the bits consumed by the manager
// e.g: base=0x200, mask=0xff describes a device managing 0x200-0x2ff
// e.g: base=0x1000, mask=0xf0f describes a device managing 0x1000-0x100f, 0x1100-0x110f, ...
case class AddressSet(base: BigInt, mask: BigInt) extends Ordered[AddressSet]
{
// Forbid misaligned base address (and empty sets)
require ((base & mask) == 0, s"Mis-aligned AddressSets are forbidden, got: ${this.toString}")
require (base >= 0, s"AddressSet negative base is ambiguous: $base") // TL2 address widths are not fixed => negative is ambiguous
// We do allow negative mask (=> ignore all high bits)
def contains(x: BigInt) = ((x ^ base) & ~mask) == 0
def contains(x: UInt) = ((x ^ base.U).zext & (~mask).S) === 0.S
// turn x into an address contained in this set
def legalize(x: UInt): UInt = base.U | (mask.U & x)
// overlap iff bitwise: both care (~mask0 & ~mask1) => both equal (base0=base1)
def overlaps(x: AddressSet) = (~(mask | x.mask) & (base ^ x.base)) == 0
// contains iff bitwise: x.mask => mask && contains(x.base)
def contains(x: AddressSet) = ((x.mask | (base ^ x.base)) & ~mask) == 0
// The number of bytes to which the manager must be aligned
def alignment = ((mask + 1) & ~mask)
// Is this a contiguous memory range
def contiguous = alignment == mask+1
def finite = mask >= 0
def max = { require (finite, "Max cannot be calculated on infinite mask"); base | mask }
// Widen the match function to ignore all bits in imask
def widen(imask: BigInt) = AddressSet(base & ~imask, mask | imask)
// Return an AddressSet that only contains the addresses both sets contain
def intersect(x: AddressSet): Option[AddressSet] = {
if (!overlaps(x)) {
None
} else {
val r_mask = mask & x.mask
val r_base = base | x.base
Some(AddressSet(r_base, r_mask))
}
}
def subtract(x: AddressSet): Seq[AddressSet] = {
intersect(x) match {
case None => Seq(this)
case Some(remove) => AddressSet.enumerateBits(mask & ~remove.mask).map { bit =>
val nmask = (mask & (bit-1)) | remove.mask
val nbase = (remove.base ^ bit) & ~nmask
AddressSet(nbase, nmask)
}
}
}
// AddressSets have one natural Ordering (the containment order, if contiguous)
def compare(x: AddressSet) = {
val primary = (this.base - x.base).signum // smallest address first
val secondary = (x.mask - this.mask).signum // largest mask first
if (primary != 0) primary else secondary
}
// We always want to see things in hex
override def toString() = {
if (mask >= 0) {
"AddressSet(0x%x, 0x%x)".format(base, mask)
} else {
"AddressSet(0x%x, ~0x%x)".format(base, ~mask)
}
}
def toRanges = {
require (finite, "Ranges cannot be calculated on infinite mask")
val size = alignment
val fragments = mask & ~(size-1)
val bits = bitIndexes(fragments)
(BigInt(0) until (BigInt(1) << bits.size)).map { i =>
val off = bitIndexes(i).foldLeft(base) { case (a, b) => a.setBit(bits(b)) }
AddressRange(off, size)
}
}
}
object AddressSet
{
val everything = AddressSet(0, -1)
def misaligned(base: BigInt, size: BigInt, tail: Seq[AddressSet] = Seq()): Seq[AddressSet] = {
if (size == 0) tail.reverse else {
val maxBaseAlignment = base & (-base) // 0 for infinite (LSB)
val maxSizeAlignment = BigInt(1) << log2Floor(size) // MSB of size
val step =
if (maxBaseAlignment == 0 || maxBaseAlignment > maxSizeAlignment)
maxSizeAlignment else maxBaseAlignment
misaligned(base+step, size-step, AddressSet(base, step-1) +: tail)
}
}
def unify(seq: Seq[AddressSet], bit: BigInt): Seq[AddressSet] = {
// Pair terms up by ignoring 'bit'
seq.distinct.groupBy(x => x.copy(base = x.base & ~bit)).map { case (key, seq) =>
if (seq.size == 1) {
seq.head // singleton -> unaffected
} else {
key.copy(mask = key.mask | bit) // pair - widen mask by bit
}
}.toList
}
def unify(seq: Seq[AddressSet]): Seq[AddressSet] = {
val bits = seq.map(_.base).foldLeft(BigInt(0))(_ | _)
AddressSet.enumerateBits(bits).foldLeft(seq) { case (acc, bit) => unify(acc, bit) }.sorted
}
def enumerateMask(mask: BigInt): Seq[BigInt] = {
def helper(id: BigInt, tail: Seq[BigInt]): Seq[BigInt] =
if (id == mask) (id +: tail).reverse else helper(((~mask | id) + 1) & mask, id +: tail)
helper(0, Nil)
}
def enumerateBits(mask: BigInt): Seq[BigInt] = {
def helper(x: BigInt): Seq[BigInt] = {
if (x == 0) {
Nil
} else {
val bit = x & (-x)
bit +: helper(x & ~bit)
}
}
helper(mask)
}
}
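// Illustrative sketch (not part of the original file): the base/mask decoding
// described in the comments above, plus carving one region out of another with
// subtract.  Names below are hypothetical.
object AddressSetExample {
  def main(args: Array[String]): Unit = {
    val dev = AddressSet(0x1000, 0xf0f)      // 0x1000-0x100f, 0x1100-0x110f, ...
    require(dev.contains(BigInt(0x1105)))    // masked bits (0-3, 8-11) are "don't care"
    require(!dev.contains(BigInt(0x1015)))   // bit 4 is not in the mask
    require(dev.alignment == 0x10 && !dev.contiguous)
    // Remove a 4 KiB hole from a 64 KiB RAM; the remainder never overlaps the hole
    // and still covers the other 0xf000 bytes.
    val ram  = AddressSet(0x0, 0xffff)
    val hole = AddressSet(0x8000, 0xfff)
    val rest = ram.subtract(hole)
    require(rest.forall(s => !s.overlaps(hole)))
    require(rest.map(_.mask + 1).sum == 0xf000)
  }
}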
case class BufferParams(depth: Int, flow: Boolean, pipe: Boolean)
{
require (depth >= 0, "Buffer depth must be >= 0")
def isDefined = depth > 0
def latency = if (isDefined && !flow) 1 else 0
def apply[T <: Data](x: DecoupledIO[T]) =
if (isDefined) Queue(x, depth, flow=flow, pipe=pipe)
else x
def irrevocable[T <: Data](x: ReadyValidIO[T]) =
if (isDefined) Queue.irrevocable(x, depth, flow=flow, pipe=pipe)
else x
def sq[T <: Data](x: DecoupledIO[T]) =
if (!isDefined) x else {
val sq = Module(new ShiftQueue(x.bits, depth, flow=flow, pipe=pipe))
sq.io.enq <> x
sq.io.deq
}
override def toString() = "BufferParams:%d%s%s".format(depth, if (flow) "F" else "", if (pipe) "P" else "")
}
object BufferParams
{
implicit def apply(depth: Int): BufferParams = BufferParams(depth, false, false)
val default = BufferParams(2)
val none = BufferParams(0)
val flow = BufferParams(1, true, false)
val pipe = BufferParams(1, false, true)
}
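// Illustrative sketch (not part of the original file): the preset buffer
// configurations and their latency cost.  Only latency/toString are exercised
// here because apply()/sq() need a hardware elaboration context.  Names are
// hypothetical.
object BufferParamsExample {
  def main(args: Array[String]): Unit = {
    require(BufferParams.default.latency == 1)  // 2-deep registered queue
    require(BufferParams.flow.latency == 0)     // flow-through adds no cycle
    require(!BufferParams.none.isDefined)       // depth 0: signals pass straight through
    val b: BufferParams = 3                     // implicit Int => BufferParams(3, false, false)
    require(b.toString == "BufferParams:3")
  }
}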
case class TriStateValue(value: Boolean, set: Boolean)
{
def update(orig: Boolean) = if (set) value else orig
}
object TriStateValue
{
implicit def apply(value: Boolean): TriStateValue = TriStateValue(value, true)
def unset = TriStateValue(false, false)
}
trait DirectedBuffers[T] {
def copyIn(x: BufferParams): T
def copyOut(x: BufferParams): T
def copyInOut(x: BufferParams): T
}
trait IdMapEntry {
def name: String
def from: IdRange
def to: IdRange
def isCache: Boolean
def requestFifo: Boolean
def maxTransactionsInFlight: Option[Int]
def pretty(fmt: String) =
if (from ne to) { // if the subclass uses the same reference for both from and to, assume its format string has an arity of 5
fmt.format(to.start, to.end, from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "")
} else {
fmt.format(from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "")
}
}
abstract class IdMap[T <: IdMapEntry] {
protected val fmt: String
val mapping: Seq[T]
def pretty: String = mapping.map(_.pretty(fmt)).mkString(",\n")
}
File Edges.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.util._
class TLEdge(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdgeParameters(client, manager, params, sourceInfo)
{
def isAligned(address: UInt, lgSize: UInt): Bool = {
if (maxLgSize == 0) true.B else {
val mask = UIntToOH1(lgSize, maxLgSize)
(address & mask) === 0.U
}
}
def mask(address: UInt, lgSize: UInt): UInt =
MaskGen(address, lgSize, manager.beatBytes)
def staticHasData(bundle: TLChannel): Option[Boolean] = {
bundle match {
case _:TLBundleA => {
// Do there exist A messages with Data?
val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial
// Do there exist A messages without Data?
val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint
// Statically optimize the case where hasData is a constant
if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None
}
case _:TLBundleB => {
// Do there exist B messages with Data?
val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial
// Do there exist B messages without Data?
val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint
// Statically optimize the case where hasData is a constant
if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None
}
case _:TLBundleC => {
        // Do there exist C messages with Data?
val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe
// Do there exist C messages without Data?
val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe
if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None
}
case _:TLBundleD => {
        // Do there exist D messages with Data?
val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB
// Do there exist D messages without Data?
val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT
if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None
}
case _:TLBundleE => Some(false)
}
}
def isRequest(x: TLChannel): Bool = {
x match {
case a: TLBundleA => true.B
case b: TLBundleB => true.B
case c: TLBundleC => c.opcode(2) && c.opcode(1)
// opcode === TLMessages.Release ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(2) && !d.opcode(1)
// opcode === TLMessages.Grant ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
}
def isResponse(x: TLChannel): Bool = {
x match {
case a: TLBundleA => false.B
case b: TLBundleB => false.B
case c: TLBundleC => !c.opcode(2) || !c.opcode(1)
// opcode =/= TLMessages.Release &&
// opcode =/= TLMessages.ReleaseData
case d: TLBundleD => true.B // Grant isResponse + isRequest
case e: TLBundleE => true.B
}
}
def hasData(x: TLChannel): Bool = {
val opdata = x match {
case a: TLBundleA => !a.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case b: TLBundleB => !b.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case c: TLBundleC => c.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.ProbeAckData ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
staticHasData(x).map(_.B).getOrElse(opdata)
}
def opcode(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.opcode
case b: TLBundleB => b.opcode
case c: TLBundleC => c.opcode
case d: TLBundleD => d.opcode
}
}
def param(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.param
case b: TLBundleB => b.param
case c: TLBundleC => c.param
case d: TLBundleD => d.param
}
}
def size(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.size
case b: TLBundleB => b.size
case c: TLBundleC => c.size
case d: TLBundleD => d.size
}
}
def data(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.data
case b: TLBundleB => b.data
case c: TLBundleC => c.data
case d: TLBundleD => d.data
}
}
def corrupt(x: TLDataChannel): Bool = {
x match {
case a: TLBundleA => a.corrupt
case b: TLBundleB => b.corrupt
case c: TLBundleC => c.corrupt
case d: TLBundleD => d.corrupt
}
}
def mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.mask
case b: TLBundleB => b.mask
case c: TLBundleC => mask(c.address, c.size)
}
}
def full_mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => mask(a.address, a.size)
case b: TLBundleB => mask(b.address, b.size)
case c: TLBundleC => mask(c.address, c.size)
}
}
def address(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.address
case b: TLBundleB => b.address
case c: TLBundleC => c.address
}
}
def source(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.source
case b: TLBundleB => b.source
case c: TLBundleC => c.source
case d: TLBundleD => d.source
}
}
def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes)
def addr_lo(x: UInt): UInt =
if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0)
def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x))
def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x))
def numBeats(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 1.U
case bundle: TLDataChannel => {
val hasData = this.hasData(bundle)
val size = this.size(bundle)
val cutoff = log2Ceil(manager.beatBytes)
val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U
val decode = UIntToOH(size, maxLgSize+1) >> cutoff
Mux(hasData, decode | small.asUInt, 1.U)
}
}
}
def numBeats1(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 0.U
case bundle: TLDataChannel => {
if (maxLgSize == 0) {
0.U
} else {
val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes)
Mux(hasData(bundle), decode, 0.U)
}
}
}
}
def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val beats1 = numBeats1(bits)
val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W))
val counter1 = counter - 1.U
val first = counter === 0.U
val last = counter === 1.U || beats1 === 0.U
val done = last && fire
val count = (beats1 & ~counter1)
when (fire) {
counter := Mux(first, beats1, counter1)
}
(first, last, done, count)
}
def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1
def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire)
def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid)
def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2
def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire)
def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid)
def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3
def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire)
def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid)
def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3)
}
def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire)
def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid)
def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4)
}
def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire)
def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid)
def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes))
}
def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire)
def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid)
// Does the request need T permissions to be executed?
def needT(a: TLBundleA): Bool = {
val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLPermissions.NtoB -> false.B,
TLPermissions.NtoT -> true.B,
TLPermissions.BtoT -> true.B))
MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array(
TLMessages.PutFullData -> true.B,
TLMessages.PutPartialData -> true.B,
TLMessages.ArithmeticData -> true.B,
TLMessages.LogicalData -> true.B,
TLMessages.Get -> false.B,
TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLHints.PREFETCH_READ -> false.B,
TLHints.PREFETCH_WRITE -> true.B)),
TLMessages.AcquireBlock -> acq_needT,
TLMessages.AcquirePerm -> acq_needT))
}
// This is a very expensive circuit; use only if you really mean it!
def inFlight(x: TLBundle): (UInt, UInt) = {
val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W))
val bce = manager.anySupportAcquireB && client.anySupportProbe
val (a_first, a_last, _) = firstlast(x.a)
val (b_first, b_last, _) = firstlast(x.b)
val (c_first, c_last, _) = firstlast(x.c)
val (d_first, d_last, _) = firstlast(x.d)
val (e_first, e_last, _) = firstlast(x.e)
val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits))
val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits))
val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits))
val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits))
val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits))
val a_inc = x.a.fire && a_first && a_request
val b_inc = x.b.fire && b_first && b_request
val c_inc = x.c.fire && c_first && c_request
val d_inc = x.d.fire && d_first && d_request
val e_inc = x.e.fire && e_first && e_request
val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil))
val a_dec = x.a.fire && a_last && a_response
val b_dec = x.b.fire && b_last && b_response
val c_dec = x.c.fire && c_last && c_response
val d_dec = x.d.fire && d_last && d_response
val e_dec = x.e.fire && e_last && e_response
val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil))
val next_flight = flight + PopCount(inc) - PopCount(dec)
flight := next_flight
(flight, next_flight)
}
def prettySourceMapping(context: String): String = {
s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n"
}
}
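// Illustrative sketch (not part of the original file): the multibeat tracking
// pattern used by firstlastHelper above, as a standalone module.  The real
// edge derives `beats1` (total beats minus one) from the message size and the
// beat width; here it is simply an input.  BeatTracker and its port names are
// hypothetical; chisel3._ and chisel3.util._ are already imported at the top
// of this file.
class BeatTracker(maxBeats: Int) extends Module {
  require(maxBeats >= 2)
  val io = IO(new Bundle {
    val fire   = Input(Bool())                     // a beat was accepted this cycle
    val beats1 = Input(UInt(log2Up(maxBeats).W))   // beats-in-message minus one
    val first  = Output(Bool())
    val last   = Output(Bool())
  })
  val counter  = RegInit(0.U(log2Up(maxBeats).W))
  val counter1 = counter - 1.U
  io.first := counter === 0.U                      // idle counter: next beat starts a message
  io.last  := counter === 1.U || io.beats1 === 0.U // one beat left, or a single-beat message
  when (io.fire) {
    counter := Mux(io.first, io.beats1, counter1)  // load on the first beat, then count down
  }
}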
class TLEdgeOut(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
// Transfers
def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquireBlock
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquirePerm
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.Release
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ReleaseData
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) =
Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B)
def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAck
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions, data)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAckData
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B)
def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink)
def GrantAck(toSink: UInt): TLBundleE = {
val e = Wire(new TLBundleE(bundle))
e.sink := toSink
e
}
// Accesses
def Get(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsGetFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Get
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutFullFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutFullData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, mask, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutPartialFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutPartialData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = {
require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsArithmeticFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.ArithmeticData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsLogicalFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.LogicalData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = {
require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsHintFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Hint
a.param := param
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data)
def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAckData
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size)
def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.HintAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
}
class TLEdgeIn(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = {
val todo = x.filter(!_.isEmpty)
val heads = todo.map(_.head)
val tails = todo.map(_.tail)
if (todo.isEmpty) Nil else { heads +: myTranspose(tails) }
}
// Transfers
def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = {
require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}")
val legal = client.supportsProbe(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Probe
b.param := capPermissions
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.Grant
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.GrantData
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B)
def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.ReleaseAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
// Accesses
def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = {
require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsGet(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Get
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsPutFull(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutFullData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, mask, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsPutPartial(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutPartialData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsArithmetic(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.ArithmeticData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsLogical(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.LogicalData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = {
require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsHint(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Hint
b.param := param
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size)
def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied)
def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data)
def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAckData
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B)
def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied)
def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B)
def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.HintAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
}
| module TLMonitor_4( // @[Monitor.scala:36:7]
input clock, // @[Monitor.scala:36:7]
input reset, // @[Monitor.scala:36:7]
input io_in_a_ready, // @[Monitor.scala:20:14]
input io_in_a_valid, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_opcode, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_param, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_size, // @[Monitor.scala:20:14]
input [6:0] io_in_a_bits_source, // @[Monitor.scala:20:14]
input [28:0] io_in_a_bits_address, // @[Monitor.scala:20:14]
input [7:0] io_in_a_bits_mask, // @[Monitor.scala:20:14]
input [63:0] io_in_a_bits_data, // @[Monitor.scala:20:14]
input io_in_a_bits_corrupt, // @[Monitor.scala:20:14]
input io_in_d_ready, // @[Monitor.scala:20:14]
input io_in_d_valid, // @[Monitor.scala:20:14]
input [2:0] io_in_d_bits_opcode, // @[Monitor.scala:20:14]
input [1:0] io_in_d_bits_param, // @[Monitor.scala:20:14]
input [2:0] io_in_d_bits_size, // @[Monitor.scala:20:14]
input [6:0] io_in_d_bits_source, // @[Monitor.scala:20:14]
input io_in_d_bits_sink, // @[Monitor.scala:20:14]
input io_in_d_bits_denied, // @[Monitor.scala:20:14]
input [63:0] io_in_d_bits_data, // @[Monitor.scala:20:14]
input io_in_d_bits_corrupt // @[Monitor.scala:20:14]
);
wire [31:0] _plusarg_reader_1_out; // @[PlusArg.scala:80:11]
wire [31:0] _plusarg_reader_out; // @[PlusArg.scala:80:11]
wire io_in_a_ready_0 = io_in_a_ready; // @[Monitor.scala:36:7]
wire io_in_a_valid_0 = io_in_a_valid; // @[Monitor.scala:36:7]
wire [2:0] io_in_a_bits_opcode_0 = io_in_a_bits_opcode; // @[Monitor.scala:36:7]
wire [2:0] io_in_a_bits_param_0 = io_in_a_bits_param; // @[Monitor.scala:36:7]
wire [2:0] io_in_a_bits_size_0 = io_in_a_bits_size; // @[Monitor.scala:36:7]
wire [6:0] io_in_a_bits_source_0 = io_in_a_bits_source; // @[Monitor.scala:36:7]
wire [28:0] io_in_a_bits_address_0 = io_in_a_bits_address; // @[Monitor.scala:36:7]
wire [7:0] io_in_a_bits_mask_0 = io_in_a_bits_mask; // @[Monitor.scala:36:7]
wire [63:0] io_in_a_bits_data_0 = io_in_a_bits_data; // @[Monitor.scala:36:7]
wire io_in_a_bits_corrupt_0 = io_in_a_bits_corrupt; // @[Monitor.scala:36:7]
wire io_in_d_ready_0 = io_in_d_ready; // @[Monitor.scala:36:7]
wire io_in_d_valid_0 = io_in_d_valid; // @[Monitor.scala:36:7]
wire [2:0] io_in_d_bits_opcode_0 = io_in_d_bits_opcode; // @[Monitor.scala:36:7]
wire [1:0] io_in_d_bits_param_0 = io_in_d_bits_param; // @[Monitor.scala:36:7]
wire [2:0] io_in_d_bits_size_0 = io_in_d_bits_size; // @[Monitor.scala:36:7]
wire [6:0] io_in_d_bits_source_0 = io_in_d_bits_source; // @[Monitor.scala:36:7]
wire io_in_d_bits_sink_0 = io_in_d_bits_sink; // @[Monitor.scala:36:7]
wire io_in_d_bits_denied_0 = io_in_d_bits_denied; // @[Monitor.scala:36:7]
wire [63:0] io_in_d_bits_data_0 = io_in_d_bits_data; // @[Monitor.scala:36:7]
wire io_in_d_bits_corrupt_0 = io_in_d_bits_corrupt; // @[Monitor.scala:36:7]
wire sink_ok = 1'h0; // @[Monitor.scala:309:31]
wire _c_first_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_T = 1'h0; // @[Decoupled.scala:51:35]
wire c_first_beats1_opdata = 1'h0; // @[Edges.scala:102:36]
wire _c_first_last_T = 1'h0; // @[Edges.scala:232:25]
wire c_first_done = 1'h0; // @[Edges.scala:233:22]
wire _c_set_wo_ready_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_wo_ready_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_wo_ready_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_wo_ready_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_wo_ready_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_wo_ready_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_interm_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_interm_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_interm_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_interm_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_interm_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_interm_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_interm_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_interm_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_interm_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_interm_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_interm_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_interm_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_T = 1'h0; // @[Monitor.scala:772:47]
wire _c_probe_ack_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_T_1 = 1'h0; // @[Monitor.scala:772:95]
wire c_probe_ack = 1'h0; // @[Monitor.scala:772:71]
wire _same_cycle_resp_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_T_3 = 1'h0; // @[Monitor.scala:795:44]
wire _same_cycle_resp_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_T_4 = 1'h0; // @[Edges.scala:68:36]
wire _same_cycle_resp_T_5 = 1'h0; // @[Edges.scala:68:51]
wire _same_cycle_resp_T_6 = 1'h0; // @[Edges.scala:68:40]
wire _same_cycle_resp_T_7 = 1'h0; // @[Monitor.scala:795:55]
wire _same_cycle_resp_WIRE_4_ready = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_4_valid = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_4_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_5_ready = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_5_valid = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_5_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire same_cycle_resp_1 = 1'h0; // @[Monitor.scala:795:88]
wire [2:0] responseMap_0 = 3'h0; // @[Monitor.scala:643:42]
wire [2:0] responseMap_1 = 3'h0; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_0 = 3'h0; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_1 = 3'h0; // @[Monitor.scala:644:42]
wire [2:0] _c_first_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_2_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_3_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] c_first_beats1_decode = 3'h0; // @[Edges.scala:220:59]
wire [2:0] c_first_beats1 = 3'h0; // @[Edges.scala:221:14]
wire [2:0] _c_first_count_T = 3'h0; // @[Edges.scala:234:27]
wire [2:0] c_first_count = 3'h0; // @[Edges.scala:234:25]
wire [2:0] _c_first_counter_T = 3'h0; // @[Edges.scala:236:21]
wire [2:0] _c_set_wo_ready_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_wo_ready_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_wo_ready_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_wo_ready_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_wo_ready_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_wo_ready_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_interm_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_interm_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_interm_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_interm_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_interm_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_interm_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_interm_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_interm_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_interm_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_2_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_3_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_2_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_3_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_4_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_4_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_4_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_5_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_5_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_5_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire _source_ok_T_3 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_5 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_9 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_11 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_15 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_17 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_21 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_23 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_39 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_41 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_45 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_47 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_51 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_53 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_57 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_59 = 1'h1; // @[Parameters.scala:57:20]
wire c_first = 1'h1; // @[Edges.scala:231:25]
wire _c_first_last_T_1 = 1'h1; // @[Edges.scala:232:43]
wire c_first_last = 1'h1; // @[Edges.scala:232:33]
wire [2:0] c_first_counter1 = 3'h7; // @[Edges.scala:230:28]
wire [3:0] _c_first_counter1_T = 4'hF; // @[Edges.scala:230:28]
wire [63:0] _c_first_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_first_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_first_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_first_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_set_wo_ready_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_set_wo_ready_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_opcodes_set_interm_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_opcodes_set_interm_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_sizes_set_interm_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_sizes_set_interm_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_opcodes_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_opcodes_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_sizes_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_sizes_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_probe_ack_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_probe_ack_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_probe_ack_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_probe_ack_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _same_cycle_resp_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _same_cycle_resp_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _same_cycle_resp_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _same_cycle_resp_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _same_cycle_resp_WIRE_4_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _same_cycle_resp_WIRE_5_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [28:0] _c_first_WIRE_bits_address = 29'h0; // @[Bundles.scala:265:74]
wire [28:0] _c_first_WIRE_1_bits_address = 29'h0; // @[Bundles.scala:265:61]
wire [28:0] _c_first_WIRE_2_bits_address = 29'h0; // @[Bundles.scala:265:74]
wire [28:0] _c_first_WIRE_3_bits_address = 29'h0; // @[Bundles.scala:265:61]
wire [28:0] _c_set_wo_ready_WIRE_bits_address = 29'h0; // @[Bundles.scala:265:74]
wire [28:0] _c_set_wo_ready_WIRE_1_bits_address = 29'h0; // @[Bundles.scala:265:61]
wire [28:0] _c_set_WIRE_bits_address = 29'h0; // @[Bundles.scala:265:74]
wire [28:0] _c_set_WIRE_1_bits_address = 29'h0; // @[Bundles.scala:265:61]
wire [28:0] _c_opcodes_set_interm_WIRE_bits_address = 29'h0; // @[Bundles.scala:265:74]
wire [28:0] _c_opcodes_set_interm_WIRE_1_bits_address = 29'h0; // @[Bundles.scala:265:61]
wire [28:0] _c_sizes_set_interm_WIRE_bits_address = 29'h0; // @[Bundles.scala:265:74]
wire [28:0] _c_sizes_set_interm_WIRE_1_bits_address = 29'h0; // @[Bundles.scala:265:61]
wire [28:0] _c_opcodes_set_WIRE_bits_address = 29'h0; // @[Bundles.scala:265:74]
wire [28:0] _c_opcodes_set_WIRE_1_bits_address = 29'h0; // @[Bundles.scala:265:61]
wire [28:0] _c_sizes_set_WIRE_bits_address = 29'h0; // @[Bundles.scala:265:74]
wire [28:0] _c_sizes_set_WIRE_1_bits_address = 29'h0; // @[Bundles.scala:265:61]
wire [28:0] _c_probe_ack_WIRE_bits_address = 29'h0; // @[Bundles.scala:265:74]
wire [28:0] _c_probe_ack_WIRE_1_bits_address = 29'h0; // @[Bundles.scala:265:61]
wire [28:0] _c_probe_ack_WIRE_2_bits_address = 29'h0; // @[Bundles.scala:265:74]
wire [28:0] _c_probe_ack_WIRE_3_bits_address = 29'h0; // @[Bundles.scala:265:61]
wire [28:0] _same_cycle_resp_WIRE_bits_address = 29'h0; // @[Bundles.scala:265:74]
wire [28:0] _same_cycle_resp_WIRE_1_bits_address = 29'h0; // @[Bundles.scala:265:61]
wire [28:0] _same_cycle_resp_WIRE_2_bits_address = 29'h0; // @[Bundles.scala:265:74]
wire [28:0] _same_cycle_resp_WIRE_3_bits_address = 29'h0; // @[Bundles.scala:265:61]
wire [28:0] _same_cycle_resp_WIRE_4_bits_address = 29'h0; // @[Bundles.scala:265:74]
wire [28:0] _same_cycle_resp_WIRE_5_bits_address = 29'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_first_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_first_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_first_WIRE_2_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_first_WIRE_3_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_set_wo_ready_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_set_wo_ready_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_set_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_set_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_opcodes_set_interm_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_opcodes_set_interm_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_sizes_set_interm_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_sizes_set_interm_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_opcodes_set_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_opcodes_set_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_sizes_set_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_sizes_set_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_probe_ack_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_probe_ack_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_probe_ack_WIRE_2_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_probe_ack_WIRE_3_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _same_cycle_resp_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _same_cycle_resp_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _same_cycle_resp_WIRE_2_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _same_cycle_resp_WIRE_3_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _same_cycle_resp_WIRE_4_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _same_cycle_resp_WIRE_5_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [15:0] _a_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _a_size_lookup_T_5 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _d_opcodes_clr_T_3 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _d_sizes_clr_T_3 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _c_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:724:57]
wire [15:0] _c_size_lookup_T_5 = 16'hF; // @[Monitor.scala:724:57]
wire [15:0] _d_opcodes_clr_T_9 = 16'hF; // @[Monitor.scala:724:57]
wire [15:0] _d_sizes_clr_T_9 = 16'hF; // @[Monitor.scala:724:57]
wire [16:0] _a_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _a_size_lookup_T_4 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _d_opcodes_clr_T_2 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _d_sizes_clr_T_2 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _c_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:724:57]
wire [16:0] _c_size_lookup_T_4 = 17'hF; // @[Monitor.scala:724:57]
wire [16:0] _d_opcodes_clr_T_8 = 17'hF; // @[Monitor.scala:724:57]
wire [16:0] _d_sizes_clr_T_8 = 17'hF; // @[Monitor.scala:724:57]
wire [15:0] _a_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _a_size_lookup_T_3 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _d_opcodes_clr_T_1 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _d_sizes_clr_T_1 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _c_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:724:51]
wire [15:0] _c_size_lookup_T_3 = 16'h10; // @[Monitor.scala:724:51]
wire [15:0] _d_opcodes_clr_T_7 = 16'h10; // @[Monitor.scala:724:51]
wire [15:0] _d_sizes_clr_T_7 = 16'h10; // @[Monitor.scala:724:51]
wire [1026:0] _c_opcodes_set_T_1 = 1027'h0; // @[Monitor.scala:767:54]
wire [1026:0] _c_sizes_set_T_1 = 1027'h0; // @[Monitor.scala:768:52]
wire [9:0] _c_opcodes_set_T = 10'h0; // @[Monitor.scala:767:79]
wire [9:0] _c_sizes_set_T = 10'h0; // @[Monitor.scala:768:77]
wire [3:0] _c_opcodes_set_interm_T_1 = 4'h1; // @[Monitor.scala:765:61]
wire [3:0] _c_sizes_set_interm_T_1 = 4'h1; // @[Monitor.scala:766:59]
wire [3:0] c_opcodes_set_interm = 4'h0; // @[Monitor.scala:754:40]
wire [3:0] c_sizes_set_interm = 4'h0; // @[Monitor.scala:755:40]
wire [3:0] _c_opcodes_set_interm_T = 4'h0; // @[Monitor.scala:765:53]
wire [3:0] _c_sizes_set_interm_T = 4'h0; // @[Monitor.scala:766:51]
wire [127:0] _c_set_wo_ready_T = 128'h1; // @[OneHot.scala:58:35]
wire [127:0] _c_set_T = 128'h1; // @[OneHot.scala:58:35]
wire [259:0] c_opcodes_set = 260'h0; // @[Monitor.scala:740:34]
wire [259:0] c_sizes_set = 260'h0; // @[Monitor.scala:741:34]
wire [64:0] c_set = 65'h0; // @[Monitor.scala:738:34]
wire [64:0] c_set_wo_ready = 65'h0; // @[Monitor.scala:739:34]
wire [5:0] _c_first_beats1_decode_T_2 = 6'h0; // @[package.scala:243:46]
wire [5:0] _c_first_beats1_decode_T_1 = 6'h3F; // @[package.scala:243:76]
wire [12:0] _c_first_beats1_decode_T = 13'h3F; // @[package.scala:243:71]
wire [2:0] responseMap_6 = 3'h4; // @[Monitor.scala:643:42]
wire [2:0] responseMap_7 = 3'h4; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_7 = 3'h4; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_6 = 3'h5; // @[Monitor.scala:644:42]
wire [2:0] responseMap_5 = 3'h2; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_5 = 3'h2; // @[Monitor.scala:644:42]
wire [2:0] responseMap_2 = 3'h1; // @[Monitor.scala:643:42]
wire [2:0] responseMap_3 = 3'h1; // @[Monitor.scala:643:42]
wire [2:0] responseMap_4 = 3'h1; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_2 = 3'h1; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_3 = 3'h1; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_4 = 3'h1; // @[Monitor.scala:644:42]
wire [3:0] _a_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:637:123]
wire [3:0] _a_size_lookup_T_2 = 4'h4; // @[Monitor.scala:641:117]
wire [3:0] _d_opcodes_clr_T = 4'h4; // @[Monitor.scala:680:48]
wire [3:0] _d_sizes_clr_T = 4'h4; // @[Monitor.scala:681:48]
wire [3:0] _c_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:749:123]
wire [3:0] _c_size_lookup_T_2 = 4'h4; // @[Monitor.scala:750:119]
wire [3:0] _d_opcodes_clr_T_6 = 4'h4; // @[Monitor.scala:790:48]
wire [3:0] _d_sizes_clr_T_6 = 4'h4; // @[Monitor.scala:791:48]
wire [2:0] _mask_sizeOH_T = io_in_a_bits_size_0; // @[Misc.scala:202:34]
wire [6:0] _source_ok_uncommonBits_T = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _source_ok_uncommonBits_T_1 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _source_ok_uncommonBits_T_2 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _source_ok_uncommonBits_T_3 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_1 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_2 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_3 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_4 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_5 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_6 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_7 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_8 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_9 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_10 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_11 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_12 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_13 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_14 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_15 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_16 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_17 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_18 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_19 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_20 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_21 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_22 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_23 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_24 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_25 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_26 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_27 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_28 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_29 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_30 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_31 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_32 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_33 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_34 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_35 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_36 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_37 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_38 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_39 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_40 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_41 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_42 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_43 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _source_ok_uncommonBits_T_4 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _source_ok_uncommonBits_T_5 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _source_ok_uncommonBits_T_6 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _source_ok_uncommonBits_T_7 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
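  // Legal-source check: the A-channel source ID must fall within one of the client ID ranges known to this edge.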
wire _source_ok_T = io_in_a_bits_source_0 == 7'h10; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_0 = _source_ok_T; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits = _source_ok_uncommonBits_T[1:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] _source_ok_T_1 = io_in_a_bits_source_0[6:2]; // @[Monitor.scala:36:7]
wire [4:0] _source_ok_T_7 = io_in_a_bits_source_0[6:2]; // @[Monitor.scala:36:7]
wire [4:0] _source_ok_T_13 = io_in_a_bits_source_0[6:2]; // @[Monitor.scala:36:7]
wire [4:0] _source_ok_T_19 = io_in_a_bits_source_0[6:2]; // @[Monitor.scala:36:7]
wire _source_ok_T_2 = _source_ok_T_1 == 5'h0; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_4 = _source_ok_T_2; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_6 = _source_ok_T_4; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1 = _source_ok_T_6; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_1 = _source_ok_uncommonBits_T_1[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_8 = _source_ok_T_7 == 5'h1; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_10 = _source_ok_T_8; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_12 = _source_ok_T_10; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_2 = _source_ok_T_12; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_2 = _source_ok_uncommonBits_T_2[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_14 = _source_ok_T_13 == 5'h2; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_16 = _source_ok_T_14; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_18 = _source_ok_T_16; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_3 = _source_ok_T_18; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_3 = _source_ok_uncommonBits_T_3[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_20 = _source_ok_T_19 == 5'h3; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_22 = _source_ok_T_20; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_24 = _source_ok_T_22; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_4 = _source_ok_T_24; // @[Parameters.scala:1138:31]
wire _source_ok_T_25 = io_in_a_bits_source_0 == 7'h20; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_5 = _source_ok_T_25; // @[Parameters.scala:1138:31]
wire _source_ok_T_26 = io_in_a_bits_source_0 == 7'h21; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_6 = _source_ok_T_26; // @[Parameters.scala:1138:31]
wire _source_ok_T_27 = io_in_a_bits_source_0 == 7'h22; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_7 = _source_ok_T_27; // @[Parameters.scala:1138:31]
wire _source_ok_T_28 = io_in_a_bits_source_0 == 7'h40; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_8 = _source_ok_T_28; // @[Parameters.scala:1138:31]
wire _source_ok_T_29 = _source_ok_WIRE_0 | _source_ok_WIRE_1; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_30 = _source_ok_T_29 | _source_ok_WIRE_2; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_31 = _source_ok_T_30 | _source_ok_WIRE_3; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_32 = _source_ok_T_31 | _source_ok_WIRE_4; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_33 = _source_ok_T_32 | _source_ok_WIRE_5; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_34 = _source_ok_T_33 | _source_ok_WIRE_6; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_35 = _source_ok_T_34 | _source_ok_WIRE_7; // @[Parameters.scala:1138:31, :1139:46]
wire source_ok = _source_ok_T_35 | _source_ok_WIRE_8; // @[Parameters.scala:1138:31, :1139:46]
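  // Size-derived address mask, shared by the alignment check and the burst-length decode below.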
wire [12:0] _GEN = 13'h3F << io_in_a_bits_size_0; // @[package.scala:243:71]
wire [12:0] _is_aligned_mask_T; // @[package.scala:243:71]
assign _is_aligned_mask_T = _GEN; // @[package.scala:243:71]
wire [12:0] _a_first_beats1_decode_T; // @[package.scala:243:71]
assign _a_first_beats1_decode_T = _GEN; // @[package.scala:243:71]
wire [12:0] _a_first_beats1_decode_T_3; // @[package.scala:243:71]
assign _a_first_beats1_decode_T_3 = _GEN; // @[package.scala:243:71]
wire [5:0] _is_aligned_mask_T_1 = _is_aligned_mask_T[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] is_aligned_mask = ~_is_aligned_mask_T_1; // @[package.scala:243:{46,76}]
wire [28:0] _is_aligned_T = {23'h0, io_in_a_bits_address_0[5:0] & is_aligned_mask}; // @[package.scala:243:46]
wire is_aligned = _is_aligned_T == 29'h0; // @[Edges.scala:21:{16,24}]
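  // Recompute the byte-lane mask expected for this beat from the request's address and size.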
wire [1:0] mask_sizeOH_shiftAmount = _mask_sizeOH_T[1:0]; // @[OneHot.scala:64:49]
wire [3:0] _mask_sizeOH_T_1 = 4'h1 << mask_sizeOH_shiftAmount; // @[OneHot.scala:64:49, :65:12]
wire [2:0] _mask_sizeOH_T_2 = _mask_sizeOH_T_1[2:0]; // @[OneHot.scala:65:{12,27}]
wire [2:0] mask_sizeOH = {_mask_sizeOH_T_2[2:1], 1'h1}; // @[OneHot.scala:65:27]
wire mask_sub_sub_sub_0_1 = io_in_a_bits_size_0 > 3'h2; // @[Misc.scala:206:21]
wire mask_sub_sub_size = mask_sizeOH[2]; // @[Misc.scala:202:81, :209:26]
wire mask_sub_sub_bit = io_in_a_bits_address_0[2]; // @[Misc.scala:210:26]
wire mask_sub_sub_1_2 = mask_sub_sub_bit; // @[Misc.scala:210:26, :214:27]
wire mask_sub_sub_nbit = ~mask_sub_sub_bit; // @[Misc.scala:210:26, :211:20]
wire mask_sub_sub_0_2 = mask_sub_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_sub_sub_acc_T = mask_sub_sub_size & mask_sub_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_sub_0_1 = mask_sub_sub_sub_0_1 | _mask_sub_sub_acc_T; // @[Misc.scala:206:21, :215:{29,38}]
wire _mask_sub_sub_acc_T_1 = mask_sub_sub_size & mask_sub_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_sub_1_1 = mask_sub_sub_sub_0_1 | _mask_sub_sub_acc_T_1; // @[Misc.scala:206:21, :215:{29,38}]
wire mask_sub_size = mask_sizeOH[1]; // @[Misc.scala:202:81, :209:26]
wire mask_sub_bit = io_in_a_bits_address_0[1]; // @[Misc.scala:210:26]
wire mask_sub_nbit = ~mask_sub_bit; // @[Misc.scala:210:26, :211:20]
wire mask_sub_0_2 = mask_sub_sub_0_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_sub_acc_T = mask_sub_size & mask_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_0_1 = mask_sub_sub_0_1 | _mask_sub_acc_T; // @[Misc.scala:215:{29,38}]
wire mask_sub_1_2 = mask_sub_sub_0_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_sub_acc_T_1 = mask_sub_size & mask_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_1_1 = mask_sub_sub_0_1 | _mask_sub_acc_T_1; // @[Misc.scala:215:{29,38}]
wire mask_sub_2_2 = mask_sub_sub_1_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_sub_acc_T_2 = mask_sub_size & mask_sub_2_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_2_1 = mask_sub_sub_1_1 | _mask_sub_acc_T_2; // @[Misc.scala:215:{29,38}]
wire mask_sub_3_2 = mask_sub_sub_1_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_sub_acc_T_3 = mask_sub_size & mask_sub_3_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_3_1 = mask_sub_sub_1_1 | _mask_sub_acc_T_3; // @[Misc.scala:215:{29,38}]
wire mask_size = mask_sizeOH[0]; // @[Misc.scala:202:81, :209:26]
wire mask_bit = io_in_a_bits_address_0[0]; // @[Misc.scala:210:26]
wire mask_nbit = ~mask_bit; // @[Misc.scala:210:26, :211:20]
wire mask_eq = mask_sub_0_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T = mask_size & mask_eq; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc = mask_sub_0_1 | _mask_acc_T; // @[Misc.scala:215:{29,38}]
wire mask_eq_1 = mask_sub_0_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_1 = mask_size & mask_eq_1; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_1 = mask_sub_0_1 | _mask_acc_T_1; // @[Misc.scala:215:{29,38}]
wire mask_eq_2 = mask_sub_1_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T_2 = mask_size & mask_eq_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_2 = mask_sub_1_1 | _mask_acc_T_2; // @[Misc.scala:215:{29,38}]
wire mask_eq_3 = mask_sub_1_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_3 = mask_size & mask_eq_3; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_3 = mask_sub_1_1 | _mask_acc_T_3; // @[Misc.scala:215:{29,38}]
wire mask_eq_4 = mask_sub_2_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T_4 = mask_size & mask_eq_4; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_4 = mask_sub_2_1 | _mask_acc_T_4; // @[Misc.scala:215:{29,38}]
wire mask_eq_5 = mask_sub_2_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_5 = mask_size & mask_eq_5; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_5 = mask_sub_2_1 | _mask_acc_T_5; // @[Misc.scala:215:{29,38}]
wire mask_eq_6 = mask_sub_3_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T_6 = mask_size & mask_eq_6; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_6 = mask_sub_3_1 | _mask_acc_T_6; // @[Misc.scala:215:{29,38}]
wire mask_eq_7 = mask_sub_3_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_7 = mask_size & mask_eq_7; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_7 = mask_sub_3_1 | _mask_acc_T_7; // @[Misc.scala:215:{29,38}]
wire [1:0] mask_lo_lo = {mask_acc_1, mask_acc}; // @[Misc.scala:215:29, :222:10]
wire [1:0] mask_lo_hi = {mask_acc_3, mask_acc_2}; // @[Misc.scala:215:29, :222:10]
wire [3:0] mask_lo = {mask_lo_hi, mask_lo_lo}; // @[Misc.scala:222:10]
wire [1:0] mask_hi_lo = {mask_acc_5, mask_acc_4}; // @[Misc.scala:215:29, :222:10]
wire [1:0] mask_hi_hi = {mask_acc_7, mask_acc_6}; // @[Misc.scala:215:29, :222:10]
wire [3:0] mask_hi = {mask_hi_hi, mask_hi_lo}; // @[Misc.scala:222:10]
wire [7:0] mask = {mask_hi, mask_lo}; // @[Misc.scala:222:10]
wire [1:0] uncommonBits = _uncommonBits_T[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_1 = _uncommonBits_T_1[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_2 = _uncommonBits_T_2[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_3 = _uncommonBits_T_3[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_4 = _uncommonBits_T_4[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_5 = _uncommonBits_T_5[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_6 = _uncommonBits_T_6[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_7 = _uncommonBits_T_7[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_8 = _uncommonBits_T_8[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_9 = _uncommonBits_T_9[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_10 = _uncommonBits_T_10[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_11 = _uncommonBits_T_11[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_12 = _uncommonBits_T_12[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_13 = _uncommonBits_T_13[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_14 = _uncommonBits_T_14[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_15 = _uncommonBits_T_15[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_16 = _uncommonBits_T_16[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_17 = _uncommonBits_T_17[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_18 = _uncommonBits_T_18[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_19 = _uncommonBits_T_19[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_20 = _uncommonBits_T_20[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_21 = _uncommonBits_T_21[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_22 = _uncommonBits_T_22[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_23 = _uncommonBits_T_23[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_24 = _uncommonBits_T_24[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_25 = _uncommonBits_T_25[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_26 = _uncommonBits_T_26[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_27 = _uncommonBits_T_27[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_28 = _uncommonBits_T_28[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_29 = _uncommonBits_T_29[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_30 = _uncommonBits_T_30[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_31 = _uncommonBits_T_31[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_32 = _uncommonBits_T_32[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_33 = _uncommonBits_T_33[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_34 = _uncommonBits_T_34[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_35 = _uncommonBits_T_35[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_36 = _uncommonBits_T_36[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_37 = _uncommonBits_T_37[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_38 = _uncommonBits_T_38[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_39 = _uncommonBits_T_39[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_40 = _uncommonBits_T_40[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_41 = _uncommonBits_T_41[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_42 = _uncommonBits_T_42[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_43 = _uncommonBits_T_43[1:0]; // @[Parameters.scala:52:{29,56}]
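  // The D-channel source ID must likewise match one of the known client ID ranges.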
wire _source_ok_T_36 = io_in_d_bits_source_0 == 7'h10; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_1_0 = _source_ok_T_36; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_4 = _source_ok_uncommonBits_T_4[1:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] _source_ok_T_37 = io_in_d_bits_source_0[6:2]; // @[Monitor.scala:36:7]
wire [4:0] _source_ok_T_43 = io_in_d_bits_source_0[6:2]; // @[Monitor.scala:36:7]
wire [4:0] _source_ok_T_49 = io_in_d_bits_source_0[6:2]; // @[Monitor.scala:36:7]
wire [4:0] _source_ok_T_55 = io_in_d_bits_source_0[6:2]; // @[Monitor.scala:36:7]
wire _source_ok_T_38 = _source_ok_T_37 == 5'h0; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_40 = _source_ok_T_38; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_42 = _source_ok_T_40; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1_1 = _source_ok_T_42; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_5 = _source_ok_uncommonBits_T_5[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_44 = _source_ok_T_43 == 5'h1; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_46 = _source_ok_T_44; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_48 = _source_ok_T_46; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1_2 = _source_ok_T_48; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_6 = _source_ok_uncommonBits_T_6[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_50 = _source_ok_T_49 == 5'h2; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_52 = _source_ok_T_50; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_54 = _source_ok_T_52; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1_3 = _source_ok_T_54; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_7 = _source_ok_uncommonBits_T_7[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_56 = _source_ok_T_55 == 5'h3; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_58 = _source_ok_T_56; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_60 = _source_ok_T_58; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1_4 = _source_ok_T_60; // @[Parameters.scala:1138:31]
wire _source_ok_T_61 = io_in_d_bits_source_0 == 7'h20; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_1_5 = _source_ok_T_61; // @[Parameters.scala:1138:31]
wire _source_ok_T_62 = io_in_d_bits_source_0 == 7'h21; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_1_6 = _source_ok_T_62; // @[Parameters.scala:1138:31]
wire _source_ok_T_63 = io_in_d_bits_source_0 == 7'h22; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_1_7 = _source_ok_T_63; // @[Parameters.scala:1138:31]
wire _source_ok_T_64 = io_in_d_bits_source_0 == 7'h40; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_1_8 = _source_ok_T_64; // @[Parameters.scala:1138:31]
wire _source_ok_T_65 = _source_ok_WIRE_1_0 | _source_ok_WIRE_1_1; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_66 = _source_ok_T_65 | _source_ok_WIRE_1_2; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_67 = _source_ok_T_66 | _source_ok_WIRE_1_3; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_68 = _source_ok_T_67 | _source_ok_WIRE_1_4; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_69 = _source_ok_T_68 | _source_ok_WIRE_1_5; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_70 = _source_ok_T_69 | _source_ok_WIRE_1_6; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_71 = _source_ok_T_70 | _source_ok_WIRE_1_7; // @[Parameters.scala:1138:31, :1139:46]
wire source_ok_1 = _source_ok_T_71 | _source_ok_WIRE_1_8; // @[Parameters.scala:1138:31, :1139:46]
wire _T_1167 = io_in_a_ready_0 & io_in_a_valid_0; // @[Decoupled.scala:51:35]
wire _a_first_T; // @[Decoupled.scala:51:35]
assign _a_first_T = _T_1167; // @[Decoupled.scala:51:35]
wire _a_first_T_1; // @[Decoupled.scala:51:35]
assign _a_first_T_1 = _T_1167; // @[Decoupled.scala:51:35]
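  // A-channel burst tracking: count down the remaining beats so a_first marks the first beat of each message.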
wire [5:0] _a_first_beats1_decode_T_1 = _a_first_beats1_decode_T[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] _a_first_beats1_decode_T_2 = ~_a_first_beats1_decode_T_1; // @[package.scala:243:{46,76}]
wire [2:0] a_first_beats1_decode = _a_first_beats1_decode_T_2[5:3]; // @[package.scala:243:46]
wire _a_first_beats1_opdata_T = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7]
wire _a_first_beats1_opdata_T_1 = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7]
wire a_first_beats1_opdata = ~_a_first_beats1_opdata_T; // @[Edges.scala:92:{28,37}]
wire [2:0] a_first_beats1 = a_first_beats1_opdata ? a_first_beats1_decode : 3'h0; // @[Edges.scala:92:28, :220:59, :221:14]
reg [2:0] a_first_counter; // @[Edges.scala:229:27]
wire [3:0] _a_first_counter1_T = {1'h0, a_first_counter} - 4'h1; // @[Edges.scala:229:27, :230:28]
wire [2:0] a_first_counter1 = _a_first_counter1_T[2:0]; // @[Edges.scala:230:28]
wire a_first = a_first_counter == 3'h0; // @[Edges.scala:229:27, :231:25]
wire _a_first_last_T = a_first_counter == 3'h1; // @[Edges.scala:229:27, :232:25]
wire _a_first_last_T_1 = a_first_beats1 == 3'h0; // @[Edges.scala:221:14, :232:43]
wire a_first_last = _a_first_last_T | _a_first_last_T_1; // @[Edges.scala:232:{25,33,43}]
wire a_first_done = a_first_last & _a_first_T; // @[Decoupled.scala:51:35]
wire [2:0] _a_first_count_T = ~a_first_counter1; // @[Edges.scala:230:28, :234:27]
wire [2:0] a_first_count = a_first_beats1 & _a_first_count_T; // @[Edges.scala:221:14, :234:{25,27}]
wire [2:0] _a_first_counter_T = a_first ? a_first_beats1 : a_first_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
reg [2:0] opcode; // @[Monitor.scala:387:22]
reg [2:0] param; // @[Monitor.scala:388:22]
reg [2:0] size; // @[Monitor.scala:389:22]
reg [6:0] source; // @[Monitor.scala:390:22]
reg [28:0] address; // @[Monitor.scala:391:22]
wire _T_1240 = io_in_d_ready_0 & io_in_d_valid_0; // @[Decoupled.scala:51:35]
wire _d_first_T; // @[Decoupled.scala:51:35]
assign _d_first_T = _T_1240; // @[Decoupled.scala:51:35]
wire _d_first_T_1; // @[Decoupled.scala:51:35]
assign _d_first_T_1 = _T_1240; // @[Decoupled.scala:51:35]
wire _d_first_T_2; // @[Decoupled.scala:51:35]
assign _d_first_T_2 = _T_1240; // @[Decoupled.scala:51:35]
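  // D-channel burst tracking mirrors the A-channel logic above.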
wire [12:0] _GEN_0 = 13'h3F << io_in_d_bits_size_0; // @[package.scala:243:71]
wire [12:0] _d_first_beats1_decode_T; // @[package.scala:243:71]
assign _d_first_beats1_decode_T = _GEN_0; // @[package.scala:243:71]
wire [12:0] _d_first_beats1_decode_T_3; // @[package.scala:243:71]
assign _d_first_beats1_decode_T_3 = _GEN_0; // @[package.scala:243:71]
wire [12:0] _d_first_beats1_decode_T_6; // @[package.scala:243:71]
assign _d_first_beats1_decode_T_6 = _GEN_0; // @[package.scala:243:71]
wire [5:0] _d_first_beats1_decode_T_1 = _d_first_beats1_decode_T[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] _d_first_beats1_decode_T_2 = ~_d_first_beats1_decode_T_1; // @[package.scala:243:{46,76}]
wire [2:0] d_first_beats1_decode = _d_first_beats1_decode_T_2[5:3]; // @[package.scala:243:46]
wire d_first_beats1_opdata = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7]
wire d_first_beats1_opdata_1 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7]
wire d_first_beats1_opdata_2 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7]
wire [2:0] d_first_beats1 = d_first_beats1_opdata ? d_first_beats1_decode : 3'h0; // @[Edges.scala:106:36, :220:59, :221:14]
reg [2:0] d_first_counter; // @[Edges.scala:229:27]
wire [3:0] _d_first_counter1_T = {1'h0, d_first_counter} - 4'h1; // @[Edges.scala:229:27, :230:28]
wire [2:0] d_first_counter1 = _d_first_counter1_T[2:0]; // @[Edges.scala:230:28]
wire d_first = d_first_counter == 3'h0; // @[Edges.scala:229:27, :231:25]
wire _d_first_last_T = d_first_counter == 3'h1; // @[Edges.scala:229:27, :232:25]
wire _d_first_last_T_1 = d_first_beats1 == 3'h0; // @[Edges.scala:221:14, :232:43]
wire d_first_last = _d_first_last_T | _d_first_last_T_1; // @[Edges.scala:232:{25,33,43}]
wire d_first_done = d_first_last & _d_first_T; // @[Decoupled.scala:51:35]
wire [2:0] _d_first_count_T = ~d_first_counter1; // @[Edges.scala:230:28, :234:27]
wire [2:0] d_first_count = d_first_beats1 & _d_first_count_T; // @[Edges.scala:221:14, :234:{25,27}]
wire [2:0] _d_first_counter_T = d_first ? d_first_beats1 : d_first_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
reg [2:0] opcode_1; // @[Monitor.scala:538:22]
reg [1:0] param_1; // @[Monitor.scala:539:22]
reg [2:0] size_1; // @[Monitor.scala:540:22]
reg [6:0] source_1; // @[Monitor.scala:541:22]
reg sink; // @[Monitor.scala:542:22]
reg denied; // @[Monitor.scala:543:22]
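  // Per-source bookkeeping: one in-flight bit per A-channel source ID, with 4-bit opcode and size entries packed per source.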
reg [64:0] inflight; // @[Monitor.scala:614:27]
reg [259:0] inflight_opcodes; // @[Monitor.scala:616:35]
reg [259:0] inflight_sizes; // @[Monitor.scala:618:33]
wire [5:0] _a_first_beats1_decode_T_4 = _a_first_beats1_decode_T_3[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] _a_first_beats1_decode_T_5 = ~_a_first_beats1_decode_T_4; // @[package.scala:243:{46,76}]
wire [2:0] a_first_beats1_decode_1 = _a_first_beats1_decode_T_5[5:3]; // @[package.scala:243:46]
wire a_first_beats1_opdata_1 = ~_a_first_beats1_opdata_T_1; // @[Edges.scala:92:{28,37}]
wire [2:0] a_first_beats1_1 = a_first_beats1_opdata_1 ? a_first_beats1_decode_1 : 3'h0; // @[Edges.scala:92:28, :220:59, :221:14]
reg [2:0] a_first_counter_1; // @[Edges.scala:229:27]
wire [3:0] _a_first_counter1_T_1 = {1'h0, a_first_counter_1} - 4'h1; // @[Edges.scala:229:27, :230:28]
wire [2:0] a_first_counter1_1 = _a_first_counter1_T_1[2:0]; // @[Edges.scala:230:28]
wire a_first_1 = a_first_counter_1 == 3'h0; // @[Edges.scala:229:27, :231:25]
wire _a_first_last_T_2 = a_first_counter_1 == 3'h1; // @[Edges.scala:229:27, :232:25]
wire _a_first_last_T_3 = a_first_beats1_1 == 3'h0; // @[Edges.scala:221:14, :232:43]
wire a_first_last_1 = _a_first_last_T_2 | _a_first_last_T_3; // @[Edges.scala:232:{25,33,43}]
wire a_first_done_1 = a_first_last_1 & _a_first_T_1; // @[Decoupled.scala:51:35]
wire [2:0] _a_first_count_T_1 = ~a_first_counter1_1; // @[Edges.scala:230:28, :234:27]
wire [2:0] a_first_count_1 = a_first_beats1_1 & _a_first_count_T_1; // @[Edges.scala:221:14, :234:{25,27}]
wire [2:0] _a_first_counter_T_1 = a_first_1 ? a_first_beats1_1 : a_first_counter1_1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
wire [5:0] _d_first_beats1_decode_T_4 = _d_first_beats1_decode_T_3[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] _d_first_beats1_decode_T_5 = ~_d_first_beats1_decode_T_4; // @[package.scala:243:{46,76}]
wire [2:0] d_first_beats1_decode_1 = _d_first_beats1_decode_T_5[5:3]; // @[package.scala:243:46]
wire [2:0] d_first_beats1_1 = d_first_beats1_opdata_1 ? d_first_beats1_decode_1 : 3'h0; // @[Edges.scala:106:36, :220:59, :221:14]
reg [2:0] d_first_counter_1; // @[Edges.scala:229:27]
wire [3:0] _d_first_counter1_T_1 = {1'h0, d_first_counter_1} - 4'h1; // @[Edges.scala:229:27, :230:28]
wire [2:0] d_first_counter1_1 = _d_first_counter1_T_1[2:0]; // @[Edges.scala:230:28]
wire d_first_1 = d_first_counter_1 == 3'h0; // @[Edges.scala:229:27, :231:25]
wire _d_first_last_T_2 = d_first_counter_1 == 3'h1; // @[Edges.scala:229:27, :232:25]
wire _d_first_last_T_3 = d_first_beats1_1 == 3'h0; // @[Edges.scala:221:14, :232:43]
wire d_first_last_1 = _d_first_last_T_2 | _d_first_last_T_3; // @[Edges.scala:232:{25,33,43}]
wire d_first_done_1 = d_first_last_1 & _d_first_T_1; // @[Decoupled.scala:51:35]
wire [2:0] _d_first_count_T_1 = ~d_first_counter1_1; // @[Edges.scala:230:28, :234:27]
wire [2:0] d_first_count_1 = d_first_beats1_1 & _d_first_count_T_1; // @[Edges.scala:221:14, :234:{25,27}]
wire [2:0] _d_first_counter_T_1 = d_first_1 ? d_first_beats1_1 : d_first_counter1_1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
wire [64:0] a_set; // @[Monitor.scala:626:34]
wire [64:0] a_set_wo_ready; // @[Monitor.scala:627:34]
wire [259:0] a_opcodes_set; // @[Monitor.scala:630:33]
wire [259:0] a_sizes_set; // @[Monitor.scala:632:31]
wire [2:0] a_opcode_lookup; // @[Monitor.scala:635:35]
wire [9:0] _GEN_1 = {1'h0, io_in_d_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :637:69]
wire [9:0] _a_opcode_lookup_T; // @[Monitor.scala:637:69]
assign _a_opcode_lookup_T = _GEN_1; // @[Monitor.scala:637:69]
wire [9:0] _a_size_lookup_T; // @[Monitor.scala:641:65]
assign _a_size_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :641:65]
wire [9:0] _d_opcodes_clr_T_4; // @[Monitor.scala:680:101]
assign _d_opcodes_clr_T_4 = _GEN_1; // @[Monitor.scala:637:69, :680:101]
wire [9:0] _d_sizes_clr_T_4; // @[Monitor.scala:681:99]
assign _d_sizes_clr_T_4 = _GEN_1; // @[Monitor.scala:637:69, :681:99]
wire [9:0] _c_opcode_lookup_T; // @[Monitor.scala:749:69]
assign _c_opcode_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :749:69]
wire [9:0] _c_size_lookup_T; // @[Monitor.scala:750:67]
assign _c_size_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :750:67]
wire [9:0] _d_opcodes_clr_T_10; // @[Monitor.scala:790:101]
assign _d_opcodes_clr_T_10 = _GEN_1; // @[Monitor.scala:637:69, :790:101]
wire [9:0] _d_sizes_clr_T_10; // @[Monitor.scala:791:99]
assign _d_sizes_clr_T_10 = _GEN_1; // @[Monitor.scala:637:69, :791:99]
wire [259:0] _a_opcode_lookup_T_1 = inflight_opcodes >> _a_opcode_lookup_T; // @[Monitor.scala:616:35, :637:{44,69}]
wire [259:0] _a_opcode_lookup_T_6 = {256'h0, _a_opcode_lookup_T_1[3:0]}; // @[Monitor.scala:637:{44,97}]
wire [259:0] _a_opcode_lookup_T_7 = {1'h0, _a_opcode_lookup_T_6[259:1]}; // @[Monitor.scala:637:{97,152}]
assign a_opcode_lookup = _a_opcode_lookup_T_7[2:0]; // @[Monitor.scala:635:35, :637:{21,152}]
wire [3:0] a_size_lookup; // @[Monitor.scala:639:33]
wire [259:0] _a_size_lookup_T_1 = inflight_sizes >> _a_size_lookup_T; // @[Monitor.scala:618:33, :641:{40,65}]
wire [259:0] _a_size_lookup_T_6 = {256'h0, _a_size_lookup_T_1[3:0]}; // @[Monitor.scala:641:{40,91}]
wire [259:0] _a_size_lookup_T_7 = {1'h0, _a_size_lookup_T_6[259:1]}; // @[Monitor.scala:641:{91,144}]
assign a_size_lookup = _a_size_lookup_T_7[3:0]; // @[Monitor.scala:639:33, :641:{19,144}]
wire [3:0] a_opcodes_set_interm; // @[Monitor.scala:646:40]
wire [3:0] a_sizes_set_interm; // @[Monitor.scala:648:38]
wire _same_cycle_resp_T = io_in_a_valid_0 & a_first_1; // @[Monitor.scala:36:7, :651:26, :684:44]
wire [127:0] _GEN_2 = 128'h1 << io_in_a_bits_source_0; // @[OneHot.scala:58:35]
wire [127:0] _a_set_wo_ready_T; // @[OneHot.scala:58:35]
assign _a_set_wo_ready_T = _GEN_2; // @[OneHot.scala:58:35]
wire [127:0] _a_set_T; // @[OneHot.scala:58:35]
assign _a_set_T = _GEN_2; // @[OneHot.scala:58:35]
assign a_set_wo_ready = _same_cycle_resp_T ? _a_set_wo_ready_T[64:0] : 65'h0; // @[OneHot.scala:58:35]
wire _T_1093 = _T_1167 & a_first_1; // @[Decoupled.scala:51:35]
assign a_set = _T_1093 ? _a_set_T[64:0] : 65'h0; // @[OneHot.scala:58:35]
wire [3:0] _a_opcodes_set_interm_T = {io_in_a_bits_opcode_0, 1'h0}; // @[Monitor.scala:36:7, :657:53]
wire [3:0] _a_opcodes_set_interm_T_1 = {_a_opcodes_set_interm_T[3:1], 1'h1}; // @[Monitor.scala:657:{53,61}]
assign a_opcodes_set_interm = _T_1093 ? _a_opcodes_set_interm_T_1 : 4'h0; // @[Monitor.scala:646:40, :655:{25,70}, :657:{28,61}]
wire [3:0] _a_sizes_set_interm_T = {io_in_a_bits_size_0, 1'h0}; // @[Monitor.scala:36:7, :658:51]
wire [3:0] _a_sizes_set_interm_T_1 = {_a_sizes_set_interm_T[3:1], 1'h1}; // @[Monitor.scala:658:{51,59}]
assign a_sizes_set_interm = _T_1093 ? _a_sizes_set_interm_T_1 : 4'h0; // @[Monitor.scala:648:38, :655:{25,70}, :658:{28,59}]
wire [9:0] _GEN_3 = {1'h0, io_in_a_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :659:79]
wire [9:0] _a_opcodes_set_T; // @[Monitor.scala:659:79]
assign _a_opcodes_set_T = _GEN_3; // @[Monitor.scala:659:79]
wire [9:0] _a_sizes_set_T; // @[Monitor.scala:660:77]
assign _a_sizes_set_T = _GEN_3; // @[Monitor.scala:659:79, :660:77]
wire [1026:0] _a_opcodes_set_T_1 = {1023'h0, a_opcodes_set_interm} << _a_opcodes_set_T; // @[Monitor.scala:646:40, :659:{54,79}]
assign a_opcodes_set = _T_1093 ? _a_opcodes_set_T_1[259:0] : 260'h0; // @[Monitor.scala:630:33, :655:{25,70}, :659:{28,54}]
wire [1026:0] _a_sizes_set_T_1 = {1023'h0, a_sizes_set_interm} << _a_sizes_set_T; // @[Monitor.scala:648:38, :659:54, :660:{52,77}]
assign a_sizes_set = _T_1093 ? _a_sizes_set_T_1[259:0] : 260'h0; // @[Monitor.scala:632:31, :655:{25,70}, :660:{28,52}]
wire [64:0] d_clr; // @[Monitor.scala:664:34]
wire [64:0] d_clr_wo_ready; // @[Monitor.scala:665:34]
wire [259:0] d_opcodes_clr; // @[Monitor.scala:668:33]
wire [259:0] d_sizes_clr; // @[Monitor.scala:670:31]
wire _GEN_4 = io_in_d_bits_opcode_0 == 3'h6; // @[Monitor.scala:36:7, :673:46]
wire d_release_ack; // @[Monitor.scala:673:46]
assign d_release_ack = _GEN_4; // @[Monitor.scala:673:46]
wire d_release_ack_1; // @[Monitor.scala:783:46]
assign d_release_ack_1 = _GEN_4; // @[Monitor.scala:673:46, :783:46]
wire _T_1139 = io_in_d_valid_0 & d_first_1; // @[Monitor.scala:36:7, :674:26]
wire [127:0] _GEN_5 = 128'h1 << io_in_d_bits_source_0; // @[OneHot.scala:58:35]
wire [127:0] _d_clr_wo_ready_T; // @[OneHot.scala:58:35]
assign _d_clr_wo_ready_T = _GEN_5; // @[OneHot.scala:58:35]
wire [127:0] _d_clr_T; // @[OneHot.scala:58:35]
assign _d_clr_T = _GEN_5; // @[OneHot.scala:58:35]
wire [127:0] _d_clr_wo_ready_T_1; // @[OneHot.scala:58:35]
assign _d_clr_wo_ready_T_1 = _GEN_5; // @[OneHot.scala:58:35]
wire [127:0] _d_clr_T_1; // @[OneHot.scala:58:35]
assign _d_clr_T_1 = _GEN_5; // @[OneHot.scala:58:35]
assign d_clr_wo_ready = _T_1139 & ~d_release_ack ? _d_clr_wo_ready_T[64:0] : 65'h0; // @[OneHot.scala:58:35]
wire _T_1108 = _T_1240 & d_first_1 & ~d_release_ack; // @[Decoupled.scala:51:35]
assign d_clr = _T_1108 ? _d_clr_T[64:0] : 65'h0; // @[OneHot.scala:58:35]
wire [1038:0] _d_opcodes_clr_T_5 = 1039'hF << _d_opcodes_clr_T_4; // @[Monitor.scala:680:{76,101}]
assign d_opcodes_clr = _T_1108 ? _d_opcodes_clr_T_5[259:0] : 260'h0; // @[Monitor.scala:668:33, :678:{25,70,89}, :680:{21,76}]
wire [1038:0] _d_sizes_clr_T_5 = 1039'hF << _d_sizes_clr_T_4; // @[Monitor.scala:681:{74,99}]
assign d_sizes_clr = _T_1108 ? _d_sizes_clr_T_5[259:0] : 260'h0; // @[Monitor.scala:670:31, :678:{25,70,89}, :681:{21,74}]
wire _same_cycle_resp_T_1 = _same_cycle_resp_T; // @[Monitor.scala:684:{44,55}]
wire _same_cycle_resp_T_2 = io_in_a_bits_source_0 == io_in_d_bits_source_0; // @[Monitor.scala:36:7, :684:113]
wire same_cycle_resp = _same_cycle_resp_T_1 & _same_cycle_resp_T_2; // @[Monitor.scala:684:{55,88,113}]
wire [64:0] _inflight_T = inflight | a_set; // @[Monitor.scala:614:27, :626:34, :705:27]
wire [64:0] _inflight_T_1 = ~d_clr; // @[Monitor.scala:664:34, :705:38]
wire [64:0] _inflight_T_2 = _inflight_T & _inflight_T_1; // @[Monitor.scala:705:{27,36,38}]
wire [259:0] _inflight_opcodes_T = inflight_opcodes | a_opcodes_set; // @[Monitor.scala:616:35, :630:33, :706:43]
wire [259:0] _inflight_opcodes_T_1 = ~d_opcodes_clr; // @[Monitor.scala:668:33, :706:62]
wire [259:0] _inflight_opcodes_T_2 = _inflight_opcodes_T & _inflight_opcodes_T_1; // @[Monitor.scala:706:{43,60,62}]
wire [259:0] _inflight_sizes_T = inflight_sizes | a_sizes_set; // @[Monitor.scala:618:33, :632:31, :707:39]
wire [259:0] _inflight_sizes_T_1 = ~d_sizes_clr; // @[Monitor.scala:670:31, :707:56]
wire [259:0] _inflight_sizes_T_2 = _inflight_sizes_T & _inflight_sizes_T_1; // @[Monitor.scala:707:{39,54,56}]
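  // Watchdog: bounds how long a request may remain in flight without a matching D-channel response.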
reg [31:0] watchdog; // @[Monitor.scala:709:27]
wire [32:0] _watchdog_T = {1'h0, watchdog} + 33'h1; // @[Monitor.scala:709:27, :714:26]
wire [31:0] _watchdog_T_1 = _watchdog_T[31:0]; // @[Monitor.scala:714:26]
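  // Second bookkeeping copy for C-channel Releases awaiting ReleaseAck (the C-channel set terms are tied to zero in this instance).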
reg [64:0] inflight_1; // @[Monitor.scala:726:35]
wire [64:0] _inflight_T_3 = inflight_1; // @[Monitor.scala:726:35, :814:35]
reg [259:0] inflight_opcodes_1; // @[Monitor.scala:727:35]
wire [259:0] _inflight_opcodes_T_3 = inflight_opcodes_1; // @[Monitor.scala:727:35, :815:43]
reg [259:0] inflight_sizes_1; // @[Monitor.scala:728:35]
wire [259:0] _inflight_sizes_T_3 = inflight_sizes_1; // @[Monitor.scala:728:35, :816:41]
wire [5:0] _d_first_beats1_decode_T_7 = _d_first_beats1_decode_T_6[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] _d_first_beats1_decode_T_8 = ~_d_first_beats1_decode_T_7; // @[package.scala:243:{46,76}]
wire [2:0] d_first_beats1_decode_2 = _d_first_beats1_decode_T_8[5:3]; // @[package.scala:243:46]
wire [2:0] d_first_beats1_2 = d_first_beats1_opdata_2 ? d_first_beats1_decode_2 : 3'h0; // @[Edges.scala:106:36, :220:59, :221:14]
reg [2:0] d_first_counter_2; // @[Edges.scala:229:27]
wire [3:0] _d_first_counter1_T_2 = {1'h0, d_first_counter_2} - 4'h1; // @[Edges.scala:229:27, :230:28]
wire [2:0] d_first_counter1_2 = _d_first_counter1_T_2[2:0]; // @[Edges.scala:230:28]
wire d_first_2 = d_first_counter_2 == 3'h0; // @[Edges.scala:229:27, :231:25]
wire _d_first_last_T_4 = d_first_counter_2 == 3'h1; // @[Edges.scala:229:27, :232:25]
wire _d_first_last_T_5 = d_first_beats1_2 == 3'h0; // @[Edges.scala:221:14, :232:43]
wire d_first_last_2 = _d_first_last_T_4 | _d_first_last_T_5; // @[Edges.scala:232:{25,33,43}]
wire d_first_done_2 = d_first_last_2 & _d_first_T_2; // @[Decoupled.scala:51:35]
wire [2:0] _d_first_count_T_2 = ~d_first_counter1_2; // @[Edges.scala:230:28, :234:27]
wire [2:0] d_first_count_2 = d_first_beats1_2 & _d_first_count_T_2; // @[Edges.scala:221:14, :234:{25,27}]
wire [2:0] _d_first_counter_T_2 = d_first_2 ? d_first_beats1_2 : d_first_counter1_2; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
wire [3:0] c_opcode_lookup; // @[Monitor.scala:747:35]
wire [3:0] c_size_lookup; // @[Monitor.scala:748:35]
wire [259:0] _c_opcode_lookup_T_1 = inflight_opcodes_1 >> _c_opcode_lookup_T; // @[Monitor.scala:727:35, :749:{44,69}]
wire [259:0] _c_opcode_lookup_T_6 = {256'h0, _c_opcode_lookup_T_1[3:0]}; // @[Monitor.scala:749:{44,97}]
wire [259:0] _c_opcode_lookup_T_7 = {1'h0, _c_opcode_lookup_T_6[259:1]}; // @[Monitor.scala:749:{97,152}]
assign c_opcode_lookup = _c_opcode_lookup_T_7[3:0]; // @[Monitor.scala:747:35, :749:{21,152}]
wire [259:0] _c_size_lookup_T_1 = inflight_sizes_1 >> _c_size_lookup_T; // @[Monitor.scala:728:35, :750:{42,67}]
wire [259:0] _c_size_lookup_T_6 = {256'h0, _c_size_lookup_T_1[3:0]}; // @[Monitor.scala:750:{42,93}]
wire [259:0] _c_size_lookup_T_7 = {1'h0, _c_size_lookup_T_6[259:1]}; // @[Monitor.scala:750:{93,146}]
assign c_size_lookup = _c_size_lookup_T_7[3:0]; // @[Monitor.scala:748:35, :750:{21,146}]
wire [64:0] d_clr_1; // @[Monitor.scala:774:34]
wire [64:0] d_clr_wo_ready_1; // @[Monitor.scala:775:34]
wire [259:0] d_opcodes_clr_1; // @[Monitor.scala:776:34]
wire [259:0] d_sizes_clr_1; // @[Monitor.scala:777:34]
wire _T_1211 = io_in_d_valid_0 & d_first_2; // @[Monitor.scala:36:7, :784:26]
assign d_clr_wo_ready_1 = _T_1211 & d_release_ack_1 ? _d_clr_wo_ready_T_1[64:0] : 65'h0; // @[OneHot.scala:58:35]
wire _T_1193 = _T_1240 & d_first_2 & d_release_ack_1; // @[Decoupled.scala:51:35]
assign d_clr_1 = _T_1193 ? _d_clr_T_1[64:0] : 65'h0; // @[OneHot.scala:58:35]
wire [1038:0] _d_opcodes_clr_T_11 = 1039'hF << _d_opcodes_clr_T_10; // @[Monitor.scala:790:{76,101}]
assign d_opcodes_clr_1 = _T_1193 ? _d_opcodes_clr_T_11[259:0] : 260'h0; // @[Monitor.scala:776:34, :788:{25,70,88}, :790:{21,76}]
wire [1038:0] _d_sizes_clr_T_11 = 1039'hF << _d_sizes_clr_T_10; // @[Monitor.scala:791:{74,99}]
assign d_sizes_clr_1 = _T_1193 ? _d_sizes_clr_T_11[259:0] : 260'h0; // @[Monitor.scala:777:34, :788:{25,70,88}, :791:{21,74}]
wire _same_cycle_resp_T_8 = io_in_d_bits_source_0 == 7'h0; // @[Monitor.scala:36:7, :795:113]
wire [64:0] _inflight_T_4 = ~d_clr_1; // @[Monitor.scala:774:34, :814:46]
wire [64:0] _inflight_T_5 = _inflight_T_3 & _inflight_T_4; // @[Monitor.scala:814:{35,44,46}]
wire [259:0] _inflight_opcodes_T_4 = ~d_opcodes_clr_1; // @[Monitor.scala:776:34, :815:62]
wire [259:0] _inflight_opcodes_T_5 = _inflight_opcodes_T_3 & _inflight_opcodes_T_4; // @[Monitor.scala:815:{43,60,62}]
wire [259:0] _inflight_sizes_T_4 = ~d_sizes_clr_1; // @[Monitor.scala:777:34, :816:58]
wire [259:0] _inflight_sizes_T_5 = _inflight_sizes_T_3 & _inflight_sizes_T_4; // @[Monitor.scala:816:{41,56,58}]
reg [31:0] watchdog_1; // @[Monitor.scala:818:27] |

Generate the Verilog code corresponding to the following Chisel files.
File FPU.scala:
// See LICENSE.Berkeley for license details.
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tile
import chisel3._
import chisel3.util._
import chisel3.{DontCare, WireInit, withClock, withReset}
import chisel3.experimental.SourceInfo
import chisel3.experimental.dataview._
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.rocket._
import freechips.rocketchip.rocket.Instructions._
import freechips.rocketchip.util._
import freechips.rocketchip.util.property
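// Per-tile FPU configuration: the supported FP widths (minFLen/fLen) and the pipeline latency of each functional unit.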
case class FPUParams(
minFLen: Int = 32,
fLen: Int = 64,
divSqrt: Boolean = true,
sfmaLatency: Int = 3,
dfmaLatency: Int = 4,
fpmuLatency: Int = 2,
ifpuLatency: Int = 2
)
object FPConstants
{
val RM_SZ = 3
val FLAGS_SZ = 5
}
trait HasFPUCtrlSigs {
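  // Decoded FP control signals: ldst/wen gate FP register-file access, ren1-3 and the
  // swap bits route operands, typeTagIn/typeTagOut give the source and result formats,
  // fromint/toint/fastpipe/fma/div/sqrt select the functional unit, and wflags marks
  // operations that update the exception flags.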
val ldst = Bool()
val wen = Bool()
val ren1 = Bool()
val ren2 = Bool()
val ren3 = Bool()
val swap12 = Bool()
val swap23 = Bool()
val typeTagIn = UInt(2.W)
val typeTagOut = UInt(2.W)
val fromint = Bool()
val toint = Bool()
val fastpipe = Bool()
val fma = Bool()
val div = Bool()
val sqrt = Bool()
val wflags = Bool()
val vec = Bool()
}
class FPUCtrlSigs extends Bundle with HasFPUCtrlSigs
class FPUDecoder(implicit p: Parameters) extends FPUModule()(p) {
val io = IO(new Bundle {
val inst = Input(Bits(32.W))
val sigs = Output(new FPUCtrlSigs())
})
private val X2 = BitPat.dontCare(2)
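  // Each decode row lists the HasFPUCtrlSigs fields in declaration order:
  // ldst, wen, ren1, ren2, ren3, swap12, swap23, typeTagIn, typeTagOut,
  // fromint, toint, fastpipe, fma, div, sqrt, wflags, vec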
val default = List(X,X,X,X,X,X,X,X2,X2,X,X,X,X,X,X,X,N)
val h: Array[(BitPat, List[BitPat])] =
Array(FLH -> List(Y,Y,N,N,N,X,X,X2,X2,N,N,N,N,N,N,N,N),
FSH -> List(Y,N,N,Y,N,Y,X, I, H,N,Y,N,N,N,N,N,N),
FMV_H_X -> List(N,Y,N,N,N,X,X, H, I,Y,N,N,N,N,N,N,N),
FCVT_H_W -> List(N,Y,N,N,N,X,X, H, H,Y,N,N,N,N,N,Y,N),
FCVT_H_WU-> List(N,Y,N,N,N,X,X, H, H,Y,N,N,N,N,N,Y,N),
FCVT_H_L -> List(N,Y,N,N,N,X,X, H, H,Y,N,N,N,N,N,Y,N),
FCVT_H_LU-> List(N,Y,N,N,N,X,X, H, H,Y,N,N,N,N,N,Y,N),
FMV_X_H -> List(N,N,Y,N,N,N,X, I, H,N,Y,N,N,N,N,N,N),
FCLASS_H -> List(N,N,Y,N,N,N,X, H, H,N,Y,N,N,N,N,N,N),
FCVT_W_H -> List(N,N,Y,N,N,N,X, H,X2,N,Y,N,N,N,N,Y,N),
FCVT_WU_H-> List(N,N,Y,N,N,N,X, H,X2,N,Y,N,N,N,N,Y,N),
FCVT_L_H -> List(N,N,Y,N,N,N,X, H,X2,N,Y,N,N,N,N,Y,N),
FCVT_LU_H-> List(N,N,Y,N,N,N,X, H,X2,N,Y,N,N,N,N,Y,N),
FCVT_S_H -> List(N,Y,Y,N,N,N,X, H, S,N,N,Y,N,N,N,Y,N),
FCVT_H_S -> List(N,Y,Y,N,N,N,X, S, H,N,N,Y,N,N,N,Y,N),
FEQ_H -> List(N,N,Y,Y,N,N,N, H, H,N,Y,N,N,N,N,Y,N),
FLT_H -> List(N,N,Y,Y,N,N,N, H, H,N,Y,N,N,N,N,Y,N),
FLE_H -> List(N,N,Y,Y,N,N,N, H, H,N,Y,N,N,N,N,Y,N),
FSGNJ_H -> List(N,Y,Y,Y,N,N,N, H, H,N,N,Y,N,N,N,N,N),
FSGNJN_H -> List(N,Y,Y,Y,N,N,N, H, H,N,N,Y,N,N,N,N,N),
FSGNJX_H -> List(N,Y,Y,Y,N,N,N, H, H,N,N,Y,N,N,N,N,N),
FMIN_H -> List(N,Y,Y,Y,N,N,N, H, H,N,N,Y,N,N,N,Y,N),
FMAX_H -> List(N,Y,Y,Y,N,N,N, H, H,N,N,Y,N,N,N,Y,N),
FADD_H -> List(N,Y,Y,Y,N,N,Y, H, H,N,N,N,Y,N,N,Y,N),
FSUB_H -> List(N,Y,Y,Y,N,N,Y, H, H,N,N,N,Y,N,N,Y,N),
FMUL_H -> List(N,Y,Y,Y,N,N,N, H, H,N,N,N,Y,N,N,Y,N),
FMADD_H -> List(N,Y,Y,Y,Y,N,N, H, H,N,N,N,Y,N,N,Y,N),
FMSUB_H -> List(N,Y,Y,Y,Y,N,N, H, H,N,N,N,Y,N,N,Y,N),
FNMADD_H -> List(N,Y,Y,Y,Y,N,N, H, H,N,N,N,Y,N,N,Y,N),
FNMSUB_H -> List(N,Y,Y,Y,Y,N,N, H, H,N,N,N,Y,N,N,Y,N),
FDIV_H -> List(N,Y,Y,Y,N,N,N, H, H,N,N,N,N,Y,N,Y,N),
FSQRT_H -> List(N,Y,Y,N,N,N,X, H, H,N,N,N,N,N,Y,Y,N))
val f: Array[(BitPat, List[BitPat])] =
Array(FLW -> List(Y,Y,N,N,N,X,X,X2,X2,N,N,N,N,N,N,N,N),
FSW -> List(Y,N,N,Y,N,Y,X, I, S,N,Y,N,N,N,N,N,N),
FMV_W_X -> List(N,Y,N,N,N,X,X, S, I,Y,N,N,N,N,N,N,N),
FCVT_S_W -> List(N,Y,N,N,N,X,X, S, S,Y,N,N,N,N,N,Y,N),
FCVT_S_WU-> List(N,Y,N,N,N,X,X, S, S,Y,N,N,N,N,N,Y,N),
FCVT_S_L -> List(N,Y,N,N,N,X,X, S, S,Y,N,N,N,N,N,Y,N),
FCVT_S_LU-> List(N,Y,N,N,N,X,X, S, S,Y,N,N,N,N,N,Y,N),
FMV_X_W -> List(N,N,Y,N,N,N,X, I, S,N,Y,N,N,N,N,N,N),
FCLASS_S -> List(N,N,Y,N,N,N,X, S, S,N,Y,N,N,N,N,N,N),
FCVT_W_S -> List(N,N,Y,N,N,N,X, S,X2,N,Y,N,N,N,N,Y,N),
FCVT_WU_S-> List(N,N,Y,N,N,N,X, S,X2,N,Y,N,N,N,N,Y,N),
FCVT_L_S -> List(N,N,Y,N,N,N,X, S,X2,N,Y,N,N,N,N,Y,N),
FCVT_LU_S-> List(N,N,Y,N,N,N,X, S,X2,N,Y,N,N,N,N,Y,N),
FEQ_S -> List(N,N,Y,Y,N,N,N, S, S,N,Y,N,N,N,N,Y,N),
FLT_S -> List(N,N,Y,Y,N,N,N, S, S,N,Y,N,N,N,N,Y,N),
FLE_S -> List(N,N,Y,Y,N,N,N, S, S,N,Y,N,N,N,N,Y,N),
FSGNJ_S -> List(N,Y,Y,Y,N,N,N, S, S,N,N,Y,N,N,N,N,N),
FSGNJN_S -> List(N,Y,Y,Y,N,N,N, S, S,N,N,Y,N,N,N,N,N),
FSGNJX_S -> List(N,Y,Y,Y,N,N,N, S, S,N,N,Y,N,N,N,N,N),
FMIN_S -> List(N,Y,Y,Y,N,N,N, S, S,N,N,Y,N,N,N,Y,N),
FMAX_S -> List(N,Y,Y,Y,N,N,N, S, S,N,N,Y,N,N,N,Y,N),
FADD_S -> List(N,Y,Y,Y,N,N,Y, S, S,N,N,N,Y,N,N,Y,N),
FSUB_S -> List(N,Y,Y,Y,N,N,Y, S, S,N,N,N,Y,N,N,Y,N),
FMUL_S -> List(N,Y,Y,Y,N,N,N, S, S,N,N,N,Y,N,N,Y,N),
FMADD_S -> List(N,Y,Y,Y,Y,N,N, S, S,N,N,N,Y,N,N,Y,N),
FMSUB_S -> List(N,Y,Y,Y,Y,N,N, S, S,N,N,N,Y,N,N,Y,N),
FNMADD_S -> List(N,Y,Y,Y,Y,N,N, S, S,N,N,N,Y,N,N,Y,N),
FNMSUB_S -> List(N,Y,Y,Y,Y,N,N, S, S,N,N,N,Y,N,N,Y,N),
FDIV_S -> List(N,Y,Y,Y,N,N,N, S, S,N,N,N,N,Y,N,Y,N),
FSQRT_S -> List(N,Y,Y,N,N,N,X, S, S,N,N,N,N,N,Y,Y,N))
val d: Array[(BitPat, List[BitPat])] =
Array(FLD -> List(Y,Y,N,N,N,X,X,X2,X2,N,N,N,N,N,N,N,N),
FSD -> List(Y,N,N,Y,N,Y,X, I, D,N,Y,N,N,N,N,N,N),
FMV_D_X -> List(N,Y,N,N,N,X,X, D, I,Y,N,N,N,N,N,N,N),
FCVT_D_W -> List(N,Y,N,N,N,X,X, D, D,Y,N,N,N,N,N,Y,N),
FCVT_D_WU-> List(N,Y,N,N,N,X,X, D, D,Y,N,N,N,N,N,Y,N),
FCVT_D_L -> List(N,Y,N,N,N,X,X, D, D,Y,N,N,N,N,N,Y,N),
FCVT_D_LU-> List(N,Y,N,N,N,X,X, D, D,Y,N,N,N,N,N,Y,N),
FMV_X_D -> List(N,N,Y,N,N,N,X, I, D,N,Y,N,N,N,N,N,N),
FCLASS_D -> List(N,N,Y,N,N,N,X, D, D,N,Y,N,N,N,N,N,N),
FCVT_W_D -> List(N,N,Y,N,N,N,X, D,X2,N,Y,N,N,N,N,Y,N),
FCVT_WU_D-> List(N,N,Y,N,N,N,X, D,X2,N,Y,N,N,N,N,Y,N),
FCVT_L_D -> List(N,N,Y,N,N,N,X, D,X2,N,Y,N,N,N,N,Y,N),
FCVT_LU_D-> List(N,N,Y,N,N,N,X, D,X2,N,Y,N,N,N,N,Y,N),
FCVT_S_D -> List(N,Y,Y,N,N,N,X, D, S,N,N,Y,N,N,N,Y,N),
FCVT_D_S -> List(N,Y,Y,N,N,N,X, S, D,N,N,Y,N,N,N,Y,N),
FEQ_D -> List(N,N,Y,Y,N,N,N, D, D,N,Y,N,N,N,N,Y,N),
FLT_D -> List(N,N,Y,Y,N,N,N, D, D,N,Y,N,N,N,N,Y,N),
FLE_D -> List(N,N,Y,Y,N,N,N, D, D,N,Y,N,N,N,N,Y,N),
FSGNJ_D -> List(N,Y,Y,Y,N,N,N, D, D,N,N,Y,N,N,N,N,N),
FSGNJN_D -> List(N,Y,Y,Y,N,N,N, D, D,N,N,Y,N,N,N,N,N),
FSGNJX_D -> List(N,Y,Y,Y,N,N,N, D, D,N,N,Y,N,N,N,N,N),
FMIN_D -> List(N,Y,Y,Y,N,N,N, D, D,N,N,Y,N,N,N,Y,N),
FMAX_D -> List(N,Y,Y,Y,N,N,N, D, D,N,N,Y,N,N,N,Y,N),
FADD_D -> List(N,Y,Y,Y,N,N,Y, D, D,N,N,N,Y,N,N,Y,N),
FSUB_D -> List(N,Y,Y,Y,N,N,Y, D, D,N,N,N,Y,N,N,Y,N),
FMUL_D -> List(N,Y,Y,Y,N,N,N, D, D,N,N,N,Y,N,N,Y,N),
FMADD_D -> List(N,Y,Y,Y,Y,N,N, D, D,N,N,N,Y,N,N,Y,N),
FMSUB_D -> List(N,Y,Y,Y,Y,N,N, D, D,N,N,N,Y,N,N,Y,N),
FNMADD_D -> List(N,Y,Y,Y,Y,N,N, D, D,N,N,N,Y,N,N,Y,N),
FNMSUB_D -> List(N,Y,Y,Y,Y,N,N, D, D,N,N,N,Y,N,N,Y,N),
FDIV_D -> List(N,Y,Y,Y,N,N,N, D, D,N,N,N,N,Y,N,Y,N),
FSQRT_D -> List(N,Y,Y,N,N,N,X, D, D,N,N,N,N,N,Y,Y,N))
val fcvt_hd: Array[(BitPat, List[BitPat])] =
Array(FCVT_H_D -> List(N,Y,Y,N,N,N,X, D, H,N,N,Y,N,N,N,Y,N),
FCVT_D_H -> List(N,Y,Y,N,N,N,X, H, D,N,N,Y,N,N,N,Y,N))
val vfmv_f_s: Array[(BitPat, List[BitPat])] =
Array(VFMV_F_S -> List(N,Y,N,N,N,N,X,X2,X2,N,N,N,N,N,N,N,Y))
val insns = ((minFLen, fLen) match {
case (32, 32) => f
case (16, 32) => h ++ f
case (32, 64) => f ++ d
case (16, 64) => h ++ f ++ d ++ fcvt_hd
case other => throw new Exception(s"minFLen = ${minFLen} & fLen = ${fLen} is an unsupported configuration")
}) ++ (if (usingVector) vfmv_f_s else Array[(BitPat, List[BitPat])]())
val decoder = DecodeLogic(io.inst, default, insns)
val s = io.sigs
val sigs = Seq(s.ldst, s.wen, s.ren1, s.ren2, s.ren3, s.swap12,
s.swap23, s.typeTagIn, s.typeTagOut, s.fromint, s.toint,
s.fastpipe, s.fma, s.div, s.sqrt, s.wflags, s.vec)
sigs zip decoder map {case(s,d) => s := d}
}
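// Illustrative note: each row in the decode tables above is read positionally into
// FPUCtrlSigs, in the same order the `sigs` sequence in FPUDecoder zips with the decoder
// outputs:
//   ldst, wen, ren1, ren2, ren3, swap12, swap23, typeTagIn, typeTagOut,
//   fromint, toint, fastpipe, fma, div, sqrt, wflags, vec
// For example, FADD_S -> List(N,Y,Y,Y,N,N,Y, S, S,N,N,N,Y,N,N,Y,N) reads as: write the FP
// register file, read rs1 and rs2, swap23 (route rs2 into the third FMA operand so the FMA
// computes rs1 * 1.0 + rs2), single precision in and out, issue to the FMA pipe, and update
// the exception flags.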
class FPUCoreIO(implicit p: Parameters) extends CoreBundle()(p) {
val hartid = Input(UInt(hartIdLen.W))
val time = Input(UInt(xLen.W))
val inst = Input(Bits(32.W))
val fromint_data = Input(Bits(xLen.W))
val fcsr_rm = Input(Bits(FPConstants.RM_SZ.W))
val fcsr_flags = Valid(Bits(FPConstants.FLAGS_SZ.W))
val v_sew = Input(UInt(3.W))
val store_data = Output(Bits(fLen.W))
val toint_data = Output(Bits(xLen.W))
val ll_resp_val = Input(Bool())
val ll_resp_type = Input(Bits(3.W))
val ll_resp_tag = Input(UInt(5.W))
val ll_resp_data = Input(Bits(fLen.W))
val valid = Input(Bool())
val fcsr_rdy = Output(Bool())
val nack_mem = Output(Bool())
val illegal_rm = Output(Bool())
val killx = Input(Bool())
val killm = Input(Bool())
val dec = Output(new FPUCtrlSigs())
val sboard_set = Output(Bool())
val sboard_clr = Output(Bool())
val sboard_clra = Output(UInt(5.W))
val keep_clock_enabled = Input(Bool())
}
class FPUIO(implicit p: Parameters) extends FPUCoreIO ()(p) {
val cp_req = Flipped(Decoupled(new FPInput())) //cp doesn't pay attn to kill sigs
val cp_resp = Decoupled(new FPResult())
}
class FPResult(implicit p: Parameters) extends CoreBundle()(p) {
val data = Bits((fLen+1).W)
val exc = Bits(FPConstants.FLAGS_SZ.W)
}
class IntToFPInput(implicit p: Parameters) extends CoreBundle()(p) with HasFPUCtrlSigs {
val rm = Bits(FPConstants.RM_SZ.W)
val typ = Bits(2.W)
val in1 = Bits(xLen.W)
}
class FPInput(implicit p: Parameters) extends CoreBundle()(p) with HasFPUCtrlSigs {
val rm = Bits(FPConstants.RM_SZ.W)
val fmaCmd = Bits(2.W)
val typ = Bits(2.W)
val fmt = Bits(2.W)
val in1 = Bits((fLen+1).W)
val in2 = Bits((fLen+1).W)
val in3 = Bits((fLen+1).W)
}
case class FType(exp: Int, sig: Int) {
def ieeeWidth = exp + sig
def recodedWidth = ieeeWidth + 1
def ieeeQNaN = ((BigInt(1) << (ieeeWidth - 1)) - (BigInt(1) << (sig - 2))).U(ieeeWidth.W)
def qNaN = ((BigInt(7) << (exp + sig - 3)) + (BigInt(1) << (sig - 2))).U(recodedWidth.W)
def isNaN(x: UInt) = x(sig + exp - 1, sig + exp - 3).andR
def isSNaN(x: UInt) = isNaN(x) && !x(sig - 2)
def classify(x: UInt) = {
val sign = x(sig + exp)
val code = x(exp + sig - 1, exp + sig - 3)
val codeHi = code(2, 1)
val isSpecial = codeHi === 3.U
val isHighSubnormalIn = x(exp + sig - 3, sig - 1) < 2.U
val isSubnormal = code === 1.U || codeHi === 1.U && isHighSubnormalIn
val isNormal = codeHi === 1.U && !isHighSubnormalIn || codeHi === 2.U
val isZero = code === 0.U
val isInf = isSpecial && !code(0)
val isNaN = code.andR
val isSNaN = isNaN && !x(sig-2)
val isQNaN = isNaN && x(sig-2)
Cat(isQNaN, isSNaN, isInf && !sign, isNormal && !sign,
isSubnormal && !sign, isZero && !sign, isZero && sign,
isSubnormal && sign, isNormal && sign, isInf && sign)
}
// convert between formats, ignoring rounding, range, NaN
def unsafeConvert(x: UInt, to: FType) = if (this == to) x else {
val sign = x(sig + exp)
val fractIn = x(sig - 2, 0)
val expIn = x(sig + exp - 1, sig - 1)
val fractOut = fractIn << to.sig >> sig
val expOut = {
val expCode = expIn(exp, exp - 2)
val commonCase = (expIn + (1 << to.exp).U) - (1 << exp).U
Mux(expCode === 0.U || expCode >= 6.U, Cat(expCode, commonCase(to.exp - 3, 0)), commonCase(to.exp, 0))
}
Cat(sign, expOut, fractOut)
}
private def ieeeBundle = {
val expWidth = exp
class IEEEBundle extends Bundle {
val sign = Bool()
val exp = UInt(expWidth.W)
val sig = UInt((ieeeWidth-expWidth-1).W)
}
new IEEEBundle
}
def unpackIEEE(x: UInt) = x.asTypeOf(ieeeBundle)
def recode(x: UInt) = hardfloat.recFNFromFN(exp, sig, x)
def ieee(x: UInt) = hardfloat.fNFromRecFN(exp, sig, x)
}
object FType {
val H = new FType(5, 11)
val S = new FType(8, 24)
val D = new FType(11, 53)
val all = List(H, S, D)
}
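// A minimal sketch (plain Scala, no hardware) of the widths implied by FType above: the
// recoded format used by hardfloat carries one extra bit, so recodedWidth = exp + sig + 1.
object FTypeWidthSketch {
  // assuming the same (exp, sig) pairs as FType.H, FType.S and FType.D
  val widths: Seq[(Int, Int)] =
    Seq((5, 11), (8, 24), (11, 53)).map { case (e, s) => (e + s, e + s + 1) }
  // widths == Seq((16, 17), (32, 33), (64, 65)): ieeeWidth and recodedWidth for H, S, D
}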
trait HasFPUParameters {
require(fLen == 0 || FType.all.exists(_.ieeeWidth == fLen))
val minFLen: Int
val fLen: Int
def xLen: Int
val minXLen = 32
val nIntTypes = log2Ceil(xLen/minXLen) + 1
def floatTypes = FType.all.filter(t => minFLen <= t.ieeeWidth && t.ieeeWidth <= fLen)
def minType = floatTypes.head
def maxType = floatTypes.last
def prevType(t: FType) = floatTypes(typeTag(t) - 1)
def maxExpWidth = maxType.exp
def maxSigWidth = maxType.sig
def typeTag(t: FType) = floatTypes.indexOf(t)
def typeTagWbOffset = (FType.all.indexOf(minType) + 1).U
def typeTagGroup(t: FType) = (if (floatTypes.contains(t)) typeTag(t) else typeTag(maxType)).U
// typeTag
def H = typeTagGroup(FType.H)
def S = typeTagGroup(FType.S)
def D = typeTagGroup(FType.D)
def I = typeTag(maxType).U
private def isBox(x: UInt, t: FType): Bool = x(t.sig + t.exp, t.sig + t.exp - 4).andR
private def box(x: UInt, xt: FType, y: UInt, yt: FType): UInt = {
require(xt.ieeeWidth == 2 * yt.ieeeWidth)
val swizzledNaN = Cat(
x(xt.sig + xt.exp, xt.sig + xt.exp - 3),
x(xt.sig - 2, yt.recodedWidth - 1).andR,
x(xt.sig + xt.exp - 5, xt.sig),
y(yt.recodedWidth - 2),
x(xt.sig - 2, yt.recodedWidth - 1),
y(yt.recodedWidth - 1),
y(yt.recodedWidth - 3, 0))
Mux(xt.isNaN(x), swizzledNaN, x)
}
// implement NaN unboxing for FU inputs
def unbox(x: UInt, tag: UInt, exactType: Option[FType]): UInt = {
val outType = exactType.getOrElse(maxType)
def helper(x: UInt, t: FType): Seq[(Bool, UInt)] = {
val prev =
if (t == minType) {
Seq()
} else {
val prevT = prevType(t)
val unswizzled = Cat(
x(prevT.sig + prevT.exp - 1),
x(t.sig - 1),
x(prevT.sig + prevT.exp - 2, 0))
val prev = helper(unswizzled, prevT)
val isbox = isBox(x, t)
prev.map(p => (isbox && p._1, p._2))
}
prev :+ (true.B, t.unsafeConvert(x, outType))
}
val (oks, floats) = helper(x, maxType).unzip
if (exactType.isEmpty || floatTypes.size == 1) {
Mux(oks(tag), floats(tag), maxType.qNaN)
} else {
val t = exactType.get
floats(typeTag(t)) | Mux(oks(typeTag(t)), 0.U, t.qNaN)
}
}
// make sure that the redundant bits in the NaN-boxed encoding are consistent
def consistent(x: UInt): Bool = {
def helper(x: UInt, t: FType): Bool = if (typeTag(t) == 0) true.B else {
val prevT = prevType(t)
val unswizzled = Cat(
x(prevT.sig + prevT.exp - 1),
x(t.sig - 1),
x(prevT.sig + prevT.exp - 2, 0))
val prevOK = !isBox(x, t) || helper(unswizzled, prevT)
val curOK = !t.isNaN(x) || x(t.sig + t.exp - 4) === x(t.sig - 2, prevT.recodedWidth - 1).andR
prevOK && curOK
}
helper(x, maxType)
}
// generate a NaN box from an FU result
def box(x: UInt, t: FType): UInt = {
if (t == maxType) {
x
} else {
val nt = floatTypes(typeTag(t) + 1)
val bigger = box(((BigInt(1) << nt.recodedWidth)-1).U, nt, x, t)
bigger | ((BigInt(1) << maxType.recodedWidth) - (BigInt(1) << nt.recodedWidth)).U
}
}
// generate a NaN box from an FU result
def box(x: UInt, tag: UInt): UInt = {
val opts = floatTypes.map(t => box(x, t))
opts(tag)
}
// zap bits that hardfloat thinks are don't-cares, but we do care about
def sanitizeNaN(x: UInt, t: FType): UInt = {
if (typeTag(t) == 0) {
x
} else {
val maskedNaN = x & ~((BigInt(1) << (t.sig-1)) | (BigInt(1) << (t.sig+t.exp-4))).U(t.recodedWidth.W)
Mux(t.isNaN(x), maskedNaN, x)
}
}
// implement NaN boxing and recoding for FL*/fmv.*.x
def recode(x: UInt, tag: UInt): UInt = {
def helper(x: UInt, t: FType): UInt = {
if (typeTag(t) == 0) {
t.recode(x)
} else {
val prevT = prevType(t)
box(t.recode(x), t, helper(x, prevT), prevT)
}
}
// fill MSBs of subword loads to emulate a wider load of a NaN-boxed value
val boxes = floatTypes.map(t => ((BigInt(1) << maxType.ieeeWidth) - (BigInt(1) << t.ieeeWidth)).U)
helper(boxes(tag) | x, maxType)
}
// implement NaN unboxing and un-recoding for FS*/fmv.x.*
def ieee(x: UInt, t: FType = maxType): UInt = {
if (typeTag(t) == 0) {
t.ieee(x)
} else {
val unrecoded = t.ieee(x)
val prevT = prevType(t)
val prevRecoded = Cat(
x(prevT.recodedWidth-2),
x(t.sig-1),
x(prevT.recodedWidth-3, 0))
val prevUnrecoded = ieee(prevRecoded, prevT)
Cat(unrecoded >> prevT.ieeeWidth, Mux(t.isNaN(x), prevUnrecoded, unrecoded(prevT.ieeeWidth-1, 0)))
}
}
}
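// A minimal sketch (plain Scala, IEEE-level only) of the NaN-boxing convention that the
// box/unbox/consistent helpers above implement on recoded values: a narrower float stored
// in a wider FP register is valid only if every bit above its width is one; anything else
// must be consumed as the canonical qNaN.
object NaNBoxSketch {
  def boxIEEE(x: BigInt, narrowWidth: Int, wideWidth: Int): BigInt =
    ((BigInt(1) << wideWidth) - (BigInt(1) << narrowWidth)) | x
  def isBoxedIEEE(x: BigInt, narrowWidth: Int, wideWidth: Int): Boolean =
    (x >> narrowWidth) == (BigInt(1) << (wideWidth - narrowWidth)) - 1
  // e.g. boxIEEE(BigInt("3f800000", 16), 32, 64) == BigInt("ffffffff3f800000", 16)
}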
abstract class FPUModule(implicit val p: Parameters) extends Module with HasCoreParameters with HasFPUParameters
class FPToInt(implicit p: Parameters) extends FPUModule()(p) with ShouldBeRetimed {
class Output extends Bundle {
val in = new FPInput
val lt = Bool()
val store = Bits(fLen.W)
val toint = Bits(xLen.W)
val exc = Bits(FPConstants.FLAGS_SZ.W)
}
val io = IO(new Bundle {
val in = Flipped(Valid(new FPInput))
val out = Valid(new Output)
})
val in = RegEnable(io.in.bits, io.in.valid)
val valid = RegNext(io.in.valid)
val dcmp = Module(new hardfloat.CompareRecFN(maxExpWidth, maxSigWidth))
dcmp.io.a := in.in1
dcmp.io.b := in.in2
dcmp.io.signaling := !in.rm(1)
val tag = in.typeTagOut
val toint_ieee = (floatTypes.map(t => if (t == FType.H) Fill(maxType.ieeeWidth / minXLen, ieee(in.in1)(15, 0).sextTo(minXLen))
else Fill(maxType.ieeeWidth / t.ieeeWidth, ieee(in.in1)(t.ieeeWidth - 1, 0))): Seq[UInt])(tag)
val toint = WireDefault(toint_ieee)
val intType = WireDefault(in.fmt(0))
io.out.bits.store := (floatTypes.map(t => Fill(fLen / t.ieeeWidth, ieee(in.in1)(t.ieeeWidth - 1, 0))): Seq[UInt])(tag)
io.out.bits.toint := ((0 until nIntTypes).map(i => toint((minXLen << i) - 1, 0).sextTo(xLen)): Seq[UInt])(intType)
io.out.bits.exc := 0.U
when (in.rm(0)) {
val classify_out = (floatTypes.map(t => t.classify(maxType.unsafeConvert(in.in1, t))): Seq[UInt])(tag)
toint := classify_out | (toint_ieee >> minXLen << minXLen)
intType := false.B
}
when (in.wflags) { // feq/flt/fle, fcvt
toint := (~in.rm & Cat(dcmp.io.lt, dcmp.io.eq)).orR | (toint_ieee >> minXLen << minXLen)
io.out.bits.exc := dcmp.io.exceptionFlags
intType := false.B
when (!in.ren2) { // fcvt
val cvtType = in.typ.extract(log2Ceil(nIntTypes), 1)
intType := cvtType
val conv = Module(new hardfloat.RecFNToIN(maxExpWidth, maxSigWidth, xLen))
conv.io.in := in.in1
conv.io.roundingMode := in.rm
conv.io.signedOut := ~in.typ(0)
toint := conv.io.out
io.out.bits.exc := Cat(conv.io.intExceptionFlags(2, 1).orR, 0.U(3.W), conv.io.intExceptionFlags(0))
for (i <- 0 until nIntTypes-1) {
val w = minXLen << i
when (cvtType === i.U) {
val narrow = Module(new hardfloat.RecFNToIN(maxExpWidth, maxSigWidth, w))
narrow.io.in := in.in1
narrow.io.roundingMode := in.rm
narrow.io.signedOut := ~in.typ(0)
val excSign = in.in1(maxExpWidth + maxSigWidth) && !maxType.isNaN(in.in1)
val excOut = Cat(conv.io.signedOut === excSign, Fill(w-1, !excSign))
val invalid = conv.io.intExceptionFlags(2) || narrow.io.intExceptionFlags(1)
when (invalid) { toint := Cat(conv.io.out >> w, excOut) }
io.out.bits.exc := Cat(invalid, 0.U(3.W), !invalid && conv.io.intExceptionFlags(0))
}
}
}
}
io.out.valid := valid
io.out.bits.lt := dcmp.io.lt || (dcmp.io.a.asSInt < 0.S && dcmp.io.b.asSInt >= 0.S)
io.out.bits.in := in
}
class IntToFP(val latency: Int)(implicit p: Parameters) extends FPUModule()(p) with ShouldBeRetimed {
val io = IO(new Bundle {
val in = Flipped(Valid(new IntToFPInput))
val out = Valid(new FPResult)
})
val in = Pipe(io.in)
val tag = in.bits.typeTagIn
val mux = Wire(new FPResult)
mux.exc := 0.U
mux.data := recode(in.bits.in1, tag)
val intValue = {
val res = WireDefault(in.bits.in1.asSInt)
for (i <- 0 until nIntTypes-1) {
val smallInt = in.bits.in1((minXLen << i) - 1, 0)
when (in.bits.typ.extract(log2Ceil(nIntTypes), 1) === i.U) {
res := Mux(in.bits.typ(0), smallInt.zext, smallInt.asSInt)
}
}
res.asUInt
}
when (in.bits.wflags) { // fcvt
// could be improved for RVD/RVQ with a single variable-position rounding
// unit, rather than N fixed-position ones
val i2fResults = for (t <- floatTypes) yield {
val i2f = Module(new hardfloat.INToRecFN(xLen, t.exp, t.sig))
i2f.io.signedIn := ~in.bits.typ(0)
i2f.io.in := intValue
i2f.io.roundingMode := in.bits.rm
i2f.io.detectTininess := hardfloat.consts.tininess_afterRounding
(sanitizeNaN(i2f.io.out, t), i2f.io.exceptionFlags)
}
val (data, exc) = i2fResults.unzip
val dataPadded = data.init.map(d => Cat(data.last >> d.getWidth, d)) :+ data.last
mux.data := dataPadded(tag)
mux.exc := exc(tag)
}
io.out <> Pipe(in.valid, mux, latency-1)
}
class FPToFP(val latency: Int)(implicit p: Parameters) extends FPUModule()(p) with ShouldBeRetimed {
val io = IO(new Bundle {
val in = Flipped(Valid(new FPInput))
val out = Valid(new FPResult)
val lt = Input(Bool()) // from FPToInt
})
val in = Pipe(io.in)
val signNum = Mux(in.bits.rm(1), in.bits.in1 ^ in.bits.in2, Mux(in.bits.rm(0), ~in.bits.in2, in.bits.in2))
val fsgnj = Cat(signNum(fLen), in.bits.in1(fLen-1, 0))
val fsgnjMux = Wire(new FPResult)
fsgnjMux.exc := 0.U
fsgnjMux.data := fsgnj
when (in.bits.wflags) { // fmin/fmax
val isnan1 = maxType.isNaN(in.bits.in1)
val isnan2 = maxType.isNaN(in.bits.in2)
val isInvalid = maxType.isSNaN(in.bits.in1) || maxType.isSNaN(in.bits.in2)
val isNaNOut = isnan1 && isnan2
val isLHS = isnan2 || in.bits.rm(0) =/= io.lt && !isnan1
fsgnjMux.exc := isInvalid << 4
fsgnjMux.data := Mux(isNaNOut, maxType.qNaN, Mux(isLHS, in.bits.in1, in.bits.in2))
}
val inTag = in.bits.typeTagIn
val outTag = in.bits.typeTagOut
val mux = WireDefault(fsgnjMux)
for (t <- floatTypes.init) {
when (outTag === typeTag(t).U) {
mux.data := Cat(fsgnjMux.data >> t.recodedWidth, maxType.unsafeConvert(fsgnjMux.data, t))
}
}
when (in.bits.wflags && !in.bits.ren2) { // fcvt
if (floatTypes.size > 1) {
// widening conversions simply canonicalize NaN operands
val widened = Mux(maxType.isNaN(in.bits.in1), maxType.qNaN, in.bits.in1)
fsgnjMux.data := widened
fsgnjMux.exc := maxType.isSNaN(in.bits.in1) << 4
// narrowing conversions require rounding (for RVQ, this could be
// optimized to use a single variable-position rounding unit, rather
// than two fixed-position ones)
for (outType <- floatTypes.init) when (outTag === typeTag(outType).U && ((typeTag(outType) == 0).B || outTag < inTag)) {
val narrower = Module(new hardfloat.RecFNToRecFN(maxType.exp, maxType.sig, outType.exp, outType.sig))
narrower.io.in := in.bits.in1
narrower.io.roundingMode := in.bits.rm
narrower.io.detectTininess := hardfloat.consts.tininess_afterRounding
val narrowed = sanitizeNaN(narrower.io.out, outType)
mux.data := Cat(fsgnjMux.data >> narrowed.getWidth, narrowed)
mux.exc := narrower.io.exceptionFlags
}
}
}
io.out <> Pipe(in.valid, mux, latency-1)
}
class MulAddRecFNPipe(latency: Int, expWidth: Int, sigWidth: Int) extends Module
{
override def desiredName = s"MulAddRecFNPipe_l${latency}_e${expWidth}_s${sigWidth}"
require(latency<=2)
val io = IO(new Bundle {
val validin = Input(Bool())
val op = Input(Bits(2.W))
val a = Input(Bits((expWidth + sigWidth + 1).W))
val b = Input(Bits((expWidth + sigWidth + 1).W))
val c = Input(Bits((expWidth + sigWidth + 1).W))
val roundingMode = Input(UInt(3.W))
val detectTininess = Input(UInt(1.W))
val out = Output(Bits((expWidth + sigWidth + 1).W))
val exceptionFlags = Output(Bits(5.W))
val validout = Output(Bool())
})
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val mulAddRecFNToRaw_preMul = Module(new hardfloat.MulAddRecFNToRaw_preMul(expWidth, sigWidth))
val mulAddRecFNToRaw_postMul = Module(new hardfloat.MulAddRecFNToRaw_postMul(expWidth, sigWidth))
mulAddRecFNToRaw_preMul.io.op := io.op
mulAddRecFNToRaw_preMul.io.a := io.a
mulAddRecFNToRaw_preMul.io.b := io.b
mulAddRecFNToRaw_preMul.io.c := io.c
val mulAddResult =
(mulAddRecFNToRaw_preMul.io.mulAddA *
mulAddRecFNToRaw_preMul.io.mulAddB) +&
mulAddRecFNToRaw_preMul.io.mulAddC
val valid_stage0 = Wire(Bool())
val roundingMode_stage0 = Wire(UInt(3.W))
val detectTininess_stage0 = Wire(UInt(1.W))
val postmul_regs = if(latency>0) 1 else 0
mulAddRecFNToRaw_postMul.io.fromPreMul := Pipe(io.validin, mulAddRecFNToRaw_preMul.io.toPostMul, postmul_regs).bits
mulAddRecFNToRaw_postMul.io.mulAddResult := Pipe(io.validin, mulAddResult, postmul_regs).bits
mulAddRecFNToRaw_postMul.io.roundingMode := Pipe(io.validin, io.roundingMode, postmul_regs).bits
roundingMode_stage0 := Pipe(io.validin, io.roundingMode, postmul_regs).bits
detectTininess_stage0 := Pipe(io.validin, io.detectTininess, postmul_regs).bits
valid_stage0 := Pipe(io.validin, false.B, postmul_regs).valid
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val roundRawFNToRecFN = Module(new hardfloat.RoundRawFNToRecFN(expWidth, sigWidth, 0))
val round_regs = if(latency==2) 1 else 0
roundRawFNToRecFN.io.invalidExc := Pipe(valid_stage0, mulAddRecFNToRaw_postMul.io.invalidExc, round_regs).bits
roundRawFNToRecFN.io.in := Pipe(valid_stage0, mulAddRecFNToRaw_postMul.io.rawOut, round_regs).bits
roundRawFNToRecFN.io.roundingMode := Pipe(valid_stage0, roundingMode_stage0, round_regs).bits
roundRawFNToRecFN.io.detectTininess := Pipe(valid_stage0, detectTininess_stage0, round_regs).bits
io.validout := Pipe(valid_stage0, false.B, round_regs).valid
roundRawFNToRecFN.io.infiniteExc := false.B
io.out := roundRawFNToRecFN.io.out
io.exceptionFlags := roundRawFNToRecFN.io.exceptionFlags
}
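// A minimal sketch of where MulAddRecFNPipe above spends its pipeline registers, assuming
// the stated limit of latency <= 2: the first register (postmul_regs) sits after the raw
// multiply-add, the second (round_regs) before the final rounding stage.
object FMAStageSketch {
  def regs(latency: Int): (Int, Int) = {
    require(latency <= 2)
    (if (latency > 0) 1 else 0, if (latency == 2) 1 else 0) // (postmul_regs, round_regs)
  }
  // regs(0) == (0, 0); regs(1) == (1, 0); regs(2) == (1, 1)
}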
class FPUFMAPipe(val latency: Int, val t: FType)
(implicit p: Parameters) extends FPUModule()(p) with ShouldBeRetimed {
override def desiredName = s"FPUFMAPipe_l${latency}_f${t.ieeeWidth}"
require(latency>0)
val io = IO(new Bundle {
val in = Flipped(Valid(new FPInput))
val out = Valid(new FPResult)
})
val valid = RegNext(io.in.valid)
val in = Reg(new FPInput)
when (io.in.valid) {
val one = 1.U << (t.sig + t.exp - 1)
val zero = (io.in.bits.in1 ^ io.in.bits.in2) & (1.U << (t.sig + t.exp))
val cmd_fma = io.in.bits.ren3
val cmd_addsub = io.in.bits.swap23
in := io.in.bits
when (cmd_addsub) { in.in2 := one }
when (!(cmd_fma || cmd_addsub)) { in.in3 := zero }
}
val fma = Module(new MulAddRecFNPipe((latency-1) min 2, t.exp, t.sig))
fma.io.validin := valid
fma.io.op := in.fmaCmd
fma.io.roundingMode := in.rm
fma.io.detectTininess := hardfloat.consts.tininess_afterRounding
fma.io.a := in.in1
fma.io.b := in.in2
fma.io.c := in.in3
val res = Wire(new FPResult)
res.data := sanitizeNaN(fma.io.out, t)
res.exc := fma.io.exceptionFlags
io.out := Pipe(fma.io.validout, res, (latency-3) max 0)
}
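// A minimal sketch of the FPUFMAPipe latency budget above: one cycle in the input register,
// up to two inside MulAddRecFNPipe, and any remainder in the output Pipe, so the total
// always equals the requested latency.
object FMAPipeLatencySketch {
  def totalStages(latency: Int): Int = {
    require(latency > 0)
    1 + ((latency - 1) min 2) + ((latency - 3) max 0)
  }
  // totalStages(n) == n for every legal n; e.g. totalStages(4) == 1 + 2 + 1
}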
class FPU(cfg: FPUParams)(implicit p: Parameters) extends FPUModule()(p) {
val io = IO(new FPUIO)
val (useClockGating, useDebugROB) = coreParams match {
case r: RocketCoreParams =>
val sz = if (r.debugROB.isDefined) r.debugROB.get.size else 1
(r.clockGate, sz < 1)
case _ => (false, false)
}
val clock_en_reg = Reg(Bool())
val clock_en = clock_en_reg || io.cp_req.valid
val gated_clock =
if (!useClockGating) clock
else ClockGate(clock, clock_en, "fpu_clock_gate")
val fp_decoder = Module(new FPUDecoder)
fp_decoder.io.inst := io.inst
val id_ctrl = WireInit(fp_decoder.io.sigs)
coreParams match { case r: RocketCoreParams => r.vector.map(v => {
val v_decode = v.decoder(p) // Only need to get ren1
v_decode.io.inst := io.inst
v_decode.io.vconfig := DontCare // core deals with this
when (v_decode.io.legal && v_decode.io.read_frs1) {
id_ctrl.ren1 := true.B
id_ctrl.swap12 := false.B
id_ctrl.toint := true.B
id_ctrl.typeTagIn := I
id_ctrl.typeTagOut := Mux(io.v_sew === 3.U, D, S)
}
when (v_decode.io.write_frd) { id_ctrl.wen := true.B }
})}
val ex_reg_valid = RegNext(io.valid, false.B)
val ex_reg_inst = RegEnable(io.inst, io.valid)
val ex_reg_ctrl = RegEnable(id_ctrl, io.valid)
val ex_ra = List.fill(3)(Reg(UInt()))
// load/vector response
val load_wb = RegNext(io.ll_resp_val)
val load_wb_typeTag = RegEnable(io.ll_resp_type(1,0) - typeTagWbOffset, io.ll_resp_val)
val load_wb_data = RegEnable(io.ll_resp_data, io.ll_resp_val)
val load_wb_tag = RegEnable(io.ll_resp_tag, io.ll_resp_val)
class FPUImpl { // entering gated-clock domain
val req_valid = ex_reg_valid || io.cp_req.valid
val ex_cp_valid = io.cp_req.fire
val mem_cp_valid = RegNext(ex_cp_valid, false.B)
val wb_cp_valid = RegNext(mem_cp_valid, false.B)
val mem_reg_valid = RegInit(false.B)
val killm = (io.killm || io.nack_mem) && !mem_cp_valid
// Kill X-stage instruction if M-stage is killed. This prevents it from
// speculatively being sent to the div-sqrt unit, which can cause priority
// inversion for two back-to-back divides, the first of which is killed.
val killx = io.killx || mem_reg_valid && killm
mem_reg_valid := ex_reg_valid && !killx || ex_cp_valid
val mem_reg_inst = RegEnable(ex_reg_inst, ex_reg_valid)
val wb_reg_valid = RegNext(mem_reg_valid && (!killm || mem_cp_valid), false.B)
val cp_ctrl = Wire(new FPUCtrlSigs)
cp_ctrl :<>= io.cp_req.bits.viewAsSupertype(new FPUCtrlSigs)
io.cp_resp.valid := false.B
io.cp_resp.bits.data := 0.U
io.cp_resp.bits.exc := DontCare
val ex_ctrl = Mux(ex_cp_valid, cp_ctrl, ex_reg_ctrl)
val mem_ctrl = RegEnable(ex_ctrl, req_valid)
val wb_ctrl = RegEnable(mem_ctrl, mem_reg_valid)
// CoreMonitorBundle to monitor fp register file writes
val frfWriteBundle = Seq.fill(2)(WireInit(new CoreMonitorBundle(xLen, fLen), DontCare))
frfWriteBundle.foreach { i =>
i.clock := clock
i.reset := reset
i.hartid := io.hartid
i.timer := io.time(31,0)
i.valid := false.B
i.wrenx := false.B
i.wrenf := false.B
i.excpt := false.B
}
// regfile
val regfile = Mem(32, Bits((fLen+1).W))
when (load_wb) {
val wdata = recode(load_wb_data, load_wb_typeTag)
regfile(load_wb_tag) := wdata
assert(consistent(wdata))
if (enableCommitLog)
printf("f%d p%d 0x%x\n", load_wb_tag, load_wb_tag + 32.U, ieee(wdata))
if (useDebugROB)
DebugROB.pushWb(clock, reset, io.hartid, load_wb, load_wb_tag + 32.U, ieee(wdata))
frfWriteBundle(0).wrdst := load_wb_tag
frfWriteBundle(0).wrenf := true.B
frfWriteBundle(0).wrdata := ieee(wdata)
}
val ex_rs = ex_ra.map(a => regfile(a))
when (io.valid) {
when (id_ctrl.ren1) {
when (!id_ctrl.swap12) { ex_ra(0) := io.inst(19,15) }
when (id_ctrl.swap12) { ex_ra(1) := io.inst(19,15) }
}
when (id_ctrl.ren2) {
when (id_ctrl.swap12) { ex_ra(0) := io.inst(24,20) }
when (id_ctrl.swap23) { ex_ra(2) := io.inst(24,20) }
when (!id_ctrl.swap12 && !id_ctrl.swap23) { ex_ra(1) := io.inst(24,20) }
}
when (id_ctrl.ren3) { ex_ra(2) := io.inst(31,27) }
}
val ex_rm = Mux(ex_reg_inst(14,12) === 7.U, io.fcsr_rm, ex_reg_inst(14,12))
def fuInput(minT: Option[FType]): FPInput = {
val req = Wire(new FPInput)
val tag = ex_ctrl.typeTagIn
req.viewAsSupertype(new Bundle with HasFPUCtrlSigs) :#= ex_ctrl.viewAsSupertype(new Bundle with HasFPUCtrlSigs)
req.rm := ex_rm
req.in1 := unbox(ex_rs(0), tag, minT)
req.in2 := unbox(ex_rs(1), tag, minT)
req.in3 := unbox(ex_rs(2), tag, minT)
req.typ := ex_reg_inst(21,20)
req.fmt := ex_reg_inst(26,25)
req.fmaCmd := ex_reg_inst(3,2) | (!ex_ctrl.ren3 && ex_reg_inst(27))
when (ex_cp_valid) {
req := io.cp_req.bits
when (io.cp_req.bits.swap12) {
req.in1 := io.cp_req.bits.in2
req.in2 := io.cp_req.bits.in1
}
when (io.cp_req.bits.swap23) {
req.in2 := io.cp_req.bits.in3
req.in3 := io.cp_req.bits.in2
}
}
req
}
val sfma = Module(new FPUFMAPipe(cfg.sfmaLatency, FType.S))
sfma.io.in.valid := req_valid && ex_ctrl.fma && ex_ctrl.typeTagOut === S
sfma.io.in.bits := fuInput(Some(sfma.t))
val fpiu = Module(new FPToInt)
fpiu.io.in.valid := req_valid && (ex_ctrl.toint || ex_ctrl.div || ex_ctrl.sqrt || (ex_ctrl.fastpipe && ex_ctrl.wflags))
fpiu.io.in.bits := fuInput(None)
io.store_data := fpiu.io.out.bits.store
io.toint_data := fpiu.io.out.bits.toint
when(fpiu.io.out.valid && mem_cp_valid && mem_ctrl.toint){
io.cp_resp.bits.data := fpiu.io.out.bits.toint
io.cp_resp.valid := true.B
}
val ifpu = Module(new IntToFP(cfg.ifpuLatency))
ifpu.io.in.valid := req_valid && ex_ctrl.fromint
ifpu.io.in.bits := fpiu.io.in.bits
ifpu.io.in.bits.in1 := Mux(ex_cp_valid, io.cp_req.bits.in1, io.fromint_data)
val fpmu = Module(new FPToFP(cfg.fpmuLatency))
fpmu.io.in.valid := req_valid && ex_ctrl.fastpipe
fpmu.io.in.bits := fpiu.io.in.bits
fpmu.io.lt := fpiu.io.out.bits.lt
val divSqrt_wen = WireDefault(false.B)
val divSqrt_inFlight = WireDefault(false.B)
val divSqrt_waddr = Reg(UInt(5.W))
val divSqrt_cp = Reg(Bool())
val divSqrt_typeTag = Wire(UInt(log2Up(floatTypes.size).W))
val divSqrt_wdata = Wire(UInt((fLen+1).W))
val divSqrt_flags = Wire(UInt(FPConstants.FLAGS_SZ.W))
divSqrt_typeTag := DontCare
divSqrt_wdata := DontCare
divSqrt_flags := DontCare
// writeback arbitration
case class Pipe(p: Module, lat: Int, cond: (FPUCtrlSigs) => Bool, res: FPResult)
val pipes = List(
Pipe(fpmu, fpmu.latency, (c: FPUCtrlSigs) => c.fastpipe, fpmu.io.out.bits),
Pipe(ifpu, ifpu.latency, (c: FPUCtrlSigs) => c.fromint, ifpu.io.out.bits),
Pipe(sfma, sfma.latency, (c: FPUCtrlSigs) => c.fma && c.typeTagOut === S, sfma.io.out.bits)) ++
(fLen > 32).option({
val dfma = Module(new FPUFMAPipe(cfg.dfmaLatency, FType.D))
dfma.io.in.valid := req_valid && ex_ctrl.fma && ex_ctrl.typeTagOut === D
dfma.io.in.bits := fuInput(Some(dfma.t))
Pipe(dfma, dfma.latency, (c: FPUCtrlSigs) => c.fma && c.typeTagOut === D, dfma.io.out.bits)
}) ++
(minFLen == 16).option({
val hfma = Module(new FPUFMAPipe(cfg.sfmaLatency, FType.H))
hfma.io.in.valid := req_valid && ex_ctrl.fma && ex_ctrl.typeTagOut === H
hfma.io.in.bits := fuInput(Some(hfma.t))
Pipe(hfma, hfma.latency, (c: FPUCtrlSigs) => c.fma && c.typeTagOut === H, hfma.io.out.bits)
})
def latencyMask(c: FPUCtrlSigs, offset: Int) = {
require(pipes.forall(_.lat >= offset))
pipes.map(p => Mux(p.cond(c), (1 << p.lat-offset).U, 0.U)).reduce(_|_)
}
def pipeid(c: FPUCtrlSigs) = pipes.zipWithIndex.map(p => Mux(p._1.cond(c), p._2.U, 0.U)).reduce(_|_)
val maxLatency = pipes.map(_.lat).max
val memLatencyMask = latencyMask(mem_ctrl, 2)
class WBInfo extends Bundle {
val rd = UInt(5.W)
val typeTag = UInt(log2Up(floatTypes.size).W)
val cp = Bool()
val pipeid = UInt(log2Ceil(pipes.size).W)
}
val wen = RegInit(0.U((maxLatency-1).W))
val wbInfo = Reg(Vec(maxLatency-1, new WBInfo))
val mem_wen = mem_reg_valid && (mem_ctrl.fma || mem_ctrl.fastpipe || mem_ctrl.fromint)
val write_port_busy = RegEnable(mem_wen && (memLatencyMask & latencyMask(ex_ctrl, 1)).orR || (wen & latencyMask(ex_ctrl, 0)).orR, req_valid)
ccover(mem_reg_valid && write_port_busy, "WB_STRUCTURAL", "structural hazard on writeback")
for (i <- 0 until maxLatency-2) {
when (wen(i+1)) { wbInfo(i) := wbInfo(i+1) }
}
wen := wen >> 1
when (mem_wen) {
when (!killm) {
wen := wen >> 1 | memLatencyMask
}
for (i <- 0 until maxLatency-1) {
when (!write_port_busy && memLatencyMask(i)) {
wbInfo(i).cp := mem_cp_valid
wbInfo(i).typeTag := mem_ctrl.typeTagOut
wbInfo(i).pipeid := pipeid(mem_ctrl)
wbInfo(i).rd := mem_reg_inst(11,7)
}
}
}
val waddr = Mux(divSqrt_wen, divSqrt_waddr, wbInfo(0).rd)
val wb_cp = Mux(divSqrt_wen, divSqrt_cp, wbInfo(0).cp)
val wtypeTag = Mux(divSqrt_wen, divSqrt_typeTag, wbInfo(0).typeTag)
val wdata = box(Mux(divSqrt_wen, divSqrt_wdata, (pipes.map(_.res.data): Seq[UInt])(wbInfo(0).pipeid)), wtypeTag)
val wexc = (pipes.map(_.res.exc): Seq[UInt])(wbInfo(0).pipeid)
when ((!wbInfo(0).cp && wen(0)) || divSqrt_wen) {
assert(consistent(wdata))
regfile(waddr) := wdata
if (enableCommitLog) {
printf("f%d p%d 0x%x\n", waddr, waddr + 32.U, ieee(wdata))
}
frfWriteBundle(1).wrdst := waddr
frfWriteBundle(1).wrenf := true.B
frfWriteBundle(1).wrdata := ieee(wdata)
}
if (useDebugROB) {
DebugROB.pushWb(clock, reset, io.hartid, (!wbInfo(0).cp && wen(0)) || divSqrt_wen, waddr + 32.U, ieee(wdata))
}
when (wb_cp && (wen(0) || divSqrt_wen)) {
io.cp_resp.bits.data := wdata
io.cp_resp.valid := true.B
}
assert(!io.cp_req.valid || pipes.forall(_.lat == pipes.head.lat).B,
s"FPU only supports coprocessor if FMA pipes have uniform latency ${pipes.map(_.lat)}")
// Avoid structural hazards and nacking of external requests
// toint responds in the MEM stage, so an incoming toint can induce a structural hazard against inflight FMAs
io.cp_req.ready := !ex_reg_valid && !(cp_ctrl.toint && wen =/= 0.U) && !divSqrt_inFlight
val wb_toint_valid = wb_reg_valid && wb_ctrl.toint
val wb_toint_exc = RegEnable(fpiu.io.out.bits.exc, mem_ctrl.toint)
io.fcsr_flags.valid := wb_toint_valid || divSqrt_wen || wen(0)
io.fcsr_flags.bits :=
Mux(wb_toint_valid, wb_toint_exc, 0.U) |
Mux(divSqrt_wen, divSqrt_flags, 0.U) |
Mux(wen(0), wexc, 0.U)
val divSqrt_write_port_busy = (mem_ctrl.div || mem_ctrl.sqrt) && wen.orR
io.fcsr_rdy := !(ex_reg_valid && ex_ctrl.wflags || mem_reg_valid && mem_ctrl.wflags || wb_reg_valid && wb_ctrl.toint || wen.orR || divSqrt_inFlight)
io.nack_mem := (write_port_busy || divSqrt_write_port_busy || divSqrt_inFlight) && !mem_cp_valid
io.dec <> id_ctrl
def useScoreboard(f: ((Pipe, Int)) => Bool) = pipes.zipWithIndex.filter(_._1.lat > 3).map(x => f(x)).fold(false.B)(_||_)
io.sboard_set := wb_reg_valid && !wb_cp_valid && RegNext(useScoreboard(_._1.cond(mem_ctrl)) || mem_ctrl.div || mem_ctrl.sqrt || mem_ctrl.vec)
io.sboard_clr := !wb_cp_valid && (divSqrt_wen || (wen(0) && useScoreboard(x => wbInfo(0).pipeid === x._2.U)))
io.sboard_clra := waddr
ccover(io.sboard_clr && load_wb, "DUAL_WRITEBACK", "load and FMA writeback on same cycle")
// we don't currently support round-max-magnitude (rm=4)
io.illegal_rm := io.inst(14,12).isOneOf(5.U, 6.U) || io.inst(14,12) === 7.U && io.fcsr_rm >= 5.U
if (cfg.divSqrt) {
val divSqrt_inValid = mem_reg_valid && (mem_ctrl.div || mem_ctrl.sqrt) && !divSqrt_inFlight
val divSqrt_killed = RegNext(divSqrt_inValid && killm, true.B)
when (divSqrt_inValid) {
divSqrt_waddr := mem_reg_inst(11,7)
divSqrt_cp := mem_cp_valid
}
ccover(divSqrt_inFlight && divSqrt_killed, "DIV_KILLED", "divide killed after issued to divider")
ccover(divSqrt_inFlight && mem_reg_valid && (mem_ctrl.div || mem_ctrl.sqrt), "DIV_BUSY", "divider structural hazard")
ccover(mem_reg_valid && divSqrt_write_port_busy, "DIV_WB_STRUCTURAL", "structural hazard on division writeback")
for (t <- floatTypes) {
val tag = mem_ctrl.typeTagOut
val divSqrt = withReset(divSqrt_killed) { Module(new hardfloat.DivSqrtRecFN_small(t.exp, t.sig, 0)) }
divSqrt.io.inValid := divSqrt_inValid && tag === typeTag(t).U
divSqrt.io.sqrtOp := mem_ctrl.sqrt
divSqrt.io.a := maxType.unsafeConvert(fpiu.io.out.bits.in.in1, t)
divSqrt.io.b := maxType.unsafeConvert(fpiu.io.out.bits.in.in2, t)
divSqrt.io.roundingMode := fpiu.io.out.bits.in.rm
divSqrt.io.detectTininess := hardfloat.consts.tininess_afterRounding
when (!divSqrt.io.inReady) { divSqrt_inFlight := true.B } // only 1 in flight
when (divSqrt.io.outValid_div || divSqrt.io.outValid_sqrt) {
divSqrt_wen := !divSqrt_killed
divSqrt_wdata := sanitizeNaN(divSqrt.io.out, t)
divSqrt_flags := divSqrt.io.exceptionFlags
divSqrt_typeTag := typeTag(t).U
}
}
when (divSqrt_killed) { divSqrt_inFlight := false.B }
} else {
when (id_ctrl.div || id_ctrl.sqrt) { io.illegal_rm := true.B }
}
// gate the clock
clock_en_reg := !useClockGating.B ||
io.keep_clock_enabled || // chicken bit
io.valid || // ID stage
req_valid || // EX stage
mem_reg_valid || mem_cp_valid || // MEM stage
wb_reg_valid || wb_cp_valid || // WB stage
wen.orR || divSqrt_inFlight || // post-WB stage
io.ll_resp_val // load writeback
} // leaving gated-clock domain
val fpuImpl = withClock (gated_clock) { new FPUImpl }
def ccover(cond: Bool, label: String, desc: String)(implicit sourceInfo: SourceInfo) =
property.cover(cond, s"FPU_$label", "Core;;" + desc)
}
| module FPUFMAPipe_l4_f32( // @[FPU.scala:697:7]
input clock, // @[FPU.scala:697:7]
input reset, // @[FPU.scala:697:7]
input io_in_valid, // @[FPU.scala:702:14]
input io_in_bits_ldst, // @[FPU.scala:702:14]
input io_in_bits_wen, // @[FPU.scala:702:14]
input io_in_bits_ren1, // @[FPU.scala:702:14]
input io_in_bits_ren2, // @[FPU.scala:702:14]
input io_in_bits_ren3, // @[FPU.scala:702:14]
input io_in_bits_swap12, // @[FPU.scala:702:14]
input io_in_bits_swap23, // @[FPU.scala:702:14]
input [1:0] io_in_bits_typeTagIn, // @[FPU.scala:702:14]
input [1:0] io_in_bits_typeTagOut, // @[FPU.scala:702:14]
input io_in_bits_fromint, // @[FPU.scala:702:14]
input io_in_bits_toint, // @[FPU.scala:702:14]
input io_in_bits_fastpipe, // @[FPU.scala:702:14]
input io_in_bits_fma, // @[FPU.scala:702:14]
input io_in_bits_div, // @[FPU.scala:702:14]
input io_in_bits_sqrt, // @[FPU.scala:702:14]
input io_in_bits_wflags, // @[FPU.scala:702:14]
input [2:0] io_in_bits_rm, // @[FPU.scala:702:14]
input [1:0] io_in_bits_fmaCmd, // @[FPU.scala:702:14]
input [1:0] io_in_bits_typ, // @[FPU.scala:702:14]
input [1:0] io_in_bits_fmt, // @[FPU.scala:702:14]
input [64:0] io_in_bits_in1, // @[FPU.scala:702:14]
input [64:0] io_in_bits_in2, // @[FPU.scala:702:14]
input [64:0] io_in_bits_in3, // @[FPU.scala:702:14]
output io_out_valid, // @[FPU.scala:702:14]
output [64:0] io_out_bits_data, // @[FPU.scala:702:14]
output [4:0] io_out_bits_exc // @[FPU.scala:702:14]
);
wire [32:0] _fma_io_out; // @[FPU.scala:719:19]
wire _fma_io_validout; // @[FPU.scala:719:19]
wire io_in_valid_0 = io_in_valid; // @[FPU.scala:697:7]
wire io_in_bits_ldst_0 = io_in_bits_ldst; // @[FPU.scala:697:7]
wire io_in_bits_wen_0 = io_in_bits_wen; // @[FPU.scala:697:7]
wire io_in_bits_ren1_0 = io_in_bits_ren1; // @[FPU.scala:697:7]
wire io_in_bits_ren2_0 = io_in_bits_ren2; // @[FPU.scala:697:7]
wire io_in_bits_ren3_0 = io_in_bits_ren3; // @[FPU.scala:697:7]
wire io_in_bits_swap12_0 = io_in_bits_swap12; // @[FPU.scala:697:7]
wire io_in_bits_swap23_0 = io_in_bits_swap23; // @[FPU.scala:697:7]
wire [1:0] io_in_bits_typeTagIn_0 = io_in_bits_typeTagIn; // @[FPU.scala:697:7]
wire [1:0] io_in_bits_typeTagOut_0 = io_in_bits_typeTagOut; // @[FPU.scala:697:7]
wire io_in_bits_fromint_0 = io_in_bits_fromint; // @[FPU.scala:697:7]
wire io_in_bits_toint_0 = io_in_bits_toint; // @[FPU.scala:697:7]
wire io_in_bits_fastpipe_0 = io_in_bits_fastpipe; // @[FPU.scala:697:7]
wire io_in_bits_fma_0 = io_in_bits_fma; // @[FPU.scala:697:7]
wire io_in_bits_div_0 = io_in_bits_div; // @[FPU.scala:697:7]
wire io_in_bits_sqrt_0 = io_in_bits_sqrt; // @[FPU.scala:697:7]
wire io_in_bits_wflags_0 = io_in_bits_wflags; // @[FPU.scala:697:7]
wire [2:0] io_in_bits_rm_0 = io_in_bits_rm; // @[FPU.scala:697:7]
wire [1:0] io_in_bits_fmaCmd_0 = io_in_bits_fmaCmd; // @[FPU.scala:697:7]
wire [1:0] io_in_bits_typ_0 = io_in_bits_typ; // @[FPU.scala:697:7]
wire [1:0] io_in_bits_fmt_0 = io_in_bits_fmt; // @[FPU.scala:697:7]
wire [64:0] io_in_bits_in1_0 = io_in_bits_in1; // @[FPU.scala:697:7]
wire [64:0] io_in_bits_in2_0 = io_in_bits_in2; // @[FPU.scala:697:7]
wire [64:0] io_in_bits_in3_0 = io_in_bits_in3; // @[FPU.scala:697:7]
wire [31:0] one = 32'h80000000; // @[FPU.scala:710:19]
wire [32:0] _zero_T_1 = 33'h100000000; // @[FPU.scala:711:57]
wire io_in_bits_vec = 1'h0; // @[FPU.scala:697:7]
wire io_out_pipe_out_valid; // @[Valid.scala:135:21]
wire [64:0] io_out_pipe_out_bits_data; // @[Valid.scala:135:21]
wire [4:0] io_out_pipe_out_bits_exc; // @[Valid.scala:135:21]
wire [64:0] io_out_bits_data_0; // @[FPU.scala:697:7]
wire [4:0] io_out_bits_exc_0; // @[FPU.scala:697:7]
wire io_out_valid_0; // @[FPU.scala:697:7]
reg valid; // @[FPU.scala:707:22]
reg in_ldst; // @[FPU.scala:708:15]
reg in_wen; // @[FPU.scala:708:15]
reg in_ren1; // @[FPU.scala:708:15]
reg in_ren2; // @[FPU.scala:708:15]
reg in_ren3; // @[FPU.scala:708:15]
reg in_swap12; // @[FPU.scala:708:15]
reg in_swap23; // @[FPU.scala:708:15]
reg [1:0] in_typeTagIn; // @[FPU.scala:708:15]
reg [1:0] in_typeTagOut; // @[FPU.scala:708:15]
reg in_fromint; // @[FPU.scala:708:15]
reg in_toint; // @[FPU.scala:708:15]
reg in_fastpipe; // @[FPU.scala:708:15]
reg in_fma; // @[FPU.scala:708:15]
reg in_div; // @[FPU.scala:708:15]
reg in_sqrt; // @[FPU.scala:708:15]
reg in_wflags; // @[FPU.scala:708:15]
reg [2:0] in_rm; // @[FPU.scala:708:15]
reg [1:0] in_fmaCmd; // @[FPU.scala:708:15]
reg [1:0] in_typ; // @[FPU.scala:708:15]
reg [1:0] in_fmt; // @[FPU.scala:708:15]
reg [64:0] in_in1; // @[FPU.scala:708:15]
reg [64:0] in_in2; // @[FPU.scala:708:15]
reg [64:0] in_in3; // @[FPU.scala:708:15]
wire [64:0] _zero_T = io_in_bits_in1_0 ^ io_in_bits_in2_0; // @[FPU.scala:697:7, :711:32]
wire [64:0] zero = {32'h0, _zero_T[32], 32'h0}; // @[FPU.scala:711:{32,50}]
wire [64:0] res_data; // @[FPU.scala:728:17]
wire [4:0] res_exc; // @[FPU.scala:728:17]
assign res_data = {32'h0, _fma_io_out}; // @[FPU.scala:719:19, :728:17, :729:12]
reg io_out_pipe_v; // @[Valid.scala:141:24]
assign io_out_pipe_out_valid = io_out_pipe_v; // @[Valid.scala:135:21, :141:24]
reg [64:0] io_out_pipe_b_data; // @[Valid.scala:142:26]
assign io_out_pipe_out_bits_data = io_out_pipe_b_data; // @[Valid.scala:135:21, :142:26]
reg [4:0] io_out_pipe_b_exc; // @[Valid.scala:142:26]
assign io_out_pipe_out_bits_exc = io_out_pipe_b_exc; // @[Valid.scala:135:21, :142:26]
assign io_out_valid_0 = io_out_pipe_out_valid; // @[Valid.scala:135:21]
assign io_out_bits_data_0 = io_out_pipe_out_bits_data; // @[Valid.scala:135:21]
assign io_out_bits_exc_0 = io_out_pipe_out_bits_exc; // @[Valid.scala:135:21]
always @(posedge clock) begin // @[FPU.scala:697:7]
valid <= io_in_valid_0; // @[FPU.scala:697:7, :707:22]
if (io_in_valid_0) begin // @[FPU.scala:697:7]
in_ldst <= io_in_bits_ldst_0; // @[FPU.scala:697:7, :708:15]
in_wen <= io_in_bits_wen_0; // @[FPU.scala:697:7, :708:15]
in_ren1 <= io_in_bits_ren1_0; // @[FPU.scala:697:7, :708:15]
in_ren2 <= io_in_bits_ren2_0; // @[FPU.scala:697:7, :708:15]
in_ren3 <= io_in_bits_ren3_0; // @[FPU.scala:697:7, :708:15]
in_swap12 <= io_in_bits_swap12_0; // @[FPU.scala:697:7, :708:15]
in_swap23 <= io_in_bits_swap23_0; // @[FPU.scala:697:7, :708:15]
in_typeTagIn <= io_in_bits_typeTagIn_0; // @[FPU.scala:697:7, :708:15]
in_typeTagOut <= io_in_bits_typeTagOut_0; // @[FPU.scala:697:7, :708:15]
in_fromint <= io_in_bits_fromint_0; // @[FPU.scala:697:7, :708:15]
in_toint <= io_in_bits_toint_0; // @[FPU.scala:697:7, :708:15]
in_fastpipe <= io_in_bits_fastpipe_0; // @[FPU.scala:697:7, :708:15]
in_fma <= io_in_bits_fma_0; // @[FPU.scala:697:7, :708:15]
in_div <= io_in_bits_div_0; // @[FPU.scala:697:7, :708:15]
in_sqrt <= io_in_bits_sqrt_0; // @[FPU.scala:697:7, :708:15]
in_wflags <= io_in_bits_wflags_0; // @[FPU.scala:697:7, :708:15]
in_rm <= io_in_bits_rm_0; // @[FPU.scala:697:7, :708:15]
in_fmaCmd <= io_in_bits_fmaCmd_0; // @[FPU.scala:697:7, :708:15]
in_typ <= io_in_bits_typ_0; // @[FPU.scala:697:7, :708:15]
in_fmt <= io_in_bits_fmt_0; // @[FPU.scala:697:7, :708:15]
in_in1 <= io_in_bits_in1_0; // @[FPU.scala:697:7, :708:15]
in_in2 <= io_in_bits_swap23_0 ? 65'h80000000 : io_in_bits_in2_0; // @[FPU.scala:697:7, :708:15, :714:8, :715:{23,32}]
in_in3 <= io_in_bits_ren3_0 | io_in_bits_swap23_0 ? io_in_bits_in3_0 : zero; // @[FPU.scala:697:7, :708:15, :711:50, :714:8, :716:{21,37,46}]
end
if (_fma_io_validout) begin // @[FPU.scala:719:19]
io_out_pipe_b_data <= res_data; // @[Valid.scala:142:26]
io_out_pipe_b_exc <= res_exc; // @[Valid.scala:142:26]
end
if (reset) // @[FPU.scala:697:7]
io_out_pipe_v <= 1'h0; // @[Valid.scala:141:24]
else // @[FPU.scala:697:7]
io_out_pipe_v <= _fma_io_validout; // @[Valid.scala:141:24]
always @(posedge)
MulAddRecFNPipe_l2_e8_s24_2 fma ( // @[FPU.scala:719:19]
.clock (clock),
.reset (reset),
.io_validin (valid), // @[FPU.scala:707:22]
.io_op (in_fmaCmd), // @[FPU.scala:708:15]
.io_a (in_in1[32:0]), // @[FPU.scala:708:15, :724:12]
.io_b (in_in2[32:0]), // @[FPU.scala:708:15, :725:12]
.io_c (in_in3[32:0]), // @[FPU.scala:708:15, :726:12]
.io_roundingMode (in_rm), // @[FPU.scala:708:15]
.io_out (_fma_io_out),
.io_exceptionFlags (res_exc),
.io_validout (_fma_io_validout)
); // @[FPU.scala:719:19]
assign io_out_valid = io_out_valid_0; // @[FPU.scala:697:7]
assign io_out_bits_data = io_out_bits_data_0; // @[FPU.scala:697:7]
assign io_out_bits_exc = io_out_bits_exc_0; // @[FPU.scala:697:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File Monitor.scala:
package constellation.channel
import chisel3._
import chisel3.util._
import freechips.rocketchip.diplomacy._
import org.chipsalliance.cde.config.{Parameters}
import freechips.rocketchip.util._
import constellation.noc.{HasNoCParams}
class NoCMonitor(val cParam: ChannelParams)(implicit val p: Parameters) extends Module with HasNoCParams {
val io = IO(new Bundle {
val in = Input(new Channel(cParam))
})
val in_flight = RegInit(VecInit(Seq.fill(cParam.nVirtualChannels) { false.B }))
for (i <- 0 until cParam.srcSpeedup) {
val flit = io.in.flit(i)
when (flit.valid) {
when (flit.bits.head) {
in_flight(flit.bits.virt_channel_id) := true.B
assert (!in_flight(flit.bits.virt_channel_id), "Flit head/tail sequencing is broken")
}
when (flit.bits.tail) {
in_flight(flit.bits.virt_channel_id) := false.B
}
}
val possibleFlows = cParam.possibleFlows
when (flit.valid && flit.bits.head) {
cParam match {
case n: ChannelParams => n.virtualChannelParams.zipWithIndex.foreach { case (v,i) =>
assert(flit.bits.virt_channel_id =/= i.U || v.possibleFlows.toSeq.map(_.isFlow(flit.bits.flow)).orR)
}
case _ => assert(cParam.possibleFlows.toSeq.map(_.isFlow(flit.bits.flow)).orR)
}
}
}
}
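// A minimal software model (one flit per cycle assumed) of the bookkeeping the monitor
// above checks: a head flit marks its virtual channel as in flight, a tail flit clears it,
// and a head arriving on an already-busy VC is the sequencing error the assertion reports.
object WormholeTrackSketch {
  def step(inFlight: Set[Int], vc: Int, head: Boolean, tail: Boolean): Either[String, Set[Int]] =
    if (head && inFlight(vc)) Left(s"VC $vc: head flit while a packet is already in flight")
    else {
      val afterHead = if (head) inFlight + vc else inFlight
      Right(if (tail) afterHead - vc else afterHead)
    }
}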
File Types.scala:
package constellation.routing
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.{Parameters}
import constellation.noc.{HasNoCParams}
import constellation.channel.{Flit}
/** A representation for 1 specific virtual channel in wormhole routing
*
* @param src the source node
* @param vc ID for the virtual channel
* @param dst the destination node
* @param n_vc the number of virtual channels
*/
// BEGIN: ChannelRoutingInfo
case class ChannelRoutingInfo(
src: Int,
dst: Int,
vc: Int,
n_vc: Int
) {
// END: ChannelRoutingInfo
require (src >= -1 && dst >= -1 && vc >= 0, s"Illegal $this")
require (!(src == -1 && dst == -1), s"Illegal $this")
require (vc < n_vc, s"Illegal $this")
val isIngress = src == -1
val isEgress = dst == -1
}
/** Represents the properties of a packet that are relevant for routing
* ingressId and egressId uniquely identify a flow, but vnet and dst are used here
 * to simplify the implementation of routing relations
*
* @param ingressId packet's source ingress point
* @param egressId packet's destination egress point
* @param vNet virtual subnetwork identifier
* @param dst packet's destination node ID
*/
// BEGIN: FlowRoutingInfo
case class FlowRoutingInfo(
ingressId: Int,
egressId: Int,
vNetId: Int,
ingressNode: Int,
ingressNodeId: Int,
egressNode: Int,
egressNodeId: Int,
fifo: Boolean
) {
// END: FlowRoutingInfo
def isFlow(f: FlowRoutingBundle): Bool = {
(f.ingress_node === ingressNode.U &&
f.egress_node === egressNode.U &&
f.ingress_node_id === ingressNodeId.U &&
f.egress_node_id === egressNodeId.U)
}
def asLiteral(b: FlowRoutingBundle): BigInt = {
Seq(
(vNetId , b.vnet_id),
(ingressNode , b.ingress_node),
(ingressNodeId , b.ingress_node_id),
(egressNode , b.egress_node),
(egressNodeId , b.egress_node_id)
).foldLeft(0)((l, t) => {
(l << t._2.getWidth) | t._1
})
}
}
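// A minimal sketch of the most-significant-first field packing performed by asLiteral above:
// each (value, width) pair shifts the accumulator left by the field width and ORs in the value.
object FlowPackSketch {
  def pack(fields: Seq[(Int, Int)]): BigInt = // (value, bit width) pairs, MSB field first
    fields.foldLeft(BigInt(0)) { case (acc, (v, w)) => (acc << w) | BigInt(v) }
  // pack(Seq((1, 2), (3, 4))) == BigInt(0x13): value 1 in the upper 2 bits, 3 in the lower 4
}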
class FlowRoutingBundle(implicit val p: Parameters) extends Bundle with HasNoCParams {
// Instead of tracking ingress/egress ID, track the physical destination id and the offset at the destination
// This simplifies the routing tables
val vnet_id = UInt(log2Ceil(nVirtualNetworks).W)
val ingress_node = UInt(log2Ceil(nNodes).W)
val ingress_node_id = UInt(log2Ceil(maxIngressesAtNode).W)
val egress_node = UInt(log2Ceil(nNodes).W)
val egress_node_id = UInt(log2Ceil(maxEgressesAtNode).W)
}
| module NoCMonitor_28( // @[Monitor.scala:11:7]
input clock, // @[Monitor.scala:11:7]
input reset, // @[Monitor.scala:11:7]
input io_in_flit_0_valid, // @[Monitor.scala:12:14]
input io_in_flit_0_bits_head, // @[Monitor.scala:12:14]
input io_in_flit_0_bits_tail, // @[Monitor.scala:12:14]
input [5:0] io_in_flit_0_bits_flow_ingress_node, // @[Monitor.scala:12:14]
input [2:0] io_in_flit_0_bits_flow_ingress_node_id, // @[Monitor.scala:12:14]
input [5:0] io_in_flit_0_bits_flow_egress_node, // @[Monitor.scala:12:14]
input [2:0] io_in_flit_0_bits_flow_egress_node_id, // @[Monitor.scala:12:14]
input [4:0] io_in_flit_0_bits_virt_channel_id // @[Monitor.scala:12:14]
);
reg in_flight_0; // @[Monitor.scala:16:26]
reg in_flight_1; // @[Monitor.scala:16:26]
reg in_flight_2; // @[Monitor.scala:16:26]
reg in_flight_3; // @[Monitor.scala:16:26]
reg in_flight_4; // @[Monitor.scala:16:26]
reg in_flight_5; // @[Monitor.scala:16:26]
reg in_flight_6; // @[Monitor.scala:16:26]
reg in_flight_7; // @[Monitor.scala:16:26]
reg in_flight_8; // @[Monitor.scala:16:26]
reg in_flight_9; // @[Monitor.scala:16:26]
reg in_flight_10; // @[Monitor.scala:16:26]
reg in_flight_11; // @[Monitor.scala:16:26]
reg in_flight_12; // @[Monitor.scala:16:26]
reg in_flight_13; // @[Monitor.scala:16:26]
reg in_flight_14; // @[Monitor.scala:16:26]
reg in_flight_15; // @[Monitor.scala:16:26]
reg in_flight_16; // @[Monitor.scala:16:26]
reg in_flight_17; // @[Monitor.scala:16:26]
reg in_flight_18; // @[Monitor.scala:16:26]
reg in_flight_19; // @[Monitor.scala:16:26]
reg in_flight_20; // @[Monitor.scala:16:26]
reg in_flight_21; // @[Monitor.scala:16:26]
wire _GEN = io_in_flit_0_bits_virt_channel_id == 5'h0; // @[Monitor.scala:21:46]
wire _GEN_0 = io_in_flit_0_bits_virt_channel_id == 5'h1; // @[Monitor.scala:21:46]
wire _GEN_1 = io_in_flit_0_bits_virt_channel_id == 5'h4; // @[Monitor.scala:21:46]
wire _GEN_2 = io_in_flit_0_bits_virt_channel_id == 5'h5; // @[Monitor.scala:21:46]
wire _GEN_3 = io_in_flit_0_bits_virt_channel_id == 5'h6; // @[Monitor.scala:21:46]
wire _GEN_4 = io_in_flit_0_bits_virt_channel_id == 5'h7; // @[Monitor.scala:21:46] |
Generate the Verilog code corresponding to the following Chisel files.
File PlusArg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.experimental._
import chisel3.util.HasBlackBoxResource
@deprecated("This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05")
case class PlusArgInfo(default: BigInt, docstring: String)
/** Case class for PlusArg information
*
* @tparam A scala type of the PlusArg value
* @param default optional default value
* @param docstring text to include in the help
* @param doctype description of the Verilog type of the PlusArg value (e.g. STRING, INT)
*/
private case class PlusArgContainer[A](default: Option[A], docstring: String, doctype: String)
/** Typeclass for converting a type to a doctype string
* @tparam A some type
*/
trait Doctypeable[A] {
/** Return the doctype string for some option */
def toDoctype(a: Option[A]): String
}
/** Object containing implementations of the Doctypeable typeclass */
object Doctypes {
/** Converts an Int => "INT" */
implicit val intToDoctype = new Doctypeable[Int] { def toDoctype(a: Option[Int]) = "INT" }
/** Converts a BigInt => "INT" */
implicit val bigIntToDoctype = new Doctypeable[BigInt] { def toDoctype(a: Option[BigInt]) = "INT" }
/** Converts a String => "STRING" */
implicit val stringToDoctype = new Doctypeable[String] { def toDoctype(a: Option[String]) = "STRING" }
}
class plusarg_reader(val format: String, val default: BigInt, val docstring: String, val width: Int) extends BlackBox(Map(
"FORMAT" -> StringParam(format),
"DEFAULT" -> IntParam(default),
"WIDTH" -> IntParam(width)
)) with HasBlackBoxResource {
val io = IO(new Bundle {
val out = Output(UInt(width.W))
})
addResource("/vsrc/plusarg_reader.v")
}
/* This wrapper class has no outputs, making it clear it is a simulation-only construct */
class PlusArgTimeout(val format: String, val default: BigInt, val docstring: String, val width: Int) extends Module {
val io = IO(new Bundle {
val count = Input(UInt(width.W))
})
val max = Module(new plusarg_reader(format, default, docstring, width)).io.out
when (max > 0.U) {
assert (io.count < max, s"Timeout exceeded: $docstring")
}
}
import Doctypes._
object PlusArg
{
/** PlusArg("foo") will return 42.U if the simulation is run with +foo=42
* Do not use this as an initial register value. The value is set in an
* initial block and thus accessing it from another initial is racey.
* Add a docstring to document the arg, which can be dumped in an elaboration
* pass.
*/
def apply(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32): UInt = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new plusarg_reader(name + "=%d", default, docstring, width)).io.out
}
/** PlusArg.timeout(name, default, docstring)(count) will use chisel.assert
* to kill the simulation when count exceeds the specified integer argument.
* Default 0 will never assert.
*/
def timeout(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32)(count: UInt): Unit = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new PlusArgTimeout(name + "=%d", default, docstring, width)).io.count := count
}
}
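// A minimal usage sketch (hypothetical signal names, inside some Module with a UInt counter
// named `cycles`): reading a plusarg and arming a timeout look like
//   val max_cycles = PlusArg("max_core_cycles", 0, "Stop the simulation after this many cycles")
//   PlusArg.timeout("max_core_cycles", 0, "Stop the simulation after this many cycles")(cycles)
// Running the simulator with +max_core_cycles=1000 then bounds the run; the default of 0
// leaves the timeout assertion disarmed.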
object PlusArgArtefacts {
private var artefacts: Map[String, PlusArgContainer[_]] = Map.empty
/* Add a new PlusArg */
@deprecated(
"Use `Some(BigInt)` to specify a `default` value. This will be removed in Rocket Chip 2020.08",
"Rocket Chip 2020.05"
)
def append(name: String, default: BigInt, docstring: String): Unit = append(name, Some(default), docstring)
/** Add a new PlusArg
*
* @tparam A scala type of the PlusArg value
* @param name name for the PlusArg
* @param default optional default value
* @param docstring text to include in the help
*/
def append[A : Doctypeable](name: String, default: Option[A], docstring: String): Unit =
artefacts = artefacts ++
Map(name -> PlusArgContainer(default, docstring, implicitly[Doctypeable[A]].toDoctype(default)))
/* From plus args, generate help text */
private def serializeHelp_cHeader(tab: String = ""): String = artefacts
.map{ case(arg, info) =>
s"""|$tab+$arg=${info.doctype}\\n\\
|$tab${" "*20}${info.docstring}\\n\\
|""".stripMargin ++ info.default.map{ case default =>
s"$tab${" "*22}(default=${default})\\n\\\n"}.getOrElse("")
}.toSeq.mkString("\\n\\\n") ++ "\""
/* From plus args, generate a char array of their names */
private def serializeArray_cHeader(tab: String = ""): String = {
val prettyTab = tab + " " * 44 // Length of 'static const ...'
s"${tab}static const char * verilog_plusargs [] = {\\\n" ++
artefacts
.map{ case(arg, _) => s"""$prettyTab"$arg",\\\n""" }
.mkString("")++
s"${prettyTab}0};"
}
/* Generate C code to be included in emulator.cc that helps with
* argument parsing based on available Verilog PlusArgs */
def serialize_cHeader(): String =
s"""|#define PLUSARG_USAGE_OPTIONS \"EMULATOR VERILOG PLUSARGS\\n\\
|${serializeHelp_cHeader(" "*7)}
|${serializeArray_cHeader()}
|""".stripMargin
}
File Nodes.scala:
package constellation.channel
import chisel3._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.{Parameters, Field}
import freechips.rocketchip.diplomacy._
case class EmptyParams()
case class ChannelEdgeParams(cp: ChannelParams, p: Parameters)
object ChannelImp extends SimpleNodeImp[EmptyParams, ChannelParams, ChannelEdgeParams, Channel] {
def edge(pd: EmptyParams, pu: ChannelParams, p: Parameters, sourceInfo: SourceInfo) = {
ChannelEdgeParams(pu, p)
}
def bundle(e: ChannelEdgeParams) = new Channel(e.cp)(e.p)
def render(e: ChannelEdgeParams) = if (e.cp.possibleFlows.size == 0) {
RenderedEdge(colour = "ffffff", label = "X")
} else {
RenderedEdge(colour = "#0000ff", label = e.cp.payloadBits.toString)
}
override def monitor(bundle: Channel, edge: ChannelEdgeParams): Unit = {
val monitor = Module(new NoCMonitor(edge.cp)(edge.p))
monitor.io.in := bundle
}
// TODO: Add nodepath stuff? override def mixO, override def mixI
}
case class ChannelSourceNode(val destId: Int)(implicit valName: ValName) extends SourceNode(ChannelImp)(Seq(EmptyParams()))
case class ChannelDestNode(val destParams: ChannelParams)(implicit valName: ValName) extends SinkNode(ChannelImp)(Seq(destParams))
case class ChannelAdapterNode(
slaveFn: ChannelParams => ChannelParams = { d => d })(
implicit valName: ValName) extends AdapterNode(ChannelImp)((e: EmptyParams) => e, slaveFn)
case class ChannelIdentityNode()(implicit valName: ValName) extends IdentityNode(ChannelImp)()
case class ChannelEphemeralNode()(implicit valName: ValName) extends EphemeralNode(ChannelImp)()
case class IngressChannelEdgeParams(cp: IngressChannelParams, p: Parameters)
case class EgressChannelEdgeParams(cp: EgressChannelParams, p: Parameters)
object IngressChannelImp extends SimpleNodeImp[EmptyParams, IngressChannelParams, IngressChannelEdgeParams, IngressChannel] {
def edge(pd: EmptyParams, pu: IngressChannelParams, p: Parameters, sourceInfo: SourceInfo) = {
IngressChannelEdgeParams(pu, p)
}
def bundle(e: IngressChannelEdgeParams) = new IngressChannel(e.cp)(e.p)
def render(e: IngressChannelEdgeParams) = if (e.cp.possibleFlows.size == 0) {
RenderedEdge(colour = "ffffff", label = "X")
} else {
RenderedEdge(colour = "#00ff00", label = e.cp.payloadBits.toString)
}
}
object EgressChannelImp extends SimpleNodeImp[EmptyParams, EgressChannelParams, EgressChannelEdgeParams, EgressChannel] {
def edge(pd: EmptyParams, pu: EgressChannelParams, p: Parameters, sourceInfo: SourceInfo) = {
EgressChannelEdgeParams(pu, p)
}
def bundle(e: EgressChannelEdgeParams) = new EgressChannel(e.cp)(e.p)
def render(e: EgressChannelEdgeParams) = if (e.cp.possibleFlows.size == 0) {
RenderedEdge(colour = "ffffff", label = "X")
} else {
RenderedEdge(colour = "#ff0000", label = e.cp.payloadBits.toString)
}
}
case class IngressChannelSourceNode(val destId: Int)(implicit valName: ValName) extends SourceNode(IngressChannelImp)(Seq(EmptyParams()))
case class IngressChannelDestNode(val destParams: IngressChannelParams)(implicit valName: ValName) extends SinkNode(IngressChannelImp)(Seq(destParams))
case class EgressChannelSourceNode(val egressId: Int)(implicit valName: ValName) extends SourceNode(EgressChannelImp)(Seq(EmptyParams()))
case class EgressChannelDestNode(val destParams: EgressChannelParams)(implicit valName: ValName) extends SinkNode(EgressChannelImp)(Seq(destParams))
case class IngressChannelAdapterNode(
slaveFn: IngressChannelParams => IngressChannelParams = { d => d })(
implicit valName: ValName) extends AdapterNode(IngressChannelImp)(m => m, slaveFn)
case class EgressChannelAdapterNode(
slaveFn: EgressChannelParams => EgressChannelParams = { d => d })(
implicit valName: ValName) extends AdapterNode(EgressChannelImp)(m => m, slaveFn)
case class IngressChannelIdentityNode()(implicit valName: ValName) extends IdentityNode(IngressChannelImp)()
case class EgressChannelIdentityNode()(implicit valName: ValName) extends IdentityNode(EgressChannelImp)()
case class IngressChannelEphemeralNode()(implicit valName: ValName) extends EphemeralNode(IngressChannelImp)()
case class EgressChannelEphemeralNode()(implicit valName: ValName) extends EphemeralNode(EgressChannelImp)()
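A hedged elaboration sketch (not part of the original sources) of how these node types are bound together: a ChannelSourceNode on the sending router is connected to a ChannelDestNode on the receiving router, and the negotiated ChannelEdgeParams materialize as a Channel bundle in the module implementation. The class name and parameter values below are placeholders.
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.diplomacy._
import constellation.channel._
class ChannelBindExample(cp: ChannelParams)(implicit p: Parameters) extends LazyModule {
  val src = ChannelSourceNode(destId = 1)  // outward end of the link
  val dst = ChannelDestNode(cp)            // inward end of the link
  dst := src                               // diplomatic binding; the edge carries ChannelEdgeParams
  lazy val module = new LazyModuleImp(this) {
    val (out, outEdge) = src.out(0)        // Channel bundle driven toward the destination
    val (in,  inEdge)  = dst.in(0)         // Channel bundle arriving from the source
    // A real router implementation would drive out.flit and consume in.flit here.
  }
}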
File Router.scala:
package constellation.router
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.{Field, Parameters}
import freechips.rocketchip.diplomacy._
import freechips.rocketchip.util._
import constellation.channel._
import constellation.routing.{RoutingRelation}
import constellation.noc.{HasNoCParams}
case class UserRouterParams(
// Payload width. Must match payload width on all channels attached to this routing node
payloadBits: Int = 64,
// Combines SA and ST stages (removes pipeline register)
combineSAST: Boolean = false,
// Combines RC and VA stages (removes pipeline register)
combineRCVA: Boolean = false,
// Adds combinational path from SA to VA
coupleSAVA: Boolean = false,
vcAllocator: VCAllocatorParams => Parameters => VCAllocator = (vP) => (p) => new RotatingSingleVCAllocator(vP)(p)
)
case class RouterParams(
nodeId: Int,
nIngress: Int,
nEgress: Int,
user: UserRouterParams
)
trait HasRouterOutputParams {
def outParams: Seq[ChannelParams]
def egressParams: Seq[EgressChannelParams]
def allOutParams = outParams ++ egressParams
def nOutputs = outParams.size
def nEgress = egressParams.size
def nAllOutputs = allOutParams.size
}
trait HasRouterInputParams {
def inParams: Seq[ChannelParams]
def ingressParams: Seq[IngressChannelParams]
def allInParams = inParams ++ ingressParams
def nInputs = inParams.size
def nIngress = ingressParams.size
def nAllInputs = allInParams.size
}
trait HasRouterParams
{
def routerParams: RouterParams
def nodeId = routerParams.nodeId
def payloadBits = routerParams.user.payloadBits
}
class DebugBundle(val nIn: Int) extends Bundle {
val va_stall = Vec(nIn, UInt())
val sa_stall = Vec(nIn, UInt())
}
class Router(
val routerParams: RouterParams,
preDiplomaticInParams: Seq[ChannelParams],
preDiplomaticIngressParams: Seq[IngressChannelParams],
outDests: Seq[Int],
egressIds: Seq[Int]
)(implicit p: Parameters) extends LazyModule with HasNoCParams with HasRouterParams {
val allPreDiplomaticInParams = preDiplomaticInParams ++ preDiplomaticIngressParams
val destNodes = preDiplomaticInParams.map(u => ChannelDestNode(u))
val sourceNodes = outDests.map(u => ChannelSourceNode(u))
val ingressNodes = preDiplomaticIngressParams.map(u => IngressChannelDestNode(u))
val egressNodes = egressIds.map(u => EgressChannelSourceNode(u))
val debugNode = BundleBridgeSource(() => new DebugBundle(allPreDiplomaticInParams.size))
val ctrlNode = if (hasCtrl) Some(BundleBridgeSource(() => new RouterCtrlBundle)) else None
def inParams = module.inParams
def outParams = module.outParams
def ingressParams = module.ingressParams
def egressParams = module.egressParams
lazy val module = new LazyModuleImp(this) with HasRouterInputParams with HasRouterOutputParams {
val (io_in, edgesIn) = destNodes.map(_.in(0)).unzip
val (io_out, edgesOut) = sourceNodes.map(_.out(0)).unzip
val (io_ingress, edgesIngress) = ingressNodes.map(_.in(0)).unzip
val (io_egress, edgesEgress) = egressNodes.map(_.out(0)).unzip
val io_debug = debugNode.out(0)._1
val inParams = edgesIn.map(_.cp)
val outParams = edgesOut.map(_.cp)
val ingressParams = edgesIngress.map(_.cp)
val egressParams = edgesEgress.map(_.cp)
allOutParams.foreach(u => require(u.srcId == nodeId && u.payloadBits == routerParams.user.payloadBits))
allInParams.foreach(u => require(u.destId == nodeId && u.payloadBits == routerParams.user.payloadBits))
require(nIngress == routerParams.nIngress)
require(nEgress == routerParams.nEgress)
require(nAllInputs >= 1)
require(nAllOutputs >= 1)
require(nodeId < (1 << nodeIdBits))
val input_units = inParams.zipWithIndex.map { case (u,i) =>
Module(new InputUnit(u, outParams, egressParams,
routerParams.user.combineRCVA, routerParams.user.combineSAST))
.suggestName(s"input_unit_${i}_from_${u.srcId}") }
val ingress_units = ingressParams.zipWithIndex.map { case (u,i) =>
Module(new IngressUnit(i, u, outParams, egressParams,
routerParams.user.combineRCVA, routerParams.user.combineSAST))
.suggestName(s"ingress_unit_${i+nInputs}_from_${u.ingressId}") }
val all_input_units = input_units ++ ingress_units
val output_units = outParams.zipWithIndex.map { case (u,i) =>
Module(new OutputUnit(inParams, ingressParams, u))
.suggestName(s"output_unit_${i}_to_${u.destId}")}
val egress_units = egressParams.zipWithIndex.map { case (u,i) =>
Module(new EgressUnit(routerParams.user.coupleSAVA && all_input_units.size == 1,
routerParams.user.combineSAST,
inParams, ingressParams, u))
.suggestName(s"egress_unit_${i+nOutputs}_to_${u.egressId}")}
val all_output_units = output_units ++ egress_units
val switch = Module(new Switch(routerParams, inParams, outParams, ingressParams, egressParams))
val switch_allocator = Module(new SwitchAllocator(routerParams, inParams, outParams, ingressParams, egressParams))
val vc_allocator = Module(routerParams.user.vcAllocator(
VCAllocatorParams(routerParams, inParams, outParams, ingressParams, egressParams)
)(p))
val route_computer = Module(new RouteComputer(routerParams, inParams, outParams, ingressParams, egressParams))
val fires_count = WireInit(PopCount(vc_allocator.io.req.map(_.fire)))
dontTouch(fires_count)
(io_in zip input_units ).foreach { case (i,u) => u.io.in <> i }
(io_ingress zip ingress_units).foreach { case (i,u) => u.io.in <> i.flit }
(output_units zip io_out ).foreach { case (u,o) => o <> u.io.out }
(egress_units zip io_egress).foreach { case (u,o) => o.flit <> u.io.out }
(route_computer.io.req zip all_input_units).foreach {
case (i,u) => i <> u.io.router_req }
(all_input_units zip route_computer.io.resp).foreach {
case (u,o) => u.io.router_resp <> o }
(vc_allocator.io.req zip all_input_units).foreach {
case (i,u) => i <> u.io.vcalloc_req }
(all_input_units zip vc_allocator.io.resp).foreach {
case (u,o) => u.io.vcalloc_resp <> o }
(all_output_units zip vc_allocator.io.out_allocs).foreach {
case (u,a) => u.io.allocs <> a }
(vc_allocator.io.channel_status zip all_output_units).foreach {
case (a,u) => a := u.io.channel_status }
all_input_units.foreach(in => all_output_units.zipWithIndex.foreach { case (out,outIdx) =>
in.io.out_credit_available(outIdx) := out.io.credit_available
})
(all_input_units zip switch_allocator.io.req).foreach {
case (u,r) => r <> u.io.salloc_req }
(all_output_units zip switch_allocator.io.credit_alloc).foreach {
case (u,a) => u.io.credit_alloc := a }
(switch.io.in zip all_input_units).foreach {
case (i,u) => i <> u.io.out }
(all_output_units zip switch.io.out).foreach {
case (u,o) => u.io.in <> o }
switch.io.sel := (if (routerParams.user.combineSAST) {
switch_allocator.io.switch_sel
} else {
RegNext(switch_allocator.io.switch_sel)
})
if (hasCtrl) {
val io_ctrl = ctrlNode.get.out(0)._1
val ctrl = Module(new RouterControlUnit(routerParams, inParams, outParams, ingressParams, egressParams))
io_ctrl <> ctrl.io.ctrl
(all_input_units zip ctrl.io.in_block ).foreach { case (l,r) => l.io.block := r }
(all_input_units zip ctrl.io.in_fire ).foreach { case (l,r) => r := l.io.out.map(_.valid) }
} else {
input_units.foreach(_.io.block := false.B)
ingress_units.foreach(_.io.block := false.B)
}
(io_debug.va_stall zip all_input_units.map(_.io.debug.va_stall)).map { case (l,r) => l := r }
(io_debug.sa_stall zip all_input_units.map(_.io.debug.sa_stall)).map { case (l,r) => l := r }
val debug_tsc = RegInit(0.U(64.W))
debug_tsc := debug_tsc + 1.U
val debug_sample = RegInit(0.U(64.W))
debug_sample := debug_sample + 1.U
val sample_rate = PlusArg("noc_util_sample_rate", width=20)
when (debug_sample === sample_rate - 1.U) { debug_sample := 0.U }
def sample(fire: Bool, s: String) = {
val util_ctr = RegInit(0.U(64.W))
val fired = RegInit(false.B)
util_ctr := util_ctr + fire
fired := fired || fire
when (sample_rate =/= 0.U && debug_sample === sample_rate - 1.U && fired) {
val fmtStr = s"nocsample %d $s %d\n"
printf(fmtStr, debug_tsc, util_ctr);
fired := fire
}
}
destNodes.map(_.in(0)).foreach { case (in, edge) => in.flit.map { f =>
sample(f.fire, s"${edge.cp.srcId} $nodeId")
} }
ingressNodes.map(_.in(0)).foreach { case (in, edge) =>
sample(in.flit.fire, s"i${edge.cp.asInstanceOf[IngressChannelParams].ingressId} $nodeId")
}
egressNodes.map(_.out(0)).foreach { case (out, edge) =>
sample(out.flit.fire, s"$nodeId e${edge.cp.asInstanceOf[EgressChannelParams].egressId}")
}
}
}
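The sample helper above gates its utilization printf on the +noc_util_sample_rate PlusArg. A stand-alone sketch of the same idea (assumed module and signal names, not part of the original file):
import chisel3._
import freechips.rocketchip.util.PlusArg
// Counts how often `fire` is asserted and prints one "nocsample" line per
// sampling window; leaving the PlusArg at 0 disables the printout entirely.
class UtilSampler(tag: String) extends Module {
  val io = IO(new Bundle { val fire = Input(Bool()) })
  val sample_rate = PlusArg("noc_util_sample_rate", width = 20)
  val tsc    = RegInit(0.U(64.W))
  val window = RegInit(0.U(64.W))
  val ctr    = RegInit(0.U(64.W))
  val fired  = RegInit(false.B)
  tsc    := tsc + 1.U
  window := Mux(window === sample_rate - 1.U, 0.U, window + 1.U)
  ctr    := ctr + io.fire
  fired  := fired || io.fire
  when (sample_rate =/= 0.U && window === sample_rate - 1.U && fired) {
    printf(s"nocsample %d $tag %d\n", tsc, ctr)
    fired := io.fire   // restart the window's fired flag, as in sample() above
  }
}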
File LazyModuleImp.scala:
package org.chipsalliance.diplomacy.lazymodule
import chisel3.{withClockAndReset, Module, RawModule, Reset, _}
import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo}
import firrtl.passes.InlineAnnotation
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.nodes.Dangle
import scala.collection.immutable.SortedMap
/** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]].
*
* This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy.
*/
sealed trait LazyModuleImpLike extends RawModule {
/** [[LazyModule]] that contains this instance. */
val wrapper: LazyModule
/** IOs that will be automatically "punched" for this instance. */
val auto: AutoBundle
/** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */
protected[diplomacy] val dangles: Seq[Dangle]
// [[wrapper.module]] had better not be accessed while LazyModules are still being built!
require(
LazyModule.scope.isEmpty,
s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}"
)
/** Set module name. Defaults to the containing LazyModule's desiredName. */
override def desiredName: String = wrapper.desiredName
suggestName(wrapper.suggestedName)
/** [[Parameters]] for chisel [[Module]]s. */
implicit val p: Parameters = wrapper.p
  /** Instantiate this [[LazyModule]] and return its [[AutoBundle]] together with any unconnected [[Dangle]]s from
    * this module and its submodules.
    */
protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = {
    // 1. Recursively appends [[wrapper.children]] into [[chisel3.internal.Builder]].
    // 2. Returns the [[Dangle]]s from each child module.
val childDangles = wrapper.children.reverse.flatMap { c =>
implicit val sourceInfo: SourceInfo = c.info
c.cloneProto.map { cp =>
// If the child is a clone, then recursively set cloneProto of its children as well
def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = {
require(bases.size == clones.size)
(bases.zip(clones)).map { case (l, r) =>
require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}")
l.cloneProto = Some(r)
assignCloneProtos(l.children, r.children)
}
}
assignCloneProtos(c.children, cp.children)
// Clone the child module as a record, and get its [[AutoBundle]]
val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName)
val clonedAuto = clone("auto").asInstanceOf[AutoBundle]
        // Get the empty [[Dangle]]s of the cloned child
val rawDangles = c.cloneDangles()
require(rawDangles.size == clonedAuto.elements.size)
        // Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]s
val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) }
dangles
}.getOrElse {
// For non-clones, instantiate the child module
val mod = try {
Module(c.module)
} catch {
case e: ChiselException => {
println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}")
throw e
}
}
mod.dangles
}
}
// Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]].
// This will result in a sequence of [[Dangle]] from these [[BaseNode]]s.
val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate())
// Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]]
val allDangles = nodeDangles ++ childDangles
// Group [[allDangles]] by their [[source]].
val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*)
// For each [[source]] set of [[Dangle]]s of size 2, ensure that these
// can be connected as a source-sink pair (have opposite flipped value).
// Make the connection and mark them as [[done]].
val done = Set() ++ pairing.values.filter(_.size == 2).map {
case Seq(a, b) =>
require(a.flipped != b.flipped)
// @todo <> in chisel3 makes directionless connection.
if (a.flipped) {
a.data <> b.data
} else {
b.data <> a.data
}
a.source
case _ => None
}
// Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module.
val forward = allDangles.filter(d => !done(d.source))
// Generate [[AutoBundle]] IO from [[forward]].
val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*))
// Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]]
val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) =>
if (d.flipped) {
d.data <> io
} else {
io <> d.data
}
d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name)
}
// Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]].
wrapper.inModuleBody.reverse.foreach {
_()
}
if (wrapper.shouldBeInlined) {
chisel3.experimental.annotate(new ChiselAnnotation {
def toFirrtl = InlineAnnotation(toNamed)
})
}
// Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]].
(auto, dangles)
}
}
/** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike {
/** Instantiate hardware of this `Module`. */
val (auto, dangles) = instantiate()
}
/** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike {
// These wires are the default clock+reset for all LazyModule children.
// It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the
// [[LazyRawModuleImp]] children.
// Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly.
/** drive clock explicitly. */
val childClock: Clock = Wire(Clock())
/** drive reset explicitly. */
val childReset: Reset = Wire(Reset())
// the default is that these are disabled
childClock := false.B.asClock
childReset := chisel3.DontCare
def provideImplicitClockToLazyChildren: Boolean = false
val (auto, dangles) =
if (provideImplicitClockToLazyChildren) {
withClockAndReset(childClock, childReset) { instantiate() }
} else {
instantiate()
}
}
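A minimal sketch (an assumption, not taken from the file above) of the intended use of childClock and childReset: a LazyRawModuleImp that forwards an externally supplied clock and reset to its lazy children instead of leaving the disabled defaults in place.
import chisel3._
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.lazymodule._
class ClockedIsland(implicit p: Parameters) extends LazyModule {
  lazy val module = new LazyRawModuleImp(this) {
    // Elaborate diplomatic children under childClock/childReset.
    override def provideImplicitClockToLazyChildren = true
    val io = IO(new Bundle {
      val clock = Input(Clock())
      val reset = Input(Bool())
    })
    childClock := io.clock   // replaces the `false.B.asClock` default above
    childReset := io.reset
  }
}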
| module Router_22( // @[Router.scala:89:25]
input clock, // @[Router.scala:89:25]
input reset, // @[Router.scala:89:25]
output [1:0] auto_debug_out_va_stall_0, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_debug_out_va_stall_1, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_debug_out_sa_stall_0, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_debug_out_sa_stall_1, // @[LazyModuleImp.scala:107:25]
input auto_egress_nodes_out_flit_ready, // @[LazyModuleImp.scala:107:25]
output auto_egress_nodes_out_flit_valid, // @[LazyModuleImp.scala:107:25]
output auto_egress_nodes_out_flit_bits_head, // @[LazyModuleImp.scala:107:25]
output auto_egress_nodes_out_flit_bits_tail, // @[LazyModuleImp.scala:107:25]
output [36:0] auto_egress_nodes_out_flit_bits_payload, // @[LazyModuleImp.scala:107:25]
output auto_ingress_nodes_in_flit_ready, // @[LazyModuleImp.scala:107:25]
input auto_ingress_nodes_in_flit_valid, // @[LazyModuleImp.scala:107:25]
input auto_ingress_nodes_in_flit_bits_head, // @[LazyModuleImp.scala:107:25]
input auto_ingress_nodes_in_flit_bits_tail, // @[LazyModuleImp.scala:107:25]
input [36:0] auto_ingress_nodes_in_flit_bits_payload, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_ingress_nodes_in_flit_bits_egress_id, // @[LazyModuleImp.scala:107:25]
output auto_source_nodes_out_flit_0_valid, // @[LazyModuleImp.scala:107:25]
output auto_source_nodes_out_flit_0_bits_head, // @[LazyModuleImp.scala:107:25]
output auto_source_nodes_out_flit_0_bits_tail, // @[LazyModuleImp.scala:107:25]
output [36:0] auto_source_nodes_out_flit_0_bits_payload, // @[LazyModuleImp.scala:107:25]
output auto_source_nodes_out_flit_0_bits_flow_vnet_id, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_source_nodes_out_flit_0_bits_flow_ingress_node, // @[LazyModuleImp.scala:107:25]
output auto_source_nodes_out_flit_0_bits_flow_ingress_node_id, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_source_nodes_out_flit_0_bits_flow_egress_node, // @[LazyModuleImp.scala:107:25]
output auto_source_nodes_out_flit_0_bits_flow_egress_node_id, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_source_nodes_out_flit_0_bits_virt_channel_id, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_source_nodes_out_credit_return, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_source_nodes_out_vc_free, // @[LazyModuleImp.scala:107:25]
input auto_dest_nodes_in_flit_0_valid, // @[LazyModuleImp.scala:107:25]
input auto_dest_nodes_in_flit_0_bits_head, // @[LazyModuleImp.scala:107:25]
input auto_dest_nodes_in_flit_0_bits_tail, // @[LazyModuleImp.scala:107:25]
input [36:0] auto_dest_nodes_in_flit_0_bits_payload, // @[LazyModuleImp.scala:107:25]
input auto_dest_nodes_in_flit_0_bits_flow_vnet_id, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_dest_nodes_in_flit_0_bits_flow_ingress_node, // @[LazyModuleImp.scala:107:25]
input auto_dest_nodes_in_flit_0_bits_flow_ingress_node_id, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_dest_nodes_in_flit_0_bits_flow_egress_node, // @[LazyModuleImp.scala:107:25]
input auto_dest_nodes_in_flit_0_bits_flow_egress_node_id, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_dest_nodes_in_flit_0_bits_virt_channel_id, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_dest_nodes_in_credit_return, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_dest_nodes_in_vc_free // @[LazyModuleImp.scala:107:25]
);
wire [19:0] _plusarg_reader_out; // @[PlusArg.scala:80:11]
wire _route_computer_io_resp_1_vc_sel_0_0; // @[Router.scala:136:32]
wire _route_computer_io_resp_1_vc_sel_0_1; // @[Router.scala:136:32]
wire _route_computer_io_resp_1_vc_sel_0_2; // @[Router.scala:136:32]
wire _route_computer_io_resp_1_vc_sel_0_3; // @[Router.scala:136:32]
wire _route_computer_io_resp_0_vc_sel_0_0; // @[Router.scala:136:32]
wire _route_computer_io_resp_0_vc_sel_0_1; // @[Router.scala:136:32]
wire _vc_allocator_io_req_1_ready; // @[Router.scala:133:30]
wire _vc_allocator_io_req_0_ready; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_1_vc_sel_1_0; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_1_vc_sel_0_0; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_1_vc_sel_0_1; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_1_vc_sel_0_2; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_1_vc_sel_0_3; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_0_vc_sel_1_0; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_0_vc_sel_0_0; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_0_vc_sel_0_1; // @[Router.scala:133:30]
wire _vc_allocator_io_out_allocs_1_0_alloc; // @[Router.scala:133:30]
wire _vc_allocator_io_out_allocs_0_0_alloc; // @[Router.scala:133:30]
wire _vc_allocator_io_out_allocs_0_1_alloc; // @[Router.scala:133:30]
wire _vc_allocator_io_out_allocs_0_3_alloc; // @[Router.scala:133:30]
wire _switch_allocator_io_req_1_0_ready; // @[Router.scala:132:34]
wire _switch_allocator_io_req_0_0_ready; // @[Router.scala:132:34]
wire _switch_allocator_io_credit_alloc_1_0_alloc; // @[Router.scala:132:34]
wire _switch_allocator_io_credit_alloc_1_0_tail; // @[Router.scala:132:34]
wire _switch_allocator_io_credit_alloc_0_0_alloc; // @[Router.scala:132:34]
wire _switch_allocator_io_credit_alloc_0_1_alloc; // @[Router.scala:132:34]
wire _switch_allocator_io_credit_alloc_0_3_alloc; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_1_0_1_0; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_1_0_0_0; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_0_0_1_0; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_0_0_0_0; // @[Router.scala:132:34]
wire _switch_io_out_1_0_valid; // @[Router.scala:131:24]
wire _switch_io_out_1_0_bits_head; // @[Router.scala:131:24]
wire _switch_io_out_1_0_bits_tail; // @[Router.scala:131:24]
wire [36:0] _switch_io_out_1_0_bits_payload; // @[Router.scala:131:24]
wire [3:0] _switch_io_out_1_0_bits_flow_ingress_node; // @[Router.scala:131:24]
wire _switch_io_out_1_0_bits_flow_ingress_node_id; // @[Router.scala:131:24]
wire _switch_io_out_0_0_valid; // @[Router.scala:131:24]
wire _switch_io_out_0_0_bits_head; // @[Router.scala:131:24]
wire _switch_io_out_0_0_bits_tail; // @[Router.scala:131:24]
wire [36:0] _switch_io_out_0_0_bits_payload; // @[Router.scala:131:24]
wire _switch_io_out_0_0_bits_flow_vnet_id; // @[Router.scala:131:24]
wire [3:0] _switch_io_out_0_0_bits_flow_ingress_node; // @[Router.scala:131:24]
wire _switch_io_out_0_0_bits_flow_ingress_node_id; // @[Router.scala:131:24]
wire [3:0] _switch_io_out_0_0_bits_flow_egress_node; // @[Router.scala:131:24]
wire _switch_io_out_0_0_bits_flow_egress_node_id; // @[Router.scala:131:24]
wire [1:0] _switch_io_out_0_0_bits_virt_channel_id; // @[Router.scala:131:24]
wire _egress_unit_1_to_10_io_credit_available_0; // @[Router.scala:125:13]
wire _egress_unit_1_to_10_io_channel_status_0_occupied; // @[Router.scala:125:13]
wire _egress_unit_1_to_10_io_out_valid; // @[Router.scala:125:13]
wire _output_unit_0_to_10_io_credit_available_0; // @[Router.scala:122:13]
wire _output_unit_0_to_10_io_credit_available_1; // @[Router.scala:122:13]
wire _output_unit_0_to_10_io_credit_available_3; // @[Router.scala:122:13]
wire _output_unit_0_to_10_io_channel_status_0_occupied; // @[Router.scala:122:13]
wire _output_unit_0_to_10_io_channel_status_1_occupied; // @[Router.scala:122:13]
wire _output_unit_0_to_10_io_channel_status_3_occupied; // @[Router.scala:122:13]
wire [3:0] _ingress_unit_1_from_10_io_router_req_bits_flow_egress_node; // @[Router.scala:116:13]
wire _ingress_unit_1_from_10_io_vcalloc_req_valid; // @[Router.scala:116:13]
wire _ingress_unit_1_from_10_io_vcalloc_req_bits_vc_sel_1_0; // @[Router.scala:116:13]
wire _ingress_unit_1_from_10_io_vcalloc_req_bits_vc_sel_0_0; // @[Router.scala:116:13]
wire _ingress_unit_1_from_10_io_vcalloc_req_bits_vc_sel_0_1; // @[Router.scala:116:13]
wire _ingress_unit_1_from_10_io_vcalloc_req_bits_vc_sel_0_2; // @[Router.scala:116:13]
wire _ingress_unit_1_from_10_io_vcalloc_req_bits_vc_sel_0_3; // @[Router.scala:116:13]
wire _ingress_unit_1_from_10_io_salloc_req_0_valid; // @[Router.scala:116:13]
wire _ingress_unit_1_from_10_io_salloc_req_0_bits_vc_sel_1_0; // @[Router.scala:116:13]
wire _ingress_unit_1_from_10_io_salloc_req_0_bits_vc_sel_0_0; // @[Router.scala:116:13]
wire _ingress_unit_1_from_10_io_salloc_req_0_bits_vc_sel_0_1; // @[Router.scala:116:13]
wire _ingress_unit_1_from_10_io_salloc_req_0_bits_vc_sel_0_2; // @[Router.scala:116:13]
wire _ingress_unit_1_from_10_io_salloc_req_0_bits_vc_sel_0_3; // @[Router.scala:116:13]
wire _ingress_unit_1_from_10_io_salloc_req_0_bits_tail; // @[Router.scala:116:13]
wire _ingress_unit_1_from_10_io_out_0_valid; // @[Router.scala:116:13]
wire _ingress_unit_1_from_10_io_out_0_bits_flit_head; // @[Router.scala:116:13]
wire _ingress_unit_1_from_10_io_out_0_bits_flit_tail; // @[Router.scala:116:13]
wire [36:0] _ingress_unit_1_from_10_io_out_0_bits_flit_payload; // @[Router.scala:116:13]
wire _ingress_unit_1_from_10_io_out_0_bits_flit_flow_vnet_id; // @[Router.scala:116:13]
wire [3:0] _ingress_unit_1_from_10_io_out_0_bits_flit_flow_ingress_node; // @[Router.scala:116:13]
wire _ingress_unit_1_from_10_io_out_0_bits_flit_flow_ingress_node_id; // @[Router.scala:116:13]
wire [3:0] _ingress_unit_1_from_10_io_out_0_bits_flit_flow_egress_node; // @[Router.scala:116:13]
wire _ingress_unit_1_from_10_io_out_0_bits_flit_flow_egress_node_id; // @[Router.scala:116:13]
wire [1:0] _ingress_unit_1_from_10_io_out_0_bits_out_virt_channel; // @[Router.scala:116:13]
wire _ingress_unit_1_from_10_io_in_ready; // @[Router.scala:116:13]
wire [1:0] _input_unit_0_from_8_io_router_req_bits_src_virt_id; // @[Router.scala:112:13]
wire _input_unit_0_from_8_io_router_req_bits_flow_vnet_id; // @[Router.scala:112:13]
wire [3:0] _input_unit_0_from_8_io_router_req_bits_flow_ingress_node; // @[Router.scala:112:13]
wire _input_unit_0_from_8_io_router_req_bits_flow_ingress_node_id; // @[Router.scala:112:13]
wire [3:0] _input_unit_0_from_8_io_router_req_bits_flow_egress_node; // @[Router.scala:112:13]
wire _input_unit_0_from_8_io_router_req_bits_flow_egress_node_id; // @[Router.scala:112:13]
wire _input_unit_0_from_8_io_vcalloc_req_valid; // @[Router.scala:112:13]
wire _input_unit_0_from_8_io_vcalloc_req_bits_vc_sel_1_0; // @[Router.scala:112:13]
wire _input_unit_0_from_8_io_vcalloc_req_bits_vc_sel_0_0; // @[Router.scala:112:13]
wire _input_unit_0_from_8_io_vcalloc_req_bits_vc_sel_0_1; // @[Router.scala:112:13]
wire _input_unit_0_from_8_io_salloc_req_0_valid; // @[Router.scala:112:13]
wire _input_unit_0_from_8_io_salloc_req_0_bits_vc_sel_1_0; // @[Router.scala:112:13]
wire _input_unit_0_from_8_io_salloc_req_0_bits_vc_sel_0_0; // @[Router.scala:112:13]
wire _input_unit_0_from_8_io_salloc_req_0_bits_vc_sel_0_1; // @[Router.scala:112:13]
wire _input_unit_0_from_8_io_salloc_req_0_bits_vc_sel_0_2; // @[Router.scala:112:13]
wire _input_unit_0_from_8_io_salloc_req_0_bits_vc_sel_0_3; // @[Router.scala:112:13]
wire _input_unit_0_from_8_io_salloc_req_0_bits_tail; // @[Router.scala:112:13]
wire _input_unit_0_from_8_io_out_0_valid; // @[Router.scala:112:13]
wire _input_unit_0_from_8_io_out_0_bits_flit_head; // @[Router.scala:112:13]
wire _input_unit_0_from_8_io_out_0_bits_flit_tail; // @[Router.scala:112:13]
wire [36:0] _input_unit_0_from_8_io_out_0_bits_flit_payload; // @[Router.scala:112:13]
wire _input_unit_0_from_8_io_out_0_bits_flit_flow_vnet_id; // @[Router.scala:112:13]
wire [3:0] _input_unit_0_from_8_io_out_0_bits_flit_flow_ingress_node; // @[Router.scala:112:13]
wire _input_unit_0_from_8_io_out_0_bits_flit_flow_ingress_node_id; // @[Router.scala:112:13]
wire [3:0] _input_unit_0_from_8_io_out_0_bits_flit_flow_egress_node; // @[Router.scala:112:13]
wire _input_unit_0_from_8_io_out_0_bits_flit_flow_egress_node_id; // @[Router.scala:112:13]
wire [1:0] _input_unit_0_from_8_io_out_0_bits_out_virt_channel; // @[Router.scala:112:13]
wire [1:0] fires_count = {1'h0, _vc_allocator_io_req_0_ready & _input_unit_0_from_8_io_vcalloc_req_valid} + {1'h0, _vc_allocator_io_req_1_ready & _ingress_unit_1_from_10_io_vcalloc_req_valid}; // @[Decoupled.scala:51:35]
reg REG_1_0_1_0; // @[Router.scala:178:14]
reg REG_1_0_0_0; // @[Router.scala:178:14]
reg REG_0_0_1_0; // @[Router.scala:178:14]
reg REG_0_0_0_0; // @[Router.scala:178:14]
reg [63:0] debug_tsc; // @[Router.scala:195:28]
reg [63:0] debug_sample; // @[Router.scala:197:31]
wire _GEN = debug_sample == {44'h0, _plusarg_reader_out - 20'h1}; // @[PlusArg.scala:80:11]
reg [63:0] util_ctr; // @[Router.scala:203:29]
reg fired; // @[Router.scala:204:26]
wire _GEN_0 = (|_plusarg_reader_out) & _GEN; // @[PlusArg.scala:80:11]
wire _GEN_1 = _GEN_0 & fired; // @[Router.scala:204:26, :207:{33,71}]
reg [63:0] util_ctr_1; // @[Router.scala:203:29]
reg fired_1; // @[Router.scala:204:26]
wire _GEN_2 = _GEN_0 & fired_1; // @[Router.scala:204:26, :207:{33,71}]
reg [63:0] util_ctr_2; // @[Router.scala:203:29]
reg fired_2; // @[Router.scala:204:26]
wire _GEN_3 = _GEN_0 & fired_2; // @[Router.scala:204:26, :207:{33,71}] |
Generate the Verilog code corresponding to the following Chisel files.
File SwitchAllocator.scala:
package constellation.router
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.{Field, Parameters}
import freechips.rocketchip.util._
import constellation.channel._
class SwitchAllocReq(val outParams: Seq[ChannelParams], val egressParams: Seq[EgressChannelParams])
(implicit val p: Parameters) extends Bundle with HasRouterOutputParams {
val vc_sel = MixedVec(allOutParams.map { u => Vec(u.nVirtualChannels, Bool()) })
val tail = Bool()
}
class SwitchArbiter(inN: Int, outN: Int, outParams: Seq[ChannelParams], egressParams: Seq[EgressChannelParams])(implicit val p: Parameters) extends Module {
val io = IO(new Bundle {
val in = Flipped(Vec(inN, Decoupled(new SwitchAllocReq(outParams, egressParams))))
val out = Vec(outN, Decoupled(new SwitchAllocReq(outParams, egressParams)))
val chosen_oh = Vec(outN, Output(UInt(inN.W)))
})
val lock = Seq.fill(outN) { RegInit(0.U(inN.W)) }
val unassigned = Cat(io.in.map(_.valid).reverse) & ~(lock.reduce(_|_))
val mask = RegInit(0.U(inN.W))
val choices = Wire(Vec(outN, UInt(inN.W)))
var sel = PriorityEncoderOH(Cat(unassigned, unassigned & ~mask))
for (i <- 0 until outN) {
choices(i) := sel | (sel >> inN)
sel = PriorityEncoderOH(unassigned & ~choices(i))
}
io.in.foreach(_.ready := false.B)
var chosens = 0.U(inN.W)
val in_tails = Cat(io.in.map(_.bits.tail).reverse)
for (i <- 0 until outN) {
val in_valids = Cat((0 until inN).map { j => io.in(j).valid && !chosens(j) }.reverse)
val chosen = Mux((in_valids & lock(i) & ~chosens).orR, lock(i), choices(i))
io.chosen_oh(i) := chosen
io.out(i).valid := (in_valids & chosen).orR
io.out(i).bits := Mux1H(chosen, io.in.map(_.bits))
for (j <- 0 until inN) {
when (chosen(j) && io.out(i).ready) {
io.in(j).ready := true.B
}
}
chosens = chosens | chosen
when (io.out(i).fire) {
lock(i) := chosen & ~in_tails
}
}
when (io.out(0).fire) {
mask := (0 until inN).map { i => (io.chosen_oh(0) >> i) }.reduce(_|_)
} .otherwise {
mask := Mux(~mask === 0.U, 0.U, (mask << 1) | 1.U(1.W))
}
}
class SwitchAllocator(
val routerParams: RouterParams,
val inParams: Seq[ChannelParams],
val outParams: Seq[ChannelParams],
val ingressParams: Seq[IngressChannelParams],
val egressParams: Seq[EgressChannelParams]
)(implicit val p: Parameters) extends Module
with HasRouterParams
with HasRouterInputParams
with HasRouterOutputParams {
val io = IO(new Bundle {
val req = MixedVec(allInParams.map(u =>
Vec(u.destSpeedup, Flipped(Decoupled(new SwitchAllocReq(outParams, egressParams))))))
val credit_alloc = MixedVec(allOutParams.map { u => Vec(u.nVirtualChannels, Output(new OutputCreditAlloc))})
val switch_sel = MixedVec(allOutParams.map { o => Vec(o.srcSpeedup,
MixedVec(allInParams.map { i => Vec(i.destSpeedup, Output(Bool())) })) })
})
val nInputChannels = allInParams.map(_.nVirtualChannels).sum
val arbs = allOutParams.map { oP => Module(new SwitchArbiter(
allInParams.map(_.destSpeedup).reduce(_+_),
oP.srcSpeedup,
outParams,
egressParams
))}
arbs.foreach(_.io.out.foreach(_.ready := true.B))
var idx = 0
io.req.foreach(_.foreach { o =>
val fires = Wire(Vec(arbs.size, Bool()))
arbs.zipWithIndex.foreach { case (a,i) =>
a.io.in(idx).valid := o.valid && o.bits.vc_sel(i).reduce(_||_)
a.io.in(idx).bits := o.bits
fires(i) := a.io.in(idx).fire
}
o.ready := fires.reduce(_||_)
idx += 1
})
for (i <- 0 until nAllOutputs) {
for (j <- 0 until allOutParams(i).srcSpeedup) {
idx = 0
for (m <- 0 until nAllInputs) {
for (n <- 0 until allInParams(m).destSpeedup) {
io.switch_sel(i)(j)(m)(n) := arbs(i).io.in(idx).valid && arbs(i).io.chosen_oh(j)(idx) && arbs(i).io.out(j).valid
idx += 1
}
}
}
}
io.credit_alloc.foreach(_.foreach(_.alloc := false.B))
io.credit_alloc.foreach(_.foreach(_.tail := false.B))
(arbs zip io.credit_alloc).zipWithIndex.map { case ((a,i),t) =>
for (j <- 0 until i.size) {
for (k <- 0 until a.io.out.size) {
when (a.io.out(k).valid && a.io.out(k).bits.vc_sel(t)(j)) {
i(j).alloc := true.B
i(j).tail := a.io.out(k).bits.tail
}
}
}
}
}
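The arbitration in SwitchArbiter is a locking round-robin: lock(i) holds a granted input until its tail flit, while mask rotates priority past whichever input was granted on output 0. A plain-Scala model of just the priority-rotation part (the lock path is ignored; illustration only):
object RotatingPriorityModel {
  /** Index of the granted requester, given the valid bits and the current mask. */
  def choose(valids: Seq[Boolean], mask: BigInt): Option[Int] = {
    val pending = valids.zipWithIndex.collect { case (true, i) => i }
    if (pending.isEmpty) None
    else {
      // Mirrors PriorityEncoderOH(Cat(unassigned, unassigned & ~mask)): prefer the
      // lowest-index request not covered by the mask, else the lowest-index request.
      val unmasked = pending.filterNot(i => mask.testBit(i))
      Some(unmasked.headOption.getOrElse(pending.head))
    }
  }
  /** After output 0 grants input i, the new mask covers bit positions 0 .. i. */
  def nextMask(granted: Int): BigInt = (BigInt(1) << (granted + 1)) - 1
}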
| module SwitchAllocator( // @[SwitchAllocator.scala:64:7]
input clock, // @[SwitchAllocator.scala:64:7]
input reset, // @[SwitchAllocator.scala:64:7]
output io_req_5_0_ready, // @[SwitchAllocator.scala:74:14]
input io_req_5_0_valid, // @[SwitchAllocator.scala:74:14]
input io_req_5_0_bits_vc_sel_5_0, // @[SwitchAllocator.scala:74:14]
input io_req_5_0_bits_vc_sel_4_0, // @[SwitchAllocator.scala:74:14]
input io_req_5_0_bits_vc_sel_3_0, // @[SwitchAllocator.scala:74:14]
input io_req_5_0_bits_vc_sel_2_0, // @[SwitchAllocator.scala:74:14]
input io_req_5_0_bits_vc_sel_1_0, // @[SwitchAllocator.scala:74:14]
input io_req_5_0_bits_vc_sel_0_0, // @[SwitchAllocator.scala:74:14]
input io_req_5_0_bits_vc_sel_0_1, // @[SwitchAllocator.scala:74:14]
input io_req_5_0_bits_vc_sel_0_2, // @[SwitchAllocator.scala:74:14]
input io_req_5_0_bits_vc_sel_0_3, // @[SwitchAllocator.scala:74:14]
input io_req_5_0_bits_vc_sel_0_4, // @[SwitchAllocator.scala:74:14]
input io_req_5_0_bits_vc_sel_0_5, // @[SwitchAllocator.scala:74:14]
input io_req_5_0_bits_vc_sel_0_6, // @[SwitchAllocator.scala:74:14]
input io_req_5_0_bits_vc_sel_0_7, // @[SwitchAllocator.scala:74:14]
input io_req_5_0_bits_vc_sel_0_8, // @[SwitchAllocator.scala:74:14]
input io_req_5_0_bits_vc_sel_0_9, // @[SwitchAllocator.scala:74:14]
input io_req_5_0_bits_vc_sel_0_10, // @[SwitchAllocator.scala:74:14]
input io_req_5_0_bits_vc_sel_0_11, // @[SwitchAllocator.scala:74:14]
input io_req_5_0_bits_vc_sel_0_12, // @[SwitchAllocator.scala:74:14]
input io_req_5_0_bits_vc_sel_0_13, // @[SwitchAllocator.scala:74:14]
input io_req_5_0_bits_vc_sel_0_14, // @[SwitchAllocator.scala:74:14]
input io_req_5_0_bits_vc_sel_0_15, // @[SwitchAllocator.scala:74:14]
input io_req_5_0_bits_vc_sel_0_16, // @[SwitchAllocator.scala:74:14]
input io_req_5_0_bits_vc_sel_0_17, // @[SwitchAllocator.scala:74:14]
input io_req_5_0_bits_vc_sel_0_18, // @[SwitchAllocator.scala:74:14]
input io_req_5_0_bits_vc_sel_0_19, // @[SwitchAllocator.scala:74:14]
input io_req_5_0_bits_vc_sel_0_20, // @[SwitchAllocator.scala:74:14]
input io_req_5_0_bits_vc_sel_0_21, // @[SwitchAllocator.scala:74:14]
input io_req_5_0_bits_tail, // @[SwitchAllocator.scala:74:14]
output io_req_4_0_ready, // @[SwitchAllocator.scala:74:14]
input io_req_4_0_valid, // @[SwitchAllocator.scala:74:14]
input io_req_4_0_bits_vc_sel_5_0, // @[SwitchAllocator.scala:74:14]
input io_req_4_0_bits_vc_sel_4_0, // @[SwitchAllocator.scala:74:14]
input io_req_4_0_bits_vc_sel_3_0, // @[SwitchAllocator.scala:74:14]
input io_req_4_0_bits_vc_sel_2_0, // @[SwitchAllocator.scala:74:14]
input io_req_4_0_bits_vc_sel_1_0, // @[SwitchAllocator.scala:74:14]
input io_req_4_0_bits_vc_sel_0_0, // @[SwitchAllocator.scala:74:14]
input io_req_4_0_bits_vc_sel_0_1, // @[SwitchAllocator.scala:74:14]
input io_req_4_0_bits_vc_sel_0_2, // @[SwitchAllocator.scala:74:14]
input io_req_4_0_bits_vc_sel_0_3, // @[SwitchAllocator.scala:74:14]
input io_req_4_0_bits_vc_sel_0_4, // @[SwitchAllocator.scala:74:14]
input io_req_4_0_bits_vc_sel_0_5, // @[SwitchAllocator.scala:74:14]
input io_req_4_0_bits_vc_sel_0_6, // @[SwitchAllocator.scala:74:14]
input io_req_4_0_bits_vc_sel_0_7, // @[SwitchAllocator.scala:74:14]
input io_req_4_0_bits_vc_sel_0_8, // @[SwitchAllocator.scala:74:14]
input io_req_4_0_bits_vc_sel_0_9, // @[SwitchAllocator.scala:74:14]
input io_req_4_0_bits_vc_sel_0_10, // @[SwitchAllocator.scala:74:14]
input io_req_4_0_bits_vc_sel_0_11, // @[SwitchAllocator.scala:74:14]
input io_req_4_0_bits_vc_sel_0_12, // @[SwitchAllocator.scala:74:14]
input io_req_4_0_bits_vc_sel_0_13, // @[SwitchAllocator.scala:74:14]
input io_req_4_0_bits_vc_sel_0_14, // @[SwitchAllocator.scala:74:14]
input io_req_4_0_bits_vc_sel_0_15, // @[SwitchAllocator.scala:74:14]
input io_req_4_0_bits_vc_sel_0_16, // @[SwitchAllocator.scala:74:14]
input io_req_4_0_bits_vc_sel_0_17, // @[SwitchAllocator.scala:74:14]
input io_req_4_0_bits_vc_sel_0_18, // @[SwitchAllocator.scala:74:14]
input io_req_4_0_bits_vc_sel_0_19, // @[SwitchAllocator.scala:74:14]
input io_req_4_0_bits_vc_sel_0_20, // @[SwitchAllocator.scala:74:14]
input io_req_4_0_bits_vc_sel_0_21, // @[SwitchAllocator.scala:74:14]
input io_req_4_0_bits_tail, // @[SwitchAllocator.scala:74:14]
output io_req_3_0_ready, // @[SwitchAllocator.scala:74:14]
input io_req_3_0_valid, // @[SwitchAllocator.scala:74:14]
input io_req_3_0_bits_vc_sel_5_0, // @[SwitchAllocator.scala:74:14]
input io_req_3_0_bits_vc_sel_4_0, // @[SwitchAllocator.scala:74:14]
input io_req_3_0_bits_vc_sel_3_0, // @[SwitchAllocator.scala:74:14]
input io_req_3_0_bits_vc_sel_2_0, // @[SwitchAllocator.scala:74:14]
input io_req_3_0_bits_vc_sel_1_0, // @[SwitchAllocator.scala:74:14]
input io_req_3_0_bits_vc_sel_0_0, // @[SwitchAllocator.scala:74:14]
input io_req_3_0_bits_vc_sel_0_1, // @[SwitchAllocator.scala:74:14]
input io_req_3_0_bits_vc_sel_0_2, // @[SwitchAllocator.scala:74:14]
input io_req_3_0_bits_vc_sel_0_3, // @[SwitchAllocator.scala:74:14]
input io_req_3_0_bits_vc_sel_0_4, // @[SwitchAllocator.scala:74:14]
input io_req_3_0_bits_vc_sel_0_5, // @[SwitchAllocator.scala:74:14]
input io_req_3_0_bits_vc_sel_0_6, // @[SwitchAllocator.scala:74:14]
input io_req_3_0_bits_vc_sel_0_7, // @[SwitchAllocator.scala:74:14]
input io_req_3_0_bits_vc_sel_0_8, // @[SwitchAllocator.scala:74:14]
input io_req_3_0_bits_vc_sel_0_9, // @[SwitchAllocator.scala:74:14]
input io_req_3_0_bits_vc_sel_0_10, // @[SwitchAllocator.scala:74:14]
input io_req_3_0_bits_vc_sel_0_11, // @[SwitchAllocator.scala:74:14]
input io_req_3_0_bits_vc_sel_0_12, // @[SwitchAllocator.scala:74:14]
input io_req_3_0_bits_vc_sel_0_13, // @[SwitchAllocator.scala:74:14]
input io_req_3_0_bits_vc_sel_0_14, // @[SwitchAllocator.scala:74:14]
input io_req_3_0_bits_vc_sel_0_15, // @[SwitchAllocator.scala:74:14]
input io_req_3_0_bits_vc_sel_0_16, // @[SwitchAllocator.scala:74:14]
input io_req_3_0_bits_vc_sel_0_17, // @[SwitchAllocator.scala:74:14]
input io_req_3_0_bits_vc_sel_0_18, // @[SwitchAllocator.scala:74:14]
input io_req_3_0_bits_vc_sel_0_19, // @[SwitchAllocator.scala:74:14]
input io_req_3_0_bits_vc_sel_0_20, // @[SwitchAllocator.scala:74:14]
input io_req_3_0_bits_vc_sel_0_21, // @[SwitchAllocator.scala:74:14]
input io_req_3_0_bits_tail, // @[SwitchAllocator.scala:74:14]
output io_req_2_0_ready, // @[SwitchAllocator.scala:74:14]
input io_req_2_0_valid, // @[SwitchAllocator.scala:74:14]
input io_req_2_0_bits_vc_sel_5_0, // @[SwitchAllocator.scala:74:14]
input io_req_2_0_bits_vc_sel_4_0, // @[SwitchAllocator.scala:74:14]
input io_req_2_0_bits_vc_sel_3_0, // @[SwitchAllocator.scala:74:14]
input io_req_2_0_bits_vc_sel_2_0, // @[SwitchAllocator.scala:74:14]
input io_req_2_0_bits_vc_sel_1_0, // @[SwitchAllocator.scala:74:14]
input io_req_2_0_bits_vc_sel_0_0, // @[SwitchAllocator.scala:74:14]
input io_req_2_0_bits_vc_sel_0_1, // @[SwitchAllocator.scala:74:14]
input io_req_2_0_bits_vc_sel_0_2, // @[SwitchAllocator.scala:74:14]
input io_req_2_0_bits_vc_sel_0_3, // @[SwitchAllocator.scala:74:14]
input io_req_2_0_bits_vc_sel_0_4, // @[SwitchAllocator.scala:74:14]
input io_req_2_0_bits_vc_sel_0_5, // @[SwitchAllocator.scala:74:14]
input io_req_2_0_bits_vc_sel_0_6, // @[SwitchAllocator.scala:74:14]
input io_req_2_0_bits_vc_sel_0_7, // @[SwitchAllocator.scala:74:14]
input io_req_2_0_bits_vc_sel_0_8, // @[SwitchAllocator.scala:74:14]
input io_req_2_0_bits_vc_sel_0_9, // @[SwitchAllocator.scala:74:14]
input io_req_2_0_bits_vc_sel_0_10, // @[SwitchAllocator.scala:74:14]
input io_req_2_0_bits_vc_sel_0_11, // @[SwitchAllocator.scala:74:14]
input io_req_2_0_bits_vc_sel_0_12, // @[SwitchAllocator.scala:74:14]
input io_req_2_0_bits_vc_sel_0_13, // @[SwitchAllocator.scala:74:14]
input io_req_2_0_bits_vc_sel_0_14, // @[SwitchAllocator.scala:74:14]
input io_req_2_0_bits_vc_sel_0_15, // @[SwitchAllocator.scala:74:14]
input io_req_2_0_bits_vc_sel_0_16, // @[SwitchAllocator.scala:74:14]
input io_req_2_0_bits_vc_sel_0_17, // @[SwitchAllocator.scala:74:14]
input io_req_2_0_bits_vc_sel_0_18, // @[SwitchAllocator.scala:74:14]
input io_req_2_0_bits_vc_sel_0_19, // @[SwitchAllocator.scala:74:14]
input io_req_2_0_bits_vc_sel_0_20, // @[SwitchAllocator.scala:74:14]
input io_req_2_0_bits_vc_sel_0_21, // @[SwitchAllocator.scala:74:14]
input io_req_2_0_bits_tail, // @[SwitchAllocator.scala:74:14]
output io_req_1_0_ready, // @[SwitchAllocator.scala:74:14]
input io_req_1_0_valid, // @[SwitchAllocator.scala:74:14]
input io_req_1_0_bits_vc_sel_5_0, // @[SwitchAllocator.scala:74:14]
input io_req_1_0_bits_vc_sel_4_0, // @[SwitchAllocator.scala:74:14]
input io_req_1_0_bits_vc_sel_3_0, // @[SwitchAllocator.scala:74:14]
input io_req_1_0_bits_vc_sel_2_0, // @[SwitchAllocator.scala:74:14]
input io_req_1_0_bits_vc_sel_1_0, // @[SwitchAllocator.scala:74:14]
input io_req_1_0_bits_vc_sel_0_0, // @[SwitchAllocator.scala:74:14]
input io_req_1_0_bits_vc_sel_0_1, // @[SwitchAllocator.scala:74:14]
input io_req_1_0_bits_vc_sel_0_2, // @[SwitchAllocator.scala:74:14]
input io_req_1_0_bits_vc_sel_0_3, // @[SwitchAllocator.scala:74:14]
input io_req_1_0_bits_vc_sel_0_4, // @[SwitchAllocator.scala:74:14]
input io_req_1_0_bits_vc_sel_0_5, // @[SwitchAllocator.scala:74:14]
input io_req_1_0_bits_vc_sel_0_6, // @[SwitchAllocator.scala:74:14]
input io_req_1_0_bits_vc_sel_0_7, // @[SwitchAllocator.scala:74:14]
input io_req_1_0_bits_vc_sel_0_8, // @[SwitchAllocator.scala:74:14]
input io_req_1_0_bits_vc_sel_0_9, // @[SwitchAllocator.scala:74:14]
input io_req_1_0_bits_vc_sel_0_10, // @[SwitchAllocator.scala:74:14]
input io_req_1_0_bits_vc_sel_0_11, // @[SwitchAllocator.scala:74:14]
input io_req_1_0_bits_vc_sel_0_12, // @[SwitchAllocator.scala:74:14]
input io_req_1_0_bits_vc_sel_0_13, // @[SwitchAllocator.scala:74:14]
input io_req_1_0_bits_vc_sel_0_14, // @[SwitchAllocator.scala:74:14]
input io_req_1_0_bits_vc_sel_0_15, // @[SwitchAllocator.scala:74:14]
input io_req_1_0_bits_vc_sel_0_16, // @[SwitchAllocator.scala:74:14]
input io_req_1_0_bits_vc_sel_0_17, // @[SwitchAllocator.scala:74:14]
input io_req_1_0_bits_vc_sel_0_18, // @[SwitchAllocator.scala:74:14]
input io_req_1_0_bits_vc_sel_0_19, // @[SwitchAllocator.scala:74:14]
input io_req_1_0_bits_vc_sel_0_20, // @[SwitchAllocator.scala:74:14]
input io_req_1_0_bits_vc_sel_0_21, // @[SwitchAllocator.scala:74:14]
input io_req_1_0_bits_tail, // @[SwitchAllocator.scala:74:14]
output io_req_0_0_ready, // @[SwitchAllocator.scala:74:14]
input io_req_0_0_valid, // @[SwitchAllocator.scala:74:14]
input io_req_0_0_bits_vc_sel_5_0, // @[SwitchAllocator.scala:74:14]
input io_req_0_0_bits_vc_sel_4_0, // @[SwitchAllocator.scala:74:14]
input io_req_0_0_bits_vc_sel_3_0, // @[SwitchAllocator.scala:74:14]
input io_req_0_0_bits_vc_sel_2_0, // @[SwitchAllocator.scala:74:14]
input io_req_0_0_bits_vc_sel_1_0, // @[SwitchAllocator.scala:74:14]
input io_req_0_0_bits_tail, // @[SwitchAllocator.scala:74:14]
output io_credit_alloc_5_0_alloc, // @[SwitchAllocator.scala:74:14]
output io_credit_alloc_5_0_tail, // @[SwitchAllocator.scala:74:14]
output io_credit_alloc_4_0_alloc, // @[SwitchAllocator.scala:74:14]
output io_credit_alloc_4_0_tail, // @[SwitchAllocator.scala:74:14]
output io_credit_alloc_3_0_alloc, // @[SwitchAllocator.scala:74:14]
output io_credit_alloc_3_0_tail, // @[SwitchAllocator.scala:74:14]
output io_credit_alloc_2_0_alloc, // @[SwitchAllocator.scala:74:14]
output io_credit_alloc_2_0_tail, // @[SwitchAllocator.scala:74:14]
output io_credit_alloc_1_0_alloc, // @[SwitchAllocator.scala:74:14]
output io_credit_alloc_1_0_tail, // @[SwitchAllocator.scala:74:14]
output io_credit_alloc_0_2_alloc, // @[SwitchAllocator.scala:74:14]
output io_credit_alloc_0_3_alloc, // @[SwitchAllocator.scala:74:14]
output io_credit_alloc_0_8_alloc, // @[SwitchAllocator.scala:74:14]
output io_credit_alloc_0_9_alloc, // @[SwitchAllocator.scala:74:14]
output io_credit_alloc_0_12_alloc, // @[SwitchAllocator.scala:74:14]
output io_credit_alloc_0_13_alloc, // @[SwitchAllocator.scala:74:14]
output io_credit_alloc_0_16_alloc, // @[SwitchAllocator.scala:74:14]
output io_credit_alloc_0_17_alloc, // @[SwitchAllocator.scala:74:14]
output io_credit_alloc_0_18_alloc, // @[SwitchAllocator.scala:74:14]
output io_credit_alloc_0_19_alloc, // @[SwitchAllocator.scala:74:14]
output io_credit_alloc_0_20_alloc, // @[SwitchAllocator.scala:74:14]
output io_credit_alloc_0_21_alloc, // @[SwitchAllocator.scala:74:14]
output io_switch_sel_5_0_5_0, // @[SwitchAllocator.scala:74:14]
output io_switch_sel_5_0_4_0, // @[SwitchAllocator.scala:74:14]
output io_switch_sel_5_0_3_0, // @[SwitchAllocator.scala:74:14]
output io_switch_sel_5_0_2_0, // @[SwitchAllocator.scala:74:14]
output io_switch_sel_5_0_1_0, // @[SwitchAllocator.scala:74:14]
output io_switch_sel_5_0_0_0, // @[SwitchAllocator.scala:74:14]
output io_switch_sel_4_0_5_0, // @[SwitchAllocator.scala:74:14]
output io_switch_sel_4_0_4_0, // @[SwitchAllocator.scala:74:14]
output io_switch_sel_4_0_3_0, // @[SwitchAllocator.scala:74:14]
output io_switch_sel_4_0_2_0, // @[SwitchAllocator.scala:74:14]
output io_switch_sel_4_0_1_0, // @[SwitchAllocator.scala:74:14]
output io_switch_sel_4_0_0_0, // @[SwitchAllocator.scala:74:14]
output io_switch_sel_3_0_5_0, // @[SwitchAllocator.scala:74:14]
output io_switch_sel_3_0_4_0, // @[SwitchAllocator.scala:74:14]
output io_switch_sel_3_0_3_0, // @[SwitchAllocator.scala:74:14]
output io_switch_sel_3_0_2_0, // @[SwitchAllocator.scala:74:14]
output io_switch_sel_3_0_1_0, // @[SwitchAllocator.scala:74:14]
output io_switch_sel_3_0_0_0, // @[SwitchAllocator.scala:74:14]
output io_switch_sel_2_0_5_0, // @[SwitchAllocator.scala:74:14]
output io_switch_sel_2_0_4_0, // @[SwitchAllocator.scala:74:14]
output io_switch_sel_2_0_3_0, // @[SwitchAllocator.scala:74:14]
output io_switch_sel_2_0_2_0, // @[SwitchAllocator.scala:74:14]
output io_switch_sel_2_0_1_0, // @[SwitchAllocator.scala:74:14]
output io_switch_sel_2_0_0_0, // @[SwitchAllocator.scala:74:14]
output io_switch_sel_1_0_5_0, // @[SwitchAllocator.scala:74:14]
output io_switch_sel_1_0_4_0, // @[SwitchAllocator.scala:74:14]
output io_switch_sel_1_0_3_0, // @[SwitchAllocator.scala:74:14]
output io_switch_sel_1_0_2_0, // @[SwitchAllocator.scala:74:14]
output io_switch_sel_1_0_1_0, // @[SwitchAllocator.scala:74:14]
output io_switch_sel_1_0_0_0, // @[SwitchAllocator.scala:74:14]
output io_switch_sel_0_0_5_0, // @[SwitchAllocator.scala:74:14]
output io_switch_sel_0_0_4_0, // @[SwitchAllocator.scala:74:14]
output io_switch_sel_0_0_3_0, // @[SwitchAllocator.scala:74:14]
output io_switch_sel_0_0_2_0, // @[SwitchAllocator.scala:74:14]
output io_switch_sel_0_0_1_0 // @[SwitchAllocator.scala:74:14]
);
wire _arbs_5_io_in_0_ready; // @[SwitchAllocator.scala:83:45]
wire _arbs_5_io_in_1_ready; // @[SwitchAllocator.scala:83:45]
wire _arbs_5_io_in_2_ready; // @[SwitchAllocator.scala:83:45]
wire _arbs_5_io_in_3_ready; // @[SwitchAllocator.scala:83:45]
wire _arbs_5_io_in_4_ready; // @[SwitchAllocator.scala:83:45]
wire _arbs_5_io_in_5_ready; // @[SwitchAllocator.scala:83:45]
wire _arbs_5_io_out_0_valid; // @[SwitchAllocator.scala:83:45]
wire _arbs_5_io_out_0_bits_vc_sel_5_0; // @[SwitchAllocator.scala:83:45]
wire _arbs_5_io_out_0_bits_tail; // @[SwitchAllocator.scala:83:45]
wire [5:0] _arbs_5_io_chosen_oh_0; // @[SwitchAllocator.scala:83:45]
wire _arbs_4_io_in_0_ready; // @[SwitchAllocator.scala:83:45]
wire _arbs_4_io_in_1_ready; // @[SwitchAllocator.scala:83:45]
wire _arbs_4_io_in_2_ready; // @[SwitchAllocator.scala:83:45]
wire _arbs_4_io_in_3_ready; // @[SwitchAllocator.scala:83:45]
wire _arbs_4_io_in_4_ready; // @[SwitchAllocator.scala:83:45]
wire _arbs_4_io_in_5_ready; // @[SwitchAllocator.scala:83:45]
wire _arbs_4_io_out_0_valid; // @[SwitchAllocator.scala:83:45]
wire _arbs_4_io_out_0_bits_vc_sel_4_0; // @[SwitchAllocator.scala:83:45]
wire _arbs_4_io_out_0_bits_tail; // @[SwitchAllocator.scala:83:45]
wire [5:0] _arbs_4_io_chosen_oh_0; // @[SwitchAllocator.scala:83:45]
wire _arbs_3_io_in_0_ready; // @[SwitchAllocator.scala:83:45]
wire _arbs_3_io_in_1_ready; // @[SwitchAllocator.scala:83:45]
wire _arbs_3_io_in_2_ready; // @[SwitchAllocator.scala:83:45]
wire _arbs_3_io_in_3_ready; // @[SwitchAllocator.scala:83:45]
wire _arbs_3_io_in_4_ready; // @[SwitchAllocator.scala:83:45]
wire _arbs_3_io_in_5_ready; // @[SwitchAllocator.scala:83:45]
wire _arbs_3_io_out_0_valid; // @[SwitchAllocator.scala:83:45]
wire _arbs_3_io_out_0_bits_vc_sel_3_0; // @[SwitchAllocator.scala:83:45]
wire _arbs_3_io_out_0_bits_tail; // @[SwitchAllocator.scala:83:45]
wire [5:0] _arbs_3_io_chosen_oh_0; // @[SwitchAllocator.scala:83:45]
wire _arbs_2_io_in_0_ready; // @[SwitchAllocator.scala:83:45]
wire _arbs_2_io_in_1_ready; // @[SwitchAllocator.scala:83:45]
wire _arbs_2_io_in_2_ready; // @[SwitchAllocator.scala:83:45]
wire _arbs_2_io_in_3_ready; // @[SwitchAllocator.scala:83:45]
wire _arbs_2_io_in_4_ready; // @[SwitchAllocator.scala:83:45]
wire _arbs_2_io_in_5_ready; // @[SwitchAllocator.scala:83:45]
wire _arbs_2_io_out_0_valid; // @[SwitchAllocator.scala:83:45]
wire _arbs_2_io_out_0_bits_vc_sel_2_0; // @[SwitchAllocator.scala:83:45]
wire _arbs_2_io_out_0_bits_tail; // @[SwitchAllocator.scala:83:45]
wire [5:0] _arbs_2_io_chosen_oh_0; // @[SwitchAllocator.scala:83:45]
wire _arbs_1_io_in_0_ready; // @[SwitchAllocator.scala:83:45]
wire _arbs_1_io_in_1_ready; // @[SwitchAllocator.scala:83:45]
wire _arbs_1_io_in_2_ready; // @[SwitchAllocator.scala:83:45]
wire _arbs_1_io_in_3_ready; // @[SwitchAllocator.scala:83:45]
wire _arbs_1_io_in_4_ready; // @[SwitchAllocator.scala:83:45]
wire _arbs_1_io_in_5_ready; // @[SwitchAllocator.scala:83:45]
wire _arbs_1_io_out_0_valid; // @[SwitchAllocator.scala:83:45]
wire _arbs_1_io_out_0_bits_vc_sel_1_0; // @[SwitchAllocator.scala:83:45]
wire _arbs_1_io_out_0_bits_tail; // @[SwitchAllocator.scala:83:45]
wire [5:0] _arbs_1_io_chosen_oh_0; // @[SwitchAllocator.scala:83:45]
wire _arbs_0_io_in_1_ready; // @[SwitchAllocator.scala:83:45]
wire _arbs_0_io_in_2_ready; // @[SwitchAllocator.scala:83:45]
wire _arbs_0_io_in_3_ready; // @[SwitchAllocator.scala:83:45]
wire _arbs_0_io_in_4_ready; // @[SwitchAllocator.scala:83:45]
wire _arbs_0_io_in_5_ready; // @[SwitchAllocator.scala:83:45]
wire _arbs_0_io_out_0_valid; // @[SwitchAllocator.scala:83:45]
wire _arbs_0_io_out_0_bits_vc_sel_0_2; // @[SwitchAllocator.scala:83:45]
wire _arbs_0_io_out_0_bits_vc_sel_0_3; // @[SwitchAllocator.scala:83:45]
wire _arbs_0_io_out_0_bits_vc_sel_0_8; // @[SwitchAllocator.scala:83:45]
wire _arbs_0_io_out_0_bits_vc_sel_0_9; // @[SwitchAllocator.scala:83:45]
wire _arbs_0_io_out_0_bits_vc_sel_0_12; // @[SwitchAllocator.scala:83:45]
wire _arbs_0_io_out_0_bits_vc_sel_0_13; // @[SwitchAllocator.scala:83:45]
wire _arbs_0_io_out_0_bits_vc_sel_0_16; // @[SwitchAllocator.scala:83:45]
wire _arbs_0_io_out_0_bits_vc_sel_0_17; // @[SwitchAllocator.scala:83:45]
wire _arbs_0_io_out_0_bits_vc_sel_0_18; // @[SwitchAllocator.scala:83:45]
wire _arbs_0_io_out_0_bits_vc_sel_0_19; // @[SwitchAllocator.scala:83:45]
wire _arbs_0_io_out_0_bits_vc_sel_0_20; // @[SwitchAllocator.scala:83:45]
wire _arbs_0_io_out_0_bits_vc_sel_0_21; // @[SwitchAllocator.scala:83:45]
wire [5:0] _arbs_0_io_chosen_oh_0; // @[SwitchAllocator.scala:83:45]
wire arbs_1_io_in_0_valid = io_req_0_0_valid & io_req_0_0_bits_vc_sel_1_0; // @[SwitchAllocator.scala:95:37]
wire arbs_2_io_in_0_valid = io_req_0_0_valid & io_req_0_0_bits_vc_sel_2_0; // @[SwitchAllocator.scala:95:37]
wire arbs_3_io_in_0_valid = io_req_0_0_valid & io_req_0_0_bits_vc_sel_3_0; // @[SwitchAllocator.scala:95:37]
wire arbs_4_io_in_0_valid = io_req_0_0_valid & io_req_0_0_bits_vc_sel_4_0; // @[SwitchAllocator.scala:95:37]
wire arbs_5_io_in_0_valid = io_req_0_0_valid & io_req_0_0_bits_vc_sel_5_0; // @[SwitchAllocator.scala:95:37]
wire arbs_0_io_in_1_valid = io_req_1_0_valid & (io_req_1_0_bits_vc_sel_0_0 | io_req_1_0_bits_vc_sel_0_1 | io_req_1_0_bits_vc_sel_0_2 | io_req_1_0_bits_vc_sel_0_3 | io_req_1_0_bits_vc_sel_0_4 | io_req_1_0_bits_vc_sel_0_5 | io_req_1_0_bits_vc_sel_0_6 | io_req_1_0_bits_vc_sel_0_7 | io_req_1_0_bits_vc_sel_0_8 | io_req_1_0_bits_vc_sel_0_9 | io_req_1_0_bits_vc_sel_0_10 | io_req_1_0_bits_vc_sel_0_11 | io_req_1_0_bits_vc_sel_0_12 | io_req_1_0_bits_vc_sel_0_13 | io_req_1_0_bits_vc_sel_0_14 | io_req_1_0_bits_vc_sel_0_15 | io_req_1_0_bits_vc_sel_0_16 | io_req_1_0_bits_vc_sel_0_17 | io_req_1_0_bits_vc_sel_0_18 | io_req_1_0_bits_vc_sel_0_19 | io_req_1_0_bits_vc_sel_0_20 | io_req_1_0_bits_vc_sel_0_21); // @[SwitchAllocator.scala:95:{37,65}]
wire arbs_1_io_in_1_valid = io_req_1_0_valid & io_req_1_0_bits_vc_sel_1_0; // @[SwitchAllocator.scala:95:37]
wire arbs_2_io_in_1_valid = io_req_1_0_valid & io_req_1_0_bits_vc_sel_2_0; // @[SwitchAllocator.scala:95:37]
wire arbs_3_io_in_1_valid = io_req_1_0_valid & io_req_1_0_bits_vc_sel_3_0; // @[SwitchAllocator.scala:95:37]
wire arbs_4_io_in_1_valid = io_req_1_0_valid & io_req_1_0_bits_vc_sel_4_0; // @[SwitchAllocator.scala:95:37]
wire arbs_5_io_in_1_valid = io_req_1_0_valid & io_req_1_0_bits_vc_sel_5_0; // @[SwitchAllocator.scala:95:37]
wire arbs_0_io_in_2_valid = io_req_2_0_valid & (io_req_2_0_bits_vc_sel_0_0 | io_req_2_0_bits_vc_sel_0_1 | io_req_2_0_bits_vc_sel_0_2 | io_req_2_0_bits_vc_sel_0_3 | io_req_2_0_bits_vc_sel_0_4 | io_req_2_0_bits_vc_sel_0_5 | io_req_2_0_bits_vc_sel_0_6 | io_req_2_0_bits_vc_sel_0_7 | io_req_2_0_bits_vc_sel_0_8 | io_req_2_0_bits_vc_sel_0_9 | io_req_2_0_bits_vc_sel_0_10 | io_req_2_0_bits_vc_sel_0_11 | io_req_2_0_bits_vc_sel_0_12 | io_req_2_0_bits_vc_sel_0_13 | io_req_2_0_bits_vc_sel_0_14 | io_req_2_0_bits_vc_sel_0_15 | io_req_2_0_bits_vc_sel_0_16 | io_req_2_0_bits_vc_sel_0_17 | io_req_2_0_bits_vc_sel_0_18 | io_req_2_0_bits_vc_sel_0_19 | io_req_2_0_bits_vc_sel_0_20 | io_req_2_0_bits_vc_sel_0_21); // @[SwitchAllocator.scala:95:{37,65}]
wire arbs_1_io_in_2_valid = io_req_2_0_valid & io_req_2_0_bits_vc_sel_1_0; // @[SwitchAllocator.scala:95:37]
wire arbs_2_io_in_2_valid = io_req_2_0_valid & io_req_2_0_bits_vc_sel_2_0; // @[SwitchAllocator.scala:95:37]
wire arbs_3_io_in_2_valid = io_req_2_0_valid & io_req_2_0_bits_vc_sel_3_0; // @[SwitchAllocator.scala:95:37]
wire arbs_4_io_in_2_valid = io_req_2_0_valid & io_req_2_0_bits_vc_sel_4_0; // @[SwitchAllocator.scala:95:37]
wire arbs_5_io_in_2_valid = io_req_2_0_valid & io_req_2_0_bits_vc_sel_5_0; // @[SwitchAllocator.scala:95:37]
wire arbs_0_io_in_3_valid = io_req_3_0_valid & (io_req_3_0_bits_vc_sel_0_0 | io_req_3_0_bits_vc_sel_0_1 | io_req_3_0_bits_vc_sel_0_2 | io_req_3_0_bits_vc_sel_0_3 | io_req_3_0_bits_vc_sel_0_4 | io_req_3_0_bits_vc_sel_0_5 | io_req_3_0_bits_vc_sel_0_6 | io_req_3_0_bits_vc_sel_0_7 | io_req_3_0_bits_vc_sel_0_8 | io_req_3_0_bits_vc_sel_0_9 | io_req_3_0_bits_vc_sel_0_10 | io_req_3_0_bits_vc_sel_0_11 | io_req_3_0_bits_vc_sel_0_12 | io_req_3_0_bits_vc_sel_0_13 | io_req_3_0_bits_vc_sel_0_14 | io_req_3_0_bits_vc_sel_0_15 | io_req_3_0_bits_vc_sel_0_16 | io_req_3_0_bits_vc_sel_0_17 | io_req_3_0_bits_vc_sel_0_18 | io_req_3_0_bits_vc_sel_0_19 | io_req_3_0_bits_vc_sel_0_20 | io_req_3_0_bits_vc_sel_0_21); // @[SwitchAllocator.scala:95:{37,65}]
wire arbs_1_io_in_3_valid = io_req_3_0_valid & io_req_3_0_bits_vc_sel_1_0; // @[SwitchAllocator.scala:95:37]
wire arbs_2_io_in_3_valid = io_req_3_0_valid & io_req_3_0_bits_vc_sel_2_0; // @[SwitchAllocator.scala:95:37]
wire arbs_3_io_in_3_valid = io_req_3_0_valid & io_req_3_0_bits_vc_sel_3_0; // @[SwitchAllocator.scala:95:37]
wire arbs_4_io_in_3_valid = io_req_3_0_valid & io_req_3_0_bits_vc_sel_4_0; // @[SwitchAllocator.scala:95:37]
wire arbs_5_io_in_3_valid = io_req_3_0_valid & io_req_3_0_bits_vc_sel_5_0; // @[SwitchAllocator.scala:95:37]
wire arbs_0_io_in_4_valid = io_req_4_0_valid & (io_req_4_0_bits_vc_sel_0_0 | io_req_4_0_bits_vc_sel_0_1 | io_req_4_0_bits_vc_sel_0_2 | io_req_4_0_bits_vc_sel_0_3 | io_req_4_0_bits_vc_sel_0_4 | io_req_4_0_bits_vc_sel_0_5 | io_req_4_0_bits_vc_sel_0_6 | io_req_4_0_bits_vc_sel_0_7 | io_req_4_0_bits_vc_sel_0_8 | io_req_4_0_bits_vc_sel_0_9 | io_req_4_0_bits_vc_sel_0_10 | io_req_4_0_bits_vc_sel_0_11 | io_req_4_0_bits_vc_sel_0_12 | io_req_4_0_bits_vc_sel_0_13 | io_req_4_0_bits_vc_sel_0_14 | io_req_4_0_bits_vc_sel_0_15 | io_req_4_0_bits_vc_sel_0_16 | io_req_4_0_bits_vc_sel_0_17 | io_req_4_0_bits_vc_sel_0_18 | io_req_4_0_bits_vc_sel_0_19 | io_req_4_0_bits_vc_sel_0_20 | io_req_4_0_bits_vc_sel_0_21); // @[SwitchAllocator.scala:95:{37,65}]
wire arbs_1_io_in_4_valid = io_req_4_0_valid & io_req_4_0_bits_vc_sel_1_0; // @[SwitchAllocator.scala:95:37]
wire arbs_2_io_in_4_valid = io_req_4_0_valid & io_req_4_0_bits_vc_sel_2_0; // @[SwitchAllocator.scala:95:37]
wire arbs_3_io_in_4_valid = io_req_4_0_valid & io_req_4_0_bits_vc_sel_3_0; // @[SwitchAllocator.scala:95:37]
wire arbs_4_io_in_4_valid = io_req_4_0_valid & io_req_4_0_bits_vc_sel_4_0; // @[SwitchAllocator.scala:95:37]
wire arbs_5_io_in_4_valid = io_req_4_0_valid & io_req_4_0_bits_vc_sel_5_0; // @[SwitchAllocator.scala:95:37]
wire arbs_0_io_in_5_valid = io_req_5_0_valid & (io_req_5_0_bits_vc_sel_0_0 | io_req_5_0_bits_vc_sel_0_1 | io_req_5_0_bits_vc_sel_0_2 | io_req_5_0_bits_vc_sel_0_3 | io_req_5_0_bits_vc_sel_0_4 | io_req_5_0_bits_vc_sel_0_5 | io_req_5_0_bits_vc_sel_0_6 | io_req_5_0_bits_vc_sel_0_7 | io_req_5_0_bits_vc_sel_0_8 | io_req_5_0_bits_vc_sel_0_9 | io_req_5_0_bits_vc_sel_0_10 | io_req_5_0_bits_vc_sel_0_11 | io_req_5_0_bits_vc_sel_0_12 | io_req_5_0_bits_vc_sel_0_13 | io_req_5_0_bits_vc_sel_0_14 | io_req_5_0_bits_vc_sel_0_15 | io_req_5_0_bits_vc_sel_0_16 | io_req_5_0_bits_vc_sel_0_17 | io_req_5_0_bits_vc_sel_0_18 | io_req_5_0_bits_vc_sel_0_19 | io_req_5_0_bits_vc_sel_0_20 | io_req_5_0_bits_vc_sel_0_21); // @[SwitchAllocator.scala:95:{37,65}]
wire arbs_1_io_in_5_valid = io_req_5_0_valid & io_req_5_0_bits_vc_sel_1_0; // @[SwitchAllocator.scala:95:37]
wire arbs_2_io_in_5_valid = io_req_5_0_valid & io_req_5_0_bits_vc_sel_2_0; // @[SwitchAllocator.scala:95:37]
wire arbs_3_io_in_5_valid = io_req_5_0_valid & io_req_5_0_bits_vc_sel_3_0; // @[SwitchAllocator.scala:95:37]
wire arbs_4_io_in_5_valid = io_req_5_0_valid & io_req_5_0_bits_vc_sel_4_0; // @[SwitchAllocator.scala:95:37]
wire arbs_5_io_in_5_valid = io_req_5_0_valid & io_req_5_0_bits_vc_sel_5_0; // @[SwitchAllocator.scala:95:37]
wire io_credit_alloc_1_0_alloc_0 = _arbs_1_io_out_0_valid & _arbs_1_io_out_0_bits_vc_sel_1_0; // @[SwitchAllocator.scala:83:45, :120:33]
wire io_credit_alloc_2_0_alloc_0 = _arbs_2_io_out_0_valid & _arbs_2_io_out_0_bits_vc_sel_2_0; // @[SwitchAllocator.scala:83:45, :120:33]
wire io_credit_alloc_3_0_alloc_0 = _arbs_3_io_out_0_valid & _arbs_3_io_out_0_bits_vc_sel_3_0; // @[SwitchAllocator.scala:83:45, :120:33]
wire io_credit_alloc_4_0_alloc_0 = _arbs_4_io_out_0_valid & _arbs_4_io_out_0_bits_vc_sel_4_0; // @[SwitchAllocator.scala:83:45, :120:33]
wire io_credit_alloc_5_0_alloc_0 = _arbs_5_io_out_0_valid & _arbs_5_io_out_0_bits_vc_sel_5_0; // @[SwitchAllocator.scala:83:45, :120:33]
SwitchArbiter_1 arbs_0 ( // @[SwitchAllocator.scala:83:45]
.clock (clock),
.reset (reset),
.io_in_0_ready (/* unused */),
.io_in_0_valid (1'h0),
.io_in_0_bits_vc_sel_5_0 (io_req_0_0_bits_vc_sel_5_0),
.io_in_0_bits_vc_sel_4_0 (io_req_0_0_bits_vc_sel_4_0),
.io_in_0_bits_vc_sel_3_0 (io_req_0_0_bits_vc_sel_3_0),
.io_in_0_bits_vc_sel_2_0 (io_req_0_0_bits_vc_sel_2_0),
.io_in_0_bits_vc_sel_1_0 (io_req_0_0_bits_vc_sel_1_0),
.io_in_0_bits_tail (io_req_0_0_bits_tail),
.io_in_1_ready (_arbs_0_io_in_1_ready),
.io_in_1_valid (arbs_0_io_in_1_valid), // @[SwitchAllocator.scala:95:37]
.io_in_1_bits_vc_sel_5_0 (io_req_1_0_bits_vc_sel_5_0),
.io_in_1_bits_vc_sel_4_0 (io_req_1_0_bits_vc_sel_4_0),
.io_in_1_bits_vc_sel_3_0 (io_req_1_0_bits_vc_sel_3_0),
.io_in_1_bits_vc_sel_2_0 (io_req_1_0_bits_vc_sel_2_0),
.io_in_1_bits_vc_sel_1_0 (io_req_1_0_bits_vc_sel_1_0),
.io_in_1_bits_vc_sel_0_2 (io_req_1_0_bits_vc_sel_0_2),
.io_in_1_bits_vc_sel_0_3 (io_req_1_0_bits_vc_sel_0_3),
.io_in_1_bits_vc_sel_0_8 (io_req_1_0_bits_vc_sel_0_8),
.io_in_1_bits_vc_sel_0_9 (io_req_1_0_bits_vc_sel_0_9),
.io_in_1_bits_vc_sel_0_12 (io_req_1_0_bits_vc_sel_0_12),
.io_in_1_bits_vc_sel_0_13 (io_req_1_0_bits_vc_sel_0_13),
.io_in_1_bits_vc_sel_0_16 (io_req_1_0_bits_vc_sel_0_16),
.io_in_1_bits_vc_sel_0_17 (io_req_1_0_bits_vc_sel_0_17),
.io_in_1_bits_vc_sel_0_18 (io_req_1_0_bits_vc_sel_0_18),
.io_in_1_bits_vc_sel_0_19 (io_req_1_0_bits_vc_sel_0_19),
.io_in_1_bits_vc_sel_0_20 (io_req_1_0_bits_vc_sel_0_20),
.io_in_1_bits_vc_sel_0_21 (io_req_1_0_bits_vc_sel_0_21),
.io_in_1_bits_tail (io_req_1_0_bits_tail),
.io_in_2_ready (_arbs_0_io_in_2_ready),
.io_in_2_valid (arbs_0_io_in_2_valid), // @[SwitchAllocator.scala:95:37]
.io_in_2_bits_vc_sel_5_0 (io_req_2_0_bits_vc_sel_5_0),
.io_in_2_bits_vc_sel_4_0 (io_req_2_0_bits_vc_sel_4_0),
.io_in_2_bits_vc_sel_3_0 (io_req_2_0_bits_vc_sel_3_0),
.io_in_2_bits_vc_sel_2_0 (io_req_2_0_bits_vc_sel_2_0),
.io_in_2_bits_vc_sel_1_0 (io_req_2_0_bits_vc_sel_1_0),
.io_in_2_bits_vc_sel_0_2 (io_req_2_0_bits_vc_sel_0_2),
.io_in_2_bits_vc_sel_0_3 (io_req_2_0_bits_vc_sel_0_3),
.io_in_2_bits_vc_sel_0_8 (io_req_2_0_bits_vc_sel_0_8),
.io_in_2_bits_vc_sel_0_9 (io_req_2_0_bits_vc_sel_0_9),
.io_in_2_bits_vc_sel_0_12 (io_req_2_0_bits_vc_sel_0_12),
.io_in_2_bits_vc_sel_0_13 (io_req_2_0_bits_vc_sel_0_13),
.io_in_2_bits_vc_sel_0_16 (io_req_2_0_bits_vc_sel_0_16),
.io_in_2_bits_vc_sel_0_17 (io_req_2_0_bits_vc_sel_0_17),
.io_in_2_bits_vc_sel_0_18 (io_req_2_0_bits_vc_sel_0_18),
.io_in_2_bits_vc_sel_0_19 (io_req_2_0_bits_vc_sel_0_19),
.io_in_2_bits_vc_sel_0_20 (io_req_2_0_bits_vc_sel_0_20),
.io_in_2_bits_vc_sel_0_21 (io_req_2_0_bits_vc_sel_0_21),
.io_in_2_bits_tail (io_req_2_0_bits_tail),
.io_in_3_ready (_arbs_0_io_in_3_ready),
.io_in_3_valid (arbs_0_io_in_3_valid), // @[SwitchAllocator.scala:95:37]
.io_in_3_bits_vc_sel_5_0 (io_req_3_0_bits_vc_sel_5_0),
.io_in_3_bits_vc_sel_4_0 (io_req_3_0_bits_vc_sel_4_0),
.io_in_3_bits_vc_sel_3_0 (io_req_3_0_bits_vc_sel_3_0),
.io_in_3_bits_vc_sel_2_0 (io_req_3_0_bits_vc_sel_2_0),
.io_in_3_bits_vc_sel_1_0 (io_req_3_0_bits_vc_sel_1_0),
.io_in_3_bits_vc_sel_0_2 (io_req_3_0_bits_vc_sel_0_2),
.io_in_3_bits_vc_sel_0_3 (io_req_3_0_bits_vc_sel_0_3),
.io_in_3_bits_vc_sel_0_8 (io_req_3_0_bits_vc_sel_0_8),
.io_in_3_bits_vc_sel_0_9 (io_req_3_0_bits_vc_sel_0_9),
.io_in_3_bits_vc_sel_0_12 (io_req_3_0_bits_vc_sel_0_12),
.io_in_3_bits_vc_sel_0_13 (io_req_3_0_bits_vc_sel_0_13),
.io_in_3_bits_vc_sel_0_16 (io_req_3_0_bits_vc_sel_0_16),
.io_in_3_bits_vc_sel_0_17 (io_req_3_0_bits_vc_sel_0_17),
.io_in_3_bits_vc_sel_0_18 (io_req_3_0_bits_vc_sel_0_18),
.io_in_3_bits_vc_sel_0_19 (io_req_3_0_bits_vc_sel_0_19),
.io_in_3_bits_vc_sel_0_20 (io_req_3_0_bits_vc_sel_0_20),
.io_in_3_bits_vc_sel_0_21 (io_req_3_0_bits_vc_sel_0_21),
.io_in_3_bits_tail (io_req_3_0_bits_tail),
.io_in_4_ready (_arbs_0_io_in_4_ready),
.io_in_4_valid (arbs_0_io_in_4_valid), // @[SwitchAllocator.scala:95:37]
.io_in_4_bits_vc_sel_5_0 (io_req_4_0_bits_vc_sel_5_0),
.io_in_4_bits_vc_sel_4_0 (io_req_4_0_bits_vc_sel_4_0),
.io_in_4_bits_vc_sel_3_0 (io_req_4_0_bits_vc_sel_3_0),
.io_in_4_bits_vc_sel_2_0 (io_req_4_0_bits_vc_sel_2_0),
.io_in_4_bits_vc_sel_1_0 (io_req_4_0_bits_vc_sel_1_0),
.io_in_4_bits_vc_sel_0_2 (io_req_4_0_bits_vc_sel_0_2),
.io_in_4_bits_vc_sel_0_3 (io_req_4_0_bits_vc_sel_0_3),
.io_in_4_bits_vc_sel_0_8 (io_req_4_0_bits_vc_sel_0_8),
.io_in_4_bits_vc_sel_0_9 (io_req_4_0_bits_vc_sel_0_9),
.io_in_4_bits_vc_sel_0_12 (io_req_4_0_bits_vc_sel_0_12),
.io_in_4_bits_vc_sel_0_13 (io_req_4_0_bits_vc_sel_0_13),
.io_in_4_bits_vc_sel_0_16 (io_req_4_0_bits_vc_sel_0_16),
.io_in_4_bits_vc_sel_0_17 (io_req_4_0_bits_vc_sel_0_17),
.io_in_4_bits_vc_sel_0_18 (io_req_4_0_bits_vc_sel_0_18),
.io_in_4_bits_vc_sel_0_19 (io_req_4_0_bits_vc_sel_0_19),
.io_in_4_bits_vc_sel_0_20 (io_req_4_0_bits_vc_sel_0_20),
.io_in_4_bits_vc_sel_0_21 (io_req_4_0_bits_vc_sel_0_21),
.io_in_4_bits_tail (io_req_4_0_bits_tail),
.io_in_5_ready (_arbs_0_io_in_5_ready),
.io_in_5_valid (arbs_0_io_in_5_valid), // @[SwitchAllocator.scala:95:37]
.io_in_5_bits_vc_sel_5_0 (io_req_5_0_bits_vc_sel_5_0),
.io_in_5_bits_vc_sel_4_0 (io_req_5_0_bits_vc_sel_4_0),
.io_in_5_bits_vc_sel_3_0 (io_req_5_0_bits_vc_sel_3_0),
.io_in_5_bits_vc_sel_2_0 (io_req_5_0_bits_vc_sel_2_0),
.io_in_5_bits_vc_sel_1_0 (io_req_5_0_bits_vc_sel_1_0),
.io_in_5_bits_vc_sel_0_2 (io_req_5_0_bits_vc_sel_0_2),
.io_in_5_bits_vc_sel_0_3 (io_req_5_0_bits_vc_sel_0_3),
.io_in_5_bits_vc_sel_0_8 (io_req_5_0_bits_vc_sel_0_8),
.io_in_5_bits_vc_sel_0_9 (io_req_5_0_bits_vc_sel_0_9),
.io_in_5_bits_vc_sel_0_12 (io_req_5_0_bits_vc_sel_0_12),
.io_in_5_bits_vc_sel_0_13 (io_req_5_0_bits_vc_sel_0_13),
.io_in_5_bits_vc_sel_0_16 (io_req_5_0_bits_vc_sel_0_16),
.io_in_5_bits_vc_sel_0_17 (io_req_5_0_bits_vc_sel_0_17),
.io_in_5_bits_vc_sel_0_18 (io_req_5_0_bits_vc_sel_0_18),
.io_in_5_bits_vc_sel_0_19 (io_req_5_0_bits_vc_sel_0_19),
.io_in_5_bits_vc_sel_0_20 (io_req_5_0_bits_vc_sel_0_20),
.io_in_5_bits_vc_sel_0_21 (io_req_5_0_bits_vc_sel_0_21),
.io_in_5_bits_tail (io_req_5_0_bits_tail),
.io_out_0_valid (_arbs_0_io_out_0_valid),
.io_out_0_bits_vc_sel_5_0 (/* unused */),
.io_out_0_bits_vc_sel_4_0 (/* unused */),
.io_out_0_bits_vc_sel_3_0 (/* unused */),
.io_out_0_bits_vc_sel_2_0 (/* unused */),
.io_out_0_bits_vc_sel_1_0 (/* unused */),
.io_out_0_bits_vc_sel_0_2 (_arbs_0_io_out_0_bits_vc_sel_0_2),
.io_out_0_bits_vc_sel_0_3 (_arbs_0_io_out_0_bits_vc_sel_0_3),
.io_out_0_bits_vc_sel_0_8 (_arbs_0_io_out_0_bits_vc_sel_0_8),
.io_out_0_bits_vc_sel_0_9 (_arbs_0_io_out_0_bits_vc_sel_0_9),
.io_out_0_bits_vc_sel_0_12 (_arbs_0_io_out_0_bits_vc_sel_0_12),
.io_out_0_bits_vc_sel_0_13 (_arbs_0_io_out_0_bits_vc_sel_0_13),
.io_out_0_bits_vc_sel_0_16 (_arbs_0_io_out_0_bits_vc_sel_0_16),
.io_out_0_bits_vc_sel_0_17 (_arbs_0_io_out_0_bits_vc_sel_0_17),
.io_out_0_bits_vc_sel_0_18 (_arbs_0_io_out_0_bits_vc_sel_0_18),
.io_out_0_bits_vc_sel_0_19 (_arbs_0_io_out_0_bits_vc_sel_0_19),
.io_out_0_bits_vc_sel_0_20 (_arbs_0_io_out_0_bits_vc_sel_0_20),
.io_out_0_bits_vc_sel_0_21 (_arbs_0_io_out_0_bits_vc_sel_0_21),
.io_out_0_bits_tail (/* unused */),
.io_chosen_oh_0 (_arbs_0_io_chosen_oh_0)
); // @[SwitchAllocator.scala:83:45]
SwitchArbiter_1 arbs_1 ( // @[SwitchAllocator.scala:83:45]
.clock (clock),
.reset (reset),
.io_in_0_ready (_arbs_1_io_in_0_ready),
.io_in_0_valid (arbs_1_io_in_0_valid), // @[SwitchAllocator.scala:95:37]
.io_in_0_bits_vc_sel_5_0 (io_req_0_0_bits_vc_sel_5_0),
.io_in_0_bits_vc_sel_4_0 (io_req_0_0_bits_vc_sel_4_0),
.io_in_0_bits_vc_sel_3_0 (io_req_0_0_bits_vc_sel_3_0),
.io_in_0_bits_vc_sel_2_0 (io_req_0_0_bits_vc_sel_2_0),
.io_in_0_bits_vc_sel_1_0 (io_req_0_0_bits_vc_sel_1_0),
.io_in_0_bits_tail (io_req_0_0_bits_tail),
.io_in_1_ready (_arbs_1_io_in_1_ready),
.io_in_1_valid (arbs_1_io_in_1_valid), // @[SwitchAllocator.scala:95:37]
.io_in_1_bits_vc_sel_5_0 (io_req_1_0_bits_vc_sel_5_0),
.io_in_1_bits_vc_sel_4_0 (io_req_1_0_bits_vc_sel_4_0),
.io_in_1_bits_vc_sel_3_0 (io_req_1_0_bits_vc_sel_3_0),
.io_in_1_bits_vc_sel_2_0 (io_req_1_0_bits_vc_sel_2_0),
.io_in_1_bits_vc_sel_1_0 (io_req_1_0_bits_vc_sel_1_0),
.io_in_1_bits_vc_sel_0_2 (io_req_1_0_bits_vc_sel_0_2),
.io_in_1_bits_vc_sel_0_3 (io_req_1_0_bits_vc_sel_0_3),
.io_in_1_bits_vc_sel_0_8 (io_req_1_0_bits_vc_sel_0_8),
.io_in_1_bits_vc_sel_0_9 (io_req_1_0_bits_vc_sel_0_9),
.io_in_1_bits_vc_sel_0_12 (io_req_1_0_bits_vc_sel_0_12),
.io_in_1_bits_vc_sel_0_13 (io_req_1_0_bits_vc_sel_0_13),
.io_in_1_bits_vc_sel_0_16 (io_req_1_0_bits_vc_sel_0_16),
.io_in_1_bits_vc_sel_0_17 (io_req_1_0_bits_vc_sel_0_17),
.io_in_1_bits_vc_sel_0_18 (io_req_1_0_bits_vc_sel_0_18),
.io_in_1_bits_vc_sel_0_19 (io_req_1_0_bits_vc_sel_0_19),
.io_in_1_bits_vc_sel_0_20 (io_req_1_0_bits_vc_sel_0_20),
.io_in_1_bits_vc_sel_0_21 (io_req_1_0_bits_vc_sel_0_21),
.io_in_1_bits_tail (io_req_1_0_bits_tail),
.io_in_2_ready (_arbs_1_io_in_2_ready),
.io_in_2_valid (arbs_1_io_in_2_valid), // @[SwitchAllocator.scala:95:37]
.io_in_2_bits_vc_sel_5_0 (io_req_2_0_bits_vc_sel_5_0),
.io_in_2_bits_vc_sel_4_0 (io_req_2_0_bits_vc_sel_4_0),
.io_in_2_bits_vc_sel_3_0 (io_req_2_0_bits_vc_sel_3_0),
.io_in_2_bits_vc_sel_2_0 (io_req_2_0_bits_vc_sel_2_0),
.io_in_2_bits_vc_sel_1_0 (io_req_2_0_bits_vc_sel_1_0),
.io_in_2_bits_vc_sel_0_2 (io_req_2_0_bits_vc_sel_0_2),
.io_in_2_bits_vc_sel_0_3 (io_req_2_0_bits_vc_sel_0_3),
.io_in_2_bits_vc_sel_0_8 (io_req_2_0_bits_vc_sel_0_8),
.io_in_2_bits_vc_sel_0_9 (io_req_2_0_bits_vc_sel_0_9),
.io_in_2_bits_vc_sel_0_12 (io_req_2_0_bits_vc_sel_0_12),
.io_in_2_bits_vc_sel_0_13 (io_req_2_0_bits_vc_sel_0_13),
.io_in_2_bits_vc_sel_0_16 (io_req_2_0_bits_vc_sel_0_16),
.io_in_2_bits_vc_sel_0_17 (io_req_2_0_bits_vc_sel_0_17),
.io_in_2_bits_vc_sel_0_18 (io_req_2_0_bits_vc_sel_0_18),
.io_in_2_bits_vc_sel_0_19 (io_req_2_0_bits_vc_sel_0_19),
.io_in_2_bits_vc_sel_0_20 (io_req_2_0_bits_vc_sel_0_20),
.io_in_2_bits_vc_sel_0_21 (io_req_2_0_bits_vc_sel_0_21),
.io_in_2_bits_tail (io_req_2_0_bits_tail),
.io_in_3_ready (_arbs_1_io_in_3_ready),
.io_in_3_valid (arbs_1_io_in_3_valid), // @[SwitchAllocator.scala:95:37]
.io_in_3_bits_vc_sel_5_0 (io_req_3_0_bits_vc_sel_5_0),
.io_in_3_bits_vc_sel_4_0 (io_req_3_0_bits_vc_sel_4_0),
.io_in_3_bits_vc_sel_3_0 (io_req_3_0_bits_vc_sel_3_0),
.io_in_3_bits_vc_sel_2_0 (io_req_3_0_bits_vc_sel_2_0),
.io_in_3_bits_vc_sel_1_0 (io_req_3_0_bits_vc_sel_1_0),
.io_in_3_bits_vc_sel_0_2 (io_req_3_0_bits_vc_sel_0_2),
.io_in_3_bits_vc_sel_0_3 (io_req_3_0_bits_vc_sel_0_3),
.io_in_3_bits_vc_sel_0_8 (io_req_3_0_bits_vc_sel_0_8),
.io_in_3_bits_vc_sel_0_9 (io_req_3_0_bits_vc_sel_0_9),
.io_in_3_bits_vc_sel_0_12 (io_req_3_0_bits_vc_sel_0_12),
.io_in_3_bits_vc_sel_0_13 (io_req_3_0_bits_vc_sel_0_13),
.io_in_3_bits_vc_sel_0_16 (io_req_3_0_bits_vc_sel_0_16),
.io_in_3_bits_vc_sel_0_17 (io_req_3_0_bits_vc_sel_0_17),
.io_in_3_bits_vc_sel_0_18 (io_req_3_0_bits_vc_sel_0_18),
.io_in_3_bits_vc_sel_0_19 (io_req_3_0_bits_vc_sel_0_19),
.io_in_3_bits_vc_sel_0_20 (io_req_3_0_bits_vc_sel_0_20),
.io_in_3_bits_vc_sel_0_21 (io_req_3_0_bits_vc_sel_0_21),
.io_in_3_bits_tail (io_req_3_0_bits_tail),
.io_in_4_ready (_arbs_1_io_in_4_ready),
.io_in_4_valid (arbs_1_io_in_4_valid), // @[SwitchAllocator.scala:95:37]
.io_in_4_bits_vc_sel_5_0 (io_req_4_0_bits_vc_sel_5_0),
.io_in_4_bits_vc_sel_4_0 (io_req_4_0_bits_vc_sel_4_0),
.io_in_4_bits_vc_sel_3_0 (io_req_4_0_bits_vc_sel_3_0),
.io_in_4_bits_vc_sel_2_0 (io_req_4_0_bits_vc_sel_2_0),
.io_in_4_bits_vc_sel_1_0 (io_req_4_0_bits_vc_sel_1_0),
.io_in_4_bits_vc_sel_0_2 (io_req_4_0_bits_vc_sel_0_2),
.io_in_4_bits_vc_sel_0_3 (io_req_4_0_bits_vc_sel_0_3),
.io_in_4_bits_vc_sel_0_8 (io_req_4_0_bits_vc_sel_0_8),
.io_in_4_bits_vc_sel_0_9 (io_req_4_0_bits_vc_sel_0_9),
.io_in_4_bits_vc_sel_0_12 (io_req_4_0_bits_vc_sel_0_12),
.io_in_4_bits_vc_sel_0_13 (io_req_4_0_bits_vc_sel_0_13),
.io_in_4_bits_vc_sel_0_16 (io_req_4_0_bits_vc_sel_0_16),
.io_in_4_bits_vc_sel_0_17 (io_req_4_0_bits_vc_sel_0_17),
.io_in_4_bits_vc_sel_0_18 (io_req_4_0_bits_vc_sel_0_18),
.io_in_4_bits_vc_sel_0_19 (io_req_4_0_bits_vc_sel_0_19),
.io_in_4_bits_vc_sel_0_20 (io_req_4_0_bits_vc_sel_0_20),
.io_in_4_bits_vc_sel_0_21 (io_req_4_0_bits_vc_sel_0_21),
.io_in_4_bits_tail (io_req_4_0_bits_tail),
.io_in_5_ready (_arbs_1_io_in_5_ready),
.io_in_5_valid (arbs_1_io_in_5_valid), // @[SwitchAllocator.scala:95:37]
.io_in_5_bits_vc_sel_5_0 (io_req_5_0_bits_vc_sel_5_0),
.io_in_5_bits_vc_sel_4_0 (io_req_5_0_bits_vc_sel_4_0),
.io_in_5_bits_vc_sel_3_0 (io_req_5_0_bits_vc_sel_3_0),
.io_in_5_bits_vc_sel_2_0 (io_req_5_0_bits_vc_sel_2_0),
.io_in_5_bits_vc_sel_1_0 (io_req_5_0_bits_vc_sel_1_0),
.io_in_5_bits_vc_sel_0_2 (io_req_5_0_bits_vc_sel_0_2),
.io_in_5_bits_vc_sel_0_3 (io_req_5_0_bits_vc_sel_0_3),
.io_in_5_bits_vc_sel_0_8 (io_req_5_0_bits_vc_sel_0_8),
.io_in_5_bits_vc_sel_0_9 (io_req_5_0_bits_vc_sel_0_9),
.io_in_5_bits_vc_sel_0_12 (io_req_5_0_bits_vc_sel_0_12),
.io_in_5_bits_vc_sel_0_13 (io_req_5_0_bits_vc_sel_0_13),
.io_in_5_bits_vc_sel_0_16 (io_req_5_0_bits_vc_sel_0_16),
.io_in_5_bits_vc_sel_0_17 (io_req_5_0_bits_vc_sel_0_17),
.io_in_5_bits_vc_sel_0_18 (io_req_5_0_bits_vc_sel_0_18),
.io_in_5_bits_vc_sel_0_19 (io_req_5_0_bits_vc_sel_0_19),
.io_in_5_bits_vc_sel_0_20 (io_req_5_0_bits_vc_sel_0_20),
.io_in_5_bits_vc_sel_0_21 (io_req_5_0_bits_vc_sel_0_21),
.io_in_5_bits_tail (io_req_5_0_bits_tail),
.io_out_0_valid (_arbs_1_io_out_0_valid),
.io_out_0_bits_vc_sel_5_0 (/* unused */),
.io_out_0_bits_vc_sel_4_0 (/* unused */),
.io_out_0_bits_vc_sel_3_0 (/* unused */),
.io_out_0_bits_vc_sel_2_0 (/* unused */),
.io_out_0_bits_vc_sel_1_0 (_arbs_1_io_out_0_bits_vc_sel_1_0),
.io_out_0_bits_vc_sel_0_2 (/* unused */),
.io_out_0_bits_vc_sel_0_3 (/* unused */),
.io_out_0_bits_vc_sel_0_8 (/* unused */),
.io_out_0_bits_vc_sel_0_9 (/* unused */),
.io_out_0_bits_vc_sel_0_12 (/* unused */),
.io_out_0_bits_vc_sel_0_13 (/* unused */),
.io_out_0_bits_vc_sel_0_16 (/* unused */),
.io_out_0_bits_vc_sel_0_17 (/* unused */),
.io_out_0_bits_vc_sel_0_18 (/* unused */),
.io_out_0_bits_vc_sel_0_19 (/* unused */),
.io_out_0_bits_vc_sel_0_20 (/* unused */),
.io_out_0_bits_vc_sel_0_21 (/* unused */),
.io_out_0_bits_tail (_arbs_1_io_out_0_bits_tail),
.io_chosen_oh_0 (_arbs_1_io_chosen_oh_0)
); // @[SwitchAllocator.scala:83:45]
SwitchArbiter_1 arbs_2 ( // @[SwitchAllocator.scala:83:45]
.clock (clock),
.reset (reset),
.io_in_0_ready (_arbs_2_io_in_0_ready),
.io_in_0_valid (arbs_2_io_in_0_valid), // @[SwitchAllocator.scala:95:37]
.io_in_0_bits_vc_sel_5_0 (io_req_0_0_bits_vc_sel_5_0),
.io_in_0_bits_vc_sel_4_0 (io_req_0_0_bits_vc_sel_4_0),
.io_in_0_bits_vc_sel_3_0 (io_req_0_0_bits_vc_sel_3_0),
.io_in_0_bits_vc_sel_2_0 (io_req_0_0_bits_vc_sel_2_0),
.io_in_0_bits_vc_sel_1_0 (io_req_0_0_bits_vc_sel_1_0),
.io_in_0_bits_tail (io_req_0_0_bits_tail),
.io_in_1_ready (_arbs_2_io_in_1_ready),
.io_in_1_valid (arbs_2_io_in_1_valid), // @[SwitchAllocator.scala:95:37]
.io_in_1_bits_vc_sel_5_0 (io_req_1_0_bits_vc_sel_5_0),
.io_in_1_bits_vc_sel_4_0 (io_req_1_0_bits_vc_sel_4_0),
.io_in_1_bits_vc_sel_3_0 (io_req_1_0_bits_vc_sel_3_0),
.io_in_1_bits_vc_sel_2_0 (io_req_1_0_bits_vc_sel_2_0),
.io_in_1_bits_vc_sel_1_0 (io_req_1_0_bits_vc_sel_1_0),
.io_in_1_bits_vc_sel_0_2 (io_req_1_0_bits_vc_sel_0_2),
.io_in_1_bits_vc_sel_0_3 (io_req_1_0_bits_vc_sel_0_3),
.io_in_1_bits_vc_sel_0_8 (io_req_1_0_bits_vc_sel_0_8),
.io_in_1_bits_vc_sel_0_9 (io_req_1_0_bits_vc_sel_0_9),
.io_in_1_bits_vc_sel_0_12 (io_req_1_0_bits_vc_sel_0_12),
.io_in_1_bits_vc_sel_0_13 (io_req_1_0_bits_vc_sel_0_13),
.io_in_1_bits_vc_sel_0_16 (io_req_1_0_bits_vc_sel_0_16),
.io_in_1_bits_vc_sel_0_17 (io_req_1_0_bits_vc_sel_0_17),
.io_in_1_bits_vc_sel_0_18 (io_req_1_0_bits_vc_sel_0_18),
.io_in_1_bits_vc_sel_0_19 (io_req_1_0_bits_vc_sel_0_19),
.io_in_1_bits_vc_sel_0_20 (io_req_1_0_bits_vc_sel_0_20),
.io_in_1_bits_vc_sel_0_21 (io_req_1_0_bits_vc_sel_0_21),
.io_in_1_bits_tail (io_req_1_0_bits_tail),
.io_in_2_ready (_arbs_2_io_in_2_ready),
.io_in_2_valid (arbs_2_io_in_2_valid), // @[SwitchAllocator.scala:95:37]
.io_in_2_bits_vc_sel_5_0 (io_req_2_0_bits_vc_sel_5_0),
.io_in_2_bits_vc_sel_4_0 (io_req_2_0_bits_vc_sel_4_0),
.io_in_2_bits_vc_sel_3_0 (io_req_2_0_bits_vc_sel_3_0),
.io_in_2_bits_vc_sel_2_0 (io_req_2_0_bits_vc_sel_2_0),
.io_in_2_bits_vc_sel_1_0 (io_req_2_0_bits_vc_sel_1_0),
.io_in_2_bits_vc_sel_0_2 (io_req_2_0_bits_vc_sel_0_2),
.io_in_2_bits_vc_sel_0_3 (io_req_2_0_bits_vc_sel_0_3),
.io_in_2_bits_vc_sel_0_8 (io_req_2_0_bits_vc_sel_0_8),
.io_in_2_bits_vc_sel_0_9 (io_req_2_0_bits_vc_sel_0_9),
.io_in_2_bits_vc_sel_0_12 (io_req_2_0_bits_vc_sel_0_12),
.io_in_2_bits_vc_sel_0_13 (io_req_2_0_bits_vc_sel_0_13),
.io_in_2_bits_vc_sel_0_16 (io_req_2_0_bits_vc_sel_0_16),
.io_in_2_bits_vc_sel_0_17 (io_req_2_0_bits_vc_sel_0_17),
.io_in_2_bits_vc_sel_0_18 (io_req_2_0_bits_vc_sel_0_18),
.io_in_2_bits_vc_sel_0_19 (io_req_2_0_bits_vc_sel_0_19),
.io_in_2_bits_vc_sel_0_20 (io_req_2_0_bits_vc_sel_0_20),
.io_in_2_bits_vc_sel_0_21 (io_req_2_0_bits_vc_sel_0_21),
.io_in_2_bits_tail (io_req_2_0_bits_tail),
.io_in_3_ready (_arbs_2_io_in_3_ready),
.io_in_3_valid (arbs_2_io_in_3_valid), // @[SwitchAllocator.scala:95:37]
.io_in_3_bits_vc_sel_5_0 (io_req_3_0_bits_vc_sel_5_0),
.io_in_3_bits_vc_sel_4_0 (io_req_3_0_bits_vc_sel_4_0),
.io_in_3_bits_vc_sel_3_0 (io_req_3_0_bits_vc_sel_3_0),
.io_in_3_bits_vc_sel_2_0 (io_req_3_0_bits_vc_sel_2_0),
.io_in_3_bits_vc_sel_1_0 (io_req_3_0_bits_vc_sel_1_0),
.io_in_3_bits_vc_sel_0_2 (io_req_3_0_bits_vc_sel_0_2),
.io_in_3_bits_vc_sel_0_3 (io_req_3_0_bits_vc_sel_0_3),
.io_in_3_bits_vc_sel_0_8 (io_req_3_0_bits_vc_sel_0_8),
.io_in_3_bits_vc_sel_0_9 (io_req_3_0_bits_vc_sel_0_9),
.io_in_3_bits_vc_sel_0_12 (io_req_3_0_bits_vc_sel_0_12),
.io_in_3_bits_vc_sel_0_13 (io_req_3_0_bits_vc_sel_0_13),
.io_in_3_bits_vc_sel_0_16 (io_req_3_0_bits_vc_sel_0_16),
.io_in_3_bits_vc_sel_0_17 (io_req_3_0_bits_vc_sel_0_17),
.io_in_3_bits_vc_sel_0_18 (io_req_3_0_bits_vc_sel_0_18),
.io_in_3_bits_vc_sel_0_19 (io_req_3_0_bits_vc_sel_0_19),
.io_in_3_bits_vc_sel_0_20 (io_req_3_0_bits_vc_sel_0_20),
.io_in_3_bits_vc_sel_0_21 (io_req_3_0_bits_vc_sel_0_21),
.io_in_3_bits_tail (io_req_3_0_bits_tail),
.io_in_4_ready (_arbs_2_io_in_4_ready),
.io_in_4_valid (arbs_2_io_in_4_valid), // @[SwitchAllocator.scala:95:37]
.io_in_4_bits_vc_sel_5_0 (io_req_4_0_bits_vc_sel_5_0),
.io_in_4_bits_vc_sel_4_0 (io_req_4_0_bits_vc_sel_4_0),
.io_in_4_bits_vc_sel_3_0 (io_req_4_0_bits_vc_sel_3_0),
.io_in_4_bits_vc_sel_2_0 (io_req_4_0_bits_vc_sel_2_0),
.io_in_4_bits_vc_sel_1_0 (io_req_4_0_bits_vc_sel_1_0),
.io_in_4_bits_vc_sel_0_2 (io_req_4_0_bits_vc_sel_0_2),
.io_in_4_bits_vc_sel_0_3 (io_req_4_0_bits_vc_sel_0_3),
.io_in_4_bits_vc_sel_0_8 (io_req_4_0_bits_vc_sel_0_8),
.io_in_4_bits_vc_sel_0_9 (io_req_4_0_bits_vc_sel_0_9),
.io_in_4_bits_vc_sel_0_12 (io_req_4_0_bits_vc_sel_0_12),
.io_in_4_bits_vc_sel_0_13 (io_req_4_0_bits_vc_sel_0_13),
.io_in_4_bits_vc_sel_0_16 (io_req_4_0_bits_vc_sel_0_16),
.io_in_4_bits_vc_sel_0_17 (io_req_4_0_bits_vc_sel_0_17),
.io_in_4_bits_vc_sel_0_18 (io_req_4_0_bits_vc_sel_0_18),
.io_in_4_bits_vc_sel_0_19 (io_req_4_0_bits_vc_sel_0_19),
.io_in_4_bits_vc_sel_0_20 (io_req_4_0_bits_vc_sel_0_20),
.io_in_4_bits_vc_sel_0_21 (io_req_4_0_bits_vc_sel_0_21),
.io_in_4_bits_tail (io_req_4_0_bits_tail),
.io_in_5_ready (_arbs_2_io_in_5_ready),
.io_in_5_valid (arbs_2_io_in_5_valid), // @[SwitchAllocator.scala:95:37]
.io_in_5_bits_vc_sel_5_0 (io_req_5_0_bits_vc_sel_5_0),
.io_in_5_bits_vc_sel_4_0 (io_req_5_0_bits_vc_sel_4_0),
.io_in_5_bits_vc_sel_3_0 (io_req_5_0_bits_vc_sel_3_0),
.io_in_5_bits_vc_sel_2_0 (io_req_5_0_bits_vc_sel_2_0),
.io_in_5_bits_vc_sel_1_0 (io_req_5_0_bits_vc_sel_1_0),
.io_in_5_bits_vc_sel_0_2 (io_req_5_0_bits_vc_sel_0_2),
.io_in_5_bits_vc_sel_0_3 (io_req_5_0_bits_vc_sel_0_3),
.io_in_5_bits_vc_sel_0_8 (io_req_5_0_bits_vc_sel_0_8),
.io_in_5_bits_vc_sel_0_9 (io_req_5_0_bits_vc_sel_0_9),
.io_in_5_bits_vc_sel_0_12 (io_req_5_0_bits_vc_sel_0_12),
.io_in_5_bits_vc_sel_0_13 (io_req_5_0_bits_vc_sel_0_13),
.io_in_5_bits_vc_sel_0_16 (io_req_5_0_bits_vc_sel_0_16),
.io_in_5_bits_vc_sel_0_17 (io_req_5_0_bits_vc_sel_0_17),
.io_in_5_bits_vc_sel_0_18 (io_req_5_0_bits_vc_sel_0_18),
.io_in_5_bits_vc_sel_0_19 (io_req_5_0_bits_vc_sel_0_19),
.io_in_5_bits_vc_sel_0_20 (io_req_5_0_bits_vc_sel_0_20),
.io_in_5_bits_vc_sel_0_21 (io_req_5_0_bits_vc_sel_0_21),
.io_in_5_bits_tail (io_req_5_0_bits_tail),
.io_out_0_valid (_arbs_2_io_out_0_valid),
.io_out_0_bits_vc_sel_5_0 (/* unused */),
.io_out_0_bits_vc_sel_4_0 (/* unused */),
.io_out_0_bits_vc_sel_3_0 (/* unused */),
.io_out_0_bits_vc_sel_2_0 (_arbs_2_io_out_0_bits_vc_sel_2_0),
.io_out_0_bits_vc_sel_1_0 (/* unused */),
.io_out_0_bits_vc_sel_0_2 (/* unused */),
.io_out_0_bits_vc_sel_0_3 (/* unused */),
.io_out_0_bits_vc_sel_0_8 (/* unused */),
.io_out_0_bits_vc_sel_0_9 (/* unused */),
.io_out_0_bits_vc_sel_0_12 (/* unused */),
.io_out_0_bits_vc_sel_0_13 (/* unused */),
.io_out_0_bits_vc_sel_0_16 (/* unused */),
.io_out_0_bits_vc_sel_0_17 (/* unused */),
.io_out_0_bits_vc_sel_0_18 (/* unused */),
.io_out_0_bits_vc_sel_0_19 (/* unused */),
.io_out_0_bits_vc_sel_0_20 (/* unused */),
.io_out_0_bits_vc_sel_0_21 (/* unused */),
.io_out_0_bits_tail (_arbs_2_io_out_0_bits_tail),
.io_chosen_oh_0 (_arbs_2_io_chosen_oh_0)
); // @[SwitchAllocator.scala:83:45]
SwitchArbiter_1 arbs_3 ( // @[SwitchAllocator.scala:83:45]
.clock (clock),
.reset (reset),
.io_in_0_ready (_arbs_3_io_in_0_ready),
.io_in_0_valid (arbs_3_io_in_0_valid), // @[SwitchAllocator.scala:95:37]
.io_in_0_bits_vc_sel_5_0 (io_req_0_0_bits_vc_sel_5_0),
.io_in_0_bits_vc_sel_4_0 (io_req_0_0_bits_vc_sel_4_0),
.io_in_0_bits_vc_sel_3_0 (io_req_0_0_bits_vc_sel_3_0),
.io_in_0_bits_vc_sel_2_0 (io_req_0_0_bits_vc_sel_2_0),
.io_in_0_bits_vc_sel_1_0 (io_req_0_0_bits_vc_sel_1_0),
.io_in_0_bits_tail (io_req_0_0_bits_tail),
.io_in_1_ready (_arbs_3_io_in_1_ready),
.io_in_1_valid (arbs_3_io_in_1_valid), // @[SwitchAllocator.scala:95:37]
.io_in_1_bits_vc_sel_5_0 (io_req_1_0_bits_vc_sel_5_0),
.io_in_1_bits_vc_sel_4_0 (io_req_1_0_bits_vc_sel_4_0),
.io_in_1_bits_vc_sel_3_0 (io_req_1_0_bits_vc_sel_3_0),
.io_in_1_bits_vc_sel_2_0 (io_req_1_0_bits_vc_sel_2_0),
.io_in_1_bits_vc_sel_1_0 (io_req_1_0_bits_vc_sel_1_0),
.io_in_1_bits_vc_sel_0_2 (io_req_1_0_bits_vc_sel_0_2),
.io_in_1_bits_vc_sel_0_3 (io_req_1_0_bits_vc_sel_0_3),
.io_in_1_bits_vc_sel_0_8 (io_req_1_0_bits_vc_sel_0_8),
.io_in_1_bits_vc_sel_0_9 (io_req_1_0_bits_vc_sel_0_9),
.io_in_1_bits_vc_sel_0_12 (io_req_1_0_bits_vc_sel_0_12),
.io_in_1_bits_vc_sel_0_13 (io_req_1_0_bits_vc_sel_0_13),
.io_in_1_bits_vc_sel_0_16 (io_req_1_0_bits_vc_sel_0_16),
.io_in_1_bits_vc_sel_0_17 (io_req_1_0_bits_vc_sel_0_17),
.io_in_1_bits_vc_sel_0_18 (io_req_1_0_bits_vc_sel_0_18),
.io_in_1_bits_vc_sel_0_19 (io_req_1_0_bits_vc_sel_0_19),
.io_in_1_bits_vc_sel_0_20 (io_req_1_0_bits_vc_sel_0_20),
.io_in_1_bits_vc_sel_0_21 (io_req_1_0_bits_vc_sel_0_21),
.io_in_1_bits_tail (io_req_1_0_bits_tail),
.io_in_2_ready (_arbs_3_io_in_2_ready),
.io_in_2_valid (arbs_3_io_in_2_valid), // @[SwitchAllocator.scala:95:37]
.io_in_2_bits_vc_sel_5_0 (io_req_2_0_bits_vc_sel_5_0),
.io_in_2_bits_vc_sel_4_0 (io_req_2_0_bits_vc_sel_4_0),
.io_in_2_bits_vc_sel_3_0 (io_req_2_0_bits_vc_sel_3_0),
.io_in_2_bits_vc_sel_2_0 (io_req_2_0_bits_vc_sel_2_0),
.io_in_2_bits_vc_sel_1_0 (io_req_2_0_bits_vc_sel_1_0),
.io_in_2_bits_vc_sel_0_2 (io_req_2_0_bits_vc_sel_0_2),
.io_in_2_bits_vc_sel_0_3 (io_req_2_0_bits_vc_sel_0_3),
.io_in_2_bits_vc_sel_0_8 (io_req_2_0_bits_vc_sel_0_8),
.io_in_2_bits_vc_sel_0_9 (io_req_2_0_bits_vc_sel_0_9),
.io_in_2_bits_vc_sel_0_12 (io_req_2_0_bits_vc_sel_0_12),
.io_in_2_bits_vc_sel_0_13 (io_req_2_0_bits_vc_sel_0_13),
.io_in_2_bits_vc_sel_0_16 (io_req_2_0_bits_vc_sel_0_16),
.io_in_2_bits_vc_sel_0_17 (io_req_2_0_bits_vc_sel_0_17),
.io_in_2_bits_vc_sel_0_18 (io_req_2_0_bits_vc_sel_0_18),
.io_in_2_bits_vc_sel_0_19 (io_req_2_0_bits_vc_sel_0_19),
.io_in_2_bits_vc_sel_0_20 (io_req_2_0_bits_vc_sel_0_20),
.io_in_2_bits_vc_sel_0_21 (io_req_2_0_bits_vc_sel_0_21),
.io_in_2_bits_tail (io_req_2_0_bits_tail),
.io_in_3_ready (_arbs_3_io_in_3_ready),
.io_in_3_valid (arbs_3_io_in_3_valid), // @[SwitchAllocator.scala:95:37]
.io_in_3_bits_vc_sel_5_0 (io_req_3_0_bits_vc_sel_5_0),
.io_in_3_bits_vc_sel_4_0 (io_req_3_0_bits_vc_sel_4_0),
.io_in_3_bits_vc_sel_3_0 (io_req_3_0_bits_vc_sel_3_0),
.io_in_3_bits_vc_sel_2_0 (io_req_3_0_bits_vc_sel_2_0),
.io_in_3_bits_vc_sel_1_0 (io_req_3_0_bits_vc_sel_1_0),
.io_in_3_bits_vc_sel_0_2 (io_req_3_0_bits_vc_sel_0_2),
.io_in_3_bits_vc_sel_0_3 (io_req_3_0_bits_vc_sel_0_3),
.io_in_3_bits_vc_sel_0_8 (io_req_3_0_bits_vc_sel_0_8),
.io_in_3_bits_vc_sel_0_9 (io_req_3_0_bits_vc_sel_0_9),
.io_in_3_bits_vc_sel_0_12 (io_req_3_0_bits_vc_sel_0_12),
.io_in_3_bits_vc_sel_0_13 (io_req_3_0_bits_vc_sel_0_13),
.io_in_3_bits_vc_sel_0_16 (io_req_3_0_bits_vc_sel_0_16),
.io_in_3_bits_vc_sel_0_17 (io_req_3_0_bits_vc_sel_0_17),
.io_in_3_bits_vc_sel_0_18 (io_req_3_0_bits_vc_sel_0_18),
.io_in_3_bits_vc_sel_0_19 (io_req_3_0_bits_vc_sel_0_19),
.io_in_3_bits_vc_sel_0_20 (io_req_3_0_bits_vc_sel_0_20),
.io_in_3_bits_vc_sel_0_21 (io_req_3_0_bits_vc_sel_0_21),
.io_in_3_bits_tail (io_req_3_0_bits_tail),
.io_in_4_ready (_arbs_3_io_in_4_ready),
.io_in_4_valid (arbs_3_io_in_4_valid), // @[SwitchAllocator.scala:95:37]
.io_in_4_bits_vc_sel_5_0 (io_req_4_0_bits_vc_sel_5_0),
.io_in_4_bits_vc_sel_4_0 (io_req_4_0_bits_vc_sel_4_0),
.io_in_4_bits_vc_sel_3_0 (io_req_4_0_bits_vc_sel_3_0),
.io_in_4_bits_vc_sel_2_0 (io_req_4_0_bits_vc_sel_2_0),
.io_in_4_bits_vc_sel_1_0 (io_req_4_0_bits_vc_sel_1_0),
.io_in_4_bits_vc_sel_0_2 (io_req_4_0_bits_vc_sel_0_2),
.io_in_4_bits_vc_sel_0_3 (io_req_4_0_bits_vc_sel_0_3),
.io_in_4_bits_vc_sel_0_8 (io_req_4_0_bits_vc_sel_0_8),
.io_in_4_bits_vc_sel_0_9 (io_req_4_0_bits_vc_sel_0_9),
.io_in_4_bits_vc_sel_0_12 (io_req_4_0_bits_vc_sel_0_12),
.io_in_4_bits_vc_sel_0_13 (io_req_4_0_bits_vc_sel_0_13),
.io_in_4_bits_vc_sel_0_16 (io_req_4_0_bits_vc_sel_0_16),
.io_in_4_bits_vc_sel_0_17 (io_req_4_0_bits_vc_sel_0_17),
.io_in_4_bits_vc_sel_0_18 (io_req_4_0_bits_vc_sel_0_18),
.io_in_4_bits_vc_sel_0_19 (io_req_4_0_bits_vc_sel_0_19),
.io_in_4_bits_vc_sel_0_20 (io_req_4_0_bits_vc_sel_0_20),
.io_in_4_bits_vc_sel_0_21 (io_req_4_0_bits_vc_sel_0_21),
.io_in_4_bits_tail (io_req_4_0_bits_tail),
.io_in_5_ready (_arbs_3_io_in_5_ready),
.io_in_5_valid (arbs_3_io_in_5_valid), // @[SwitchAllocator.scala:95:37]
.io_in_5_bits_vc_sel_5_0 (io_req_5_0_bits_vc_sel_5_0),
.io_in_5_bits_vc_sel_4_0 (io_req_5_0_bits_vc_sel_4_0),
.io_in_5_bits_vc_sel_3_0 (io_req_5_0_bits_vc_sel_3_0),
.io_in_5_bits_vc_sel_2_0 (io_req_5_0_bits_vc_sel_2_0),
.io_in_5_bits_vc_sel_1_0 (io_req_5_0_bits_vc_sel_1_0),
.io_in_5_bits_vc_sel_0_2 (io_req_5_0_bits_vc_sel_0_2),
.io_in_5_bits_vc_sel_0_3 (io_req_5_0_bits_vc_sel_0_3),
.io_in_5_bits_vc_sel_0_8 (io_req_5_0_bits_vc_sel_0_8),
.io_in_5_bits_vc_sel_0_9 (io_req_5_0_bits_vc_sel_0_9),
.io_in_5_bits_vc_sel_0_12 (io_req_5_0_bits_vc_sel_0_12),
.io_in_5_bits_vc_sel_0_13 (io_req_5_0_bits_vc_sel_0_13),
.io_in_5_bits_vc_sel_0_16 (io_req_5_0_bits_vc_sel_0_16),
.io_in_5_bits_vc_sel_0_17 (io_req_5_0_bits_vc_sel_0_17),
.io_in_5_bits_vc_sel_0_18 (io_req_5_0_bits_vc_sel_0_18),
.io_in_5_bits_vc_sel_0_19 (io_req_5_0_bits_vc_sel_0_19),
.io_in_5_bits_vc_sel_0_20 (io_req_5_0_bits_vc_sel_0_20),
.io_in_5_bits_vc_sel_0_21 (io_req_5_0_bits_vc_sel_0_21),
.io_in_5_bits_tail (io_req_5_0_bits_tail),
.io_out_0_valid (_arbs_3_io_out_0_valid),
.io_out_0_bits_vc_sel_5_0 (/* unused */),
.io_out_0_bits_vc_sel_4_0 (/* unused */),
.io_out_0_bits_vc_sel_3_0 (_arbs_3_io_out_0_bits_vc_sel_3_0),
.io_out_0_bits_vc_sel_2_0 (/* unused */),
.io_out_0_bits_vc_sel_1_0 (/* unused */),
.io_out_0_bits_vc_sel_0_2 (/* unused */),
.io_out_0_bits_vc_sel_0_3 (/* unused */),
.io_out_0_bits_vc_sel_0_8 (/* unused */),
.io_out_0_bits_vc_sel_0_9 (/* unused */),
.io_out_0_bits_vc_sel_0_12 (/* unused */),
.io_out_0_bits_vc_sel_0_13 (/* unused */),
.io_out_0_bits_vc_sel_0_16 (/* unused */),
.io_out_0_bits_vc_sel_0_17 (/* unused */),
.io_out_0_bits_vc_sel_0_18 (/* unused */),
.io_out_0_bits_vc_sel_0_19 (/* unused */),
.io_out_0_bits_vc_sel_0_20 (/* unused */),
.io_out_0_bits_vc_sel_0_21 (/* unused */),
.io_out_0_bits_tail (_arbs_3_io_out_0_bits_tail),
.io_chosen_oh_0 (_arbs_3_io_chosen_oh_0)
); // @[SwitchAllocator.scala:83:45]
SwitchArbiter_1 arbs_4 ( // @[SwitchAllocator.scala:83:45]
.clock (clock),
.reset (reset),
.io_in_0_ready (_arbs_4_io_in_0_ready),
.io_in_0_valid (arbs_4_io_in_0_valid), // @[SwitchAllocator.scala:95:37]
.io_in_0_bits_vc_sel_5_0 (io_req_0_0_bits_vc_sel_5_0),
.io_in_0_bits_vc_sel_4_0 (io_req_0_0_bits_vc_sel_4_0),
.io_in_0_bits_vc_sel_3_0 (io_req_0_0_bits_vc_sel_3_0),
.io_in_0_bits_vc_sel_2_0 (io_req_0_0_bits_vc_sel_2_0),
.io_in_0_bits_vc_sel_1_0 (io_req_0_0_bits_vc_sel_1_0),
.io_in_0_bits_tail (io_req_0_0_bits_tail),
.io_in_1_ready (_arbs_4_io_in_1_ready),
.io_in_1_valid (arbs_4_io_in_1_valid), // @[SwitchAllocator.scala:95:37]
.io_in_1_bits_vc_sel_5_0 (io_req_1_0_bits_vc_sel_5_0),
.io_in_1_bits_vc_sel_4_0 (io_req_1_0_bits_vc_sel_4_0),
.io_in_1_bits_vc_sel_3_0 (io_req_1_0_bits_vc_sel_3_0),
.io_in_1_bits_vc_sel_2_0 (io_req_1_0_bits_vc_sel_2_0),
.io_in_1_bits_vc_sel_1_0 (io_req_1_0_bits_vc_sel_1_0),
.io_in_1_bits_vc_sel_0_2 (io_req_1_0_bits_vc_sel_0_2),
.io_in_1_bits_vc_sel_0_3 (io_req_1_0_bits_vc_sel_0_3),
.io_in_1_bits_vc_sel_0_8 (io_req_1_0_bits_vc_sel_0_8),
.io_in_1_bits_vc_sel_0_9 (io_req_1_0_bits_vc_sel_0_9),
.io_in_1_bits_vc_sel_0_12 (io_req_1_0_bits_vc_sel_0_12),
.io_in_1_bits_vc_sel_0_13 (io_req_1_0_bits_vc_sel_0_13),
.io_in_1_bits_vc_sel_0_16 (io_req_1_0_bits_vc_sel_0_16),
.io_in_1_bits_vc_sel_0_17 (io_req_1_0_bits_vc_sel_0_17),
.io_in_1_bits_vc_sel_0_18 (io_req_1_0_bits_vc_sel_0_18),
.io_in_1_bits_vc_sel_0_19 (io_req_1_0_bits_vc_sel_0_19),
.io_in_1_bits_vc_sel_0_20 (io_req_1_0_bits_vc_sel_0_20),
.io_in_1_bits_vc_sel_0_21 (io_req_1_0_bits_vc_sel_0_21),
.io_in_1_bits_tail (io_req_1_0_bits_tail),
.io_in_2_ready (_arbs_4_io_in_2_ready),
.io_in_2_valid (arbs_4_io_in_2_valid), // @[SwitchAllocator.scala:95:37]
.io_in_2_bits_vc_sel_5_0 (io_req_2_0_bits_vc_sel_5_0),
.io_in_2_bits_vc_sel_4_0 (io_req_2_0_bits_vc_sel_4_0),
.io_in_2_bits_vc_sel_3_0 (io_req_2_0_bits_vc_sel_3_0),
.io_in_2_bits_vc_sel_2_0 (io_req_2_0_bits_vc_sel_2_0),
.io_in_2_bits_vc_sel_1_0 (io_req_2_0_bits_vc_sel_1_0),
.io_in_2_bits_vc_sel_0_2 (io_req_2_0_bits_vc_sel_0_2),
.io_in_2_bits_vc_sel_0_3 (io_req_2_0_bits_vc_sel_0_3),
.io_in_2_bits_vc_sel_0_8 (io_req_2_0_bits_vc_sel_0_8),
.io_in_2_bits_vc_sel_0_9 (io_req_2_0_bits_vc_sel_0_9),
.io_in_2_bits_vc_sel_0_12 (io_req_2_0_bits_vc_sel_0_12),
.io_in_2_bits_vc_sel_0_13 (io_req_2_0_bits_vc_sel_0_13),
.io_in_2_bits_vc_sel_0_16 (io_req_2_0_bits_vc_sel_0_16),
.io_in_2_bits_vc_sel_0_17 (io_req_2_0_bits_vc_sel_0_17),
.io_in_2_bits_vc_sel_0_18 (io_req_2_0_bits_vc_sel_0_18),
.io_in_2_bits_vc_sel_0_19 (io_req_2_0_bits_vc_sel_0_19),
.io_in_2_bits_vc_sel_0_20 (io_req_2_0_bits_vc_sel_0_20),
.io_in_2_bits_vc_sel_0_21 (io_req_2_0_bits_vc_sel_0_21),
.io_in_2_bits_tail (io_req_2_0_bits_tail),
.io_in_3_ready (_arbs_4_io_in_3_ready),
.io_in_3_valid (arbs_4_io_in_3_valid), // @[SwitchAllocator.scala:95:37]
.io_in_3_bits_vc_sel_5_0 (io_req_3_0_bits_vc_sel_5_0),
.io_in_3_bits_vc_sel_4_0 (io_req_3_0_bits_vc_sel_4_0),
.io_in_3_bits_vc_sel_3_0 (io_req_3_0_bits_vc_sel_3_0),
.io_in_3_bits_vc_sel_2_0 (io_req_3_0_bits_vc_sel_2_0),
.io_in_3_bits_vc_sel_1_0 (io_req_3_0_bits_vc_sel_1_0),
.io_in_3_bits_vc_sel_0_2 (io_req_3_0_bits_vc_sel_0_2),
.io_in_3_bits_vc_sel_0_3 (io_req_3_0_bits_vc_sel_0_3),
.io_in_3_bits_vc_sel_0_8 (io_req_3_0_bits_vc_sel_0_8),
.io_in_3_bits_vc_sel_0_9 (io_req_3_0_bits_vc_sel_0_9),
.io_in_3_bits_vc_sel_0_12 (io_req_3_0_bits_vc_sel_0_12),
.io_in_3_bits_vc_sel_0_13 (io_req_3_0_bits_vc_sel_0_13),
.io_in_3_bits_vc_sel_0_16 (io_req_3_0_bits_vc_sel_0_16),
.io_in_3_bits_vc_sel_0_17 (io_req_3_0_bits_vc_sel_0_17),
.io_in_3_bits_vc_sel_0_18 (io_req_3_0_bits_vc_sel_0_18),
.io_in_3_bits_vc_sel_0_19 (io_req_3_0_bits_vc_sel_0_19),
.io_in_3_bits_vc_sel_0_20 (io_req_3_0_bits_vc_sel_0_20),
.io_in_3_bits_vc_sel_0_21 (io_req_3_0_bits_vc_sel_0_21),
.io_in_3_bits_tail (io_req_3_0_bits_tail),
.io_in_4_ready (_arbs_4_io_in_4_ready),
.io_in_4_valid (arbs_4_io_in_4_valid), // @[SwitchAllocator.scala:95:37]
.io_in_4_bits_vc_sel_5_0 (io_req_4_0_bits_vc_sel_5_0),
.io_in_4_bits_vc_sel_4_0 (io_req_4_0_bits_vc_sel_4_0),
.io_in_4_bits_vc_sel_3_0 (io_req_4_0_bits_vc_sel_3_0),
.io_in_4_bits_vc_sel_2_0 (io_req_4_0_bits_vc_sel_2_0),
.io_in_4_bits_vc_sel_1_0 (io_req_4_0_bits_vc_sel_1_0),
.io_in_4_bits_vc_sel_0_2 (io_req_4_0_bits_vc_sel_0_2),
.io_in_4_bits_vc_sel_0_3 (io_req_4_0_bits_vc_sel_0_3),
.io_in_4_bits_vc_sel_0_8 (io_req_4_0_bits_vc_sel_0_8),
.io_in_4_bits_vc_sel_0_9 (io_req_4_0_bits_vc_sel_0_9),
.io_in_4_bits_vc_sel_0_12 (io_req_4_0_bits_vc_sel_0_12),
.io_in_4_bits_vc_sel_0_13 (io_req_4_0_bits_vc_sel_0_13),
.io_in_4_bits_vc_sel_0_16 (io_req_4_0_bits_vc_sel_0_16),
.io_in_4_bits_vc_sel_0_17 (io_req_4_0_bits_vc_sel_0_17),
.io_in_4_bits_vc_sel_0_18 (io_req_4_0_bits_vc_sel_0_18),
.io_in_4_bits_vc_sel_0_19 (io_req_4_0_bits_vc_sel_0_19),
.io_in_4_bits_vc_sel_0_20 (io_req_4_0_bits_vc_sel_0_20),
.io_in_4_bits_vc_sel_0_21 (io_req_4_0_bits_vc_sel_0_21),
.io_in_4_bits_tail (io_req_4_0_bits_tail),
.io_in_5_ready (_arbs_4_io_in_5_ready),
.io_in_5_valid (arbs_4_io_in_5_valid), // @[SwitchAllocator.scala:95:37]
.io_in_5_bits_vc_sel_5_0 (io_req_5_0_bits_vc_sel_5_0),
.io_in_5_bits_vc_sel_4_0 (io_req_5_0_bits_vc_sel_4_0),
.io_in_5_bits_vc_sel_3_0 (io_req_5_0_bits_vc_sel_3_0),
.io_in_5_bits_vc_sel_2_0 (io_req_5_0_bits_vc_sel_2_0),
.io_in_5_bits_vc_sel_1_0 (io_req_5_0_bits_vc_sel_1_0),
.io_in_5_bits_vc_sel_0_2 (io_req_5_0_bits_vc_sel_0_2),
.io_in_5_bits_vc_sel_0_3 (io_req_5_0_bits_vc_sel_0_3),
.io_in_5_bits_vc_sel_0_8 (io_req_5_0_bits_vc_sel_0_8),
.io_in_5_bits_vc_sel_0_9 (io_req_5_0_bits_vc_sel_0_9),
.io_in_5_bits_vc_sel_0_12 (io_req_5_0_bits_vc_sel_0_12),
.io_in_5_bits_vc_sel_0_13 (io_req_5_0_bits_vc_sel_0_13),
.io_in_5_bits_vc_sel_0_16 (io_req_5_0_bits_vc_sel_0_16),
.io_in_5_bits_vc_sel_0_17 (io_req_5_0_bits_vc_sel_0_17),
.io_in_5_bits_vc_sel_0_18 (io_req_5_0_bits_vc_sel_0_18),
.io_in_5_bits_vc_sel_0_19 (io_req_5_0_bits_vc_sel_0_19),
.io_in_5_bits_vc_sel_0_20 (io_req_5_0_bits_vc_sel_0_20),
.io_in_5_bits_vc_sel_0_21 (io_req_5_0_bits_vc_sel_0_21),
.io_in_5_bits_tail (io_req_5_0_bits_tail),
.io_out_0_valid (_arbs_4_io_out_0_valid),
.io_out_0_bits_vc_sel_5_0 (/* unused */),
.io_out_0_bits_vc_sel_4_0 (_arbs_4_io_out_0_bits_vc_sel_4_0),
.io_out_0_bits_vc_sel_3_0 (/* unused */),
.io_out_0_bits_vc_sel_2_0 (/* unused */),
.io_out_0_bits_vc_sel_1_0 (/* unused */),
.io_out_0_bits_vc_sel_0_2 (/* unused */),
.io_out_0_bits_vc_sel_0_3 (/* unused */),
.io_out_0_bits_vc_sel_0_8 (/* unused */),
.io_out_0_bits_vc_sel_0_9 (/* unused */),
.io_out_0_bits_vc_sel_0_12 (/* unused */),
.io_out_0_bits_vc_sel_0_13 (/* unused */),
.io_out_0_bits_vc_sel_0_16 (/* unused */),
.io_out_0_bits_vc_sel_0_17 (/* unused */),
.io_out_0_bits_vc_sel_0_18 (/* unused */),
.io_out_0_bits_vc_sel_0_19 (/* unused */),
.io_out_0_bits_vc_sel_0_20 (/* unused */),
.io_out_0_bits_vc_sel_0_21 (/* unused */),
.io_out_0_bits_tail (_arbs_4_io_out_0_bits_tail),
.io_chosen_oh_0 (_arbs_4_io_chosen_oh_0)
); // @[SwitchAllocator.scala:83:45]
SwitchArbiter_1 arbs_5 ( // @[SwitchAllocator.scala:83:45]
.clock (clock),
.reset (reset),
.io_in_0_ready (_arbs_5_io_in_0_ready),
.io_in_0_valid (arbs_5_io_in_0_valid), // @[SwitchAllocator.scala:95:37]
.io_in_0_bits_vc_sel_5_0 (io_req_0_0_bits_vc_sel_5_0),
.io_in_0_bits_vc_sel_4_0 (io_req_0_0_bits_vc_sel_4_0),
.io_in_0_bits_vc_sel_3_0 (io_req_0_0_bits_vc_sel_3_0),
.io_in_0_bits_vc_sel_2_0 (io_req_0_0_bits_vc_sel_2_0),
.io_in_0_bits_vc_sel_1_0 (io_req_0_0_bits_vc_sel_1_0),
.io_in_0_bits_tail (io_req_0_0_bits_tail),
.io_in_1_ready (_arbs_5_io_in_1_ready),
.io_in_1_valid (arbs_5_io_in_1_valid), // @[SwitchAllocator.scala:95:37]
.io_in_1_bits_vc_sel_5_0 (io_req_1_0_bits_vc_sel_5_0),
.io_in_1_bits_vc_sel_4_0 (io_req_1_0_bits_vc_sel_4_0),
.io_in_1_bits_vc_sel_3_0 (io_req_1_0_bits_vc_sel_3_0),
.io_in_1_bits_vc_sel_2_0 (io_req_1_0_bits_vc_sel_2_0),
.io_in_1_bits_vc_sel_1_0 (io_req_1_0_bits_vc_sel_1_0),
.io_in_1_bits_vc_sel_0_2 (io_req_1_0_bits_vc_sel_0_2),
.io_in_1_bits_vc_sel_0_3 (io_req_1_0_bits_vc_sel_0_3),
.io_in_1_bits_vc_sel_0_8 (io_req_1_0_bits_vc_sel_0_8),
.io_in_1_bits_vc_sel_0_9 (io_req_1_0_bits_vc_sel_0_9),
.io_in_1_bits_vc_sel_0_12 (io_req_1_0_bits_vc_sel_0_12),
.io_in_1_bits_vc_sel_0_13 (io_req_1_0_bits_vc_sel_0_13),
.io_in_1_bits_vc_sel_0_16 (io_req_1_0_bits_vc_sel_0_16),
.io_in_1_bits_vc_sel_0_17 (io_req_1_0_bits_vc_sel_0_17),
.io_in_1_bits_vc_sel_0_18 (io_req_1_0_bits_vc_sel_0_18),
.io_in_1_bits_vc_sel_0_19 (io_req_1_0_bits_vc_sel_0_19),
.io_in_1_bits_vc_sel_0_20 (io_req_1_0_bits_vc_sel_0_20),
.io_in_1_bits_vc_sel_0_21 (io_req_1_0_bits_vc_sel_0_21),
.io_in_1_bits_tail (io_req_1_0_bits_tail),
.io_in_2_ready (_arbs_5_io_in_2_ready),
.io_in_2_valid (arbs_5_io_in_2_valid), // @[SwitchAllocator.scala:95:37]
.io_in_2_bits_vc_sel_5_0 (io_req_2_0_bits_vc_sel_5_0),
.io_in_2_bits_vc_sel_4_0 (io_req_2_0_bits_vc_sel_4_0),
.io_in_2_bits_vc_sel_3_0 (io_req_2_0_bits_vc_sel_3_0),
.io_in_2_bits_vc_sel_2_0 (io_req_2_0_bits_vc_sel_2_0),
.io_in_2_bits_vc_sel_1_0 (io_req_2_0_bits_vc_sel_1_0),
.io_in_2_bits_vc_sel_0_2 (io_req_2_0_bits_vc_sel_0_2),
.io_in_2_bits_vc_sel_0_3 (io_req_2_0_bits_vc_sel_0_3),
.io_in_2_bits_vc_sel_0_8 (io_req_2_0_bits_vc_sel_0_8),
.io_in_2_bits_vc_sel_0_9 (io_req_2_0_bits_vc_sel_0_9),
.io_in_2_bits_vc_sel_0_12 (io_req_2_0_bits_vc_sel_0_12),
.io_in_2_bits_vc_sel_0_13 (io_req_2_0_bits_vc_sel_0_13),
.io_in_2_bits_vc_sel_0_16 (io_req_2_0_bits_vc_sel_0_16),
.io_in_2_bits_vc_sel_0_17 (io_req_2_0_bits_vc_sel_0_17),
.io_in_2_bits_vc_sel_0_18 (io_req_2_0_bits_vc_sel_0_18),
.io_in_2_bits_vc_sel_0_19 (io_req_2_0_bits_vc_sel_0_19),
.io_in_2_bits_vc_sel_0_20 (io_req_2_0_bits_vc_sel_0_20),
.io_in_2_bits_vc_sel_0_21 (io_req_2_0_bits_vc_sel_0_21),
.io_in_2_bits_tail (io_req_2_0_bits_tail),
.io_in_3_ready (_arbs_5_io_in_3_ready),
.io_in_3_valid (arbs_5_io_in_3_valid), // @[SwitchAllocator.scala:95:37]
.io_in_3_bits_vc_sel_5_0 (io_req_3_0_bits_vc_sel_5_0),
.io_in_3_bits_vc_sel_4_0 (io_req_3_0_bits_vc_sel_4_0),
.io_in_3_bits_vc_sel_3_0 (io_req_3_0_bits_vc_sel_3_0),
.io_in_3_bits_vc_sel_2_0 (io_req_3_0_bits_vc_sel_2_0),
.io_in_3_bits_vc_sel_1_0 (io_req_3_0_bits_vc_sel_1_0),
.io_in_3_bits_vc_sel_0_2 (io_req_3_0_bits_vc_sel_0_2),
.io_in_3_bits_vc_sel_0_3 (io_req_3_0_bits_vc_sel_0_3),
.io_in_3_bits_vc_sel_0_8 (io_req_3_0_bits_vc_sel_0_8),
.io_in_3_bits_vc_sel_0_9 (io_req_3_0_bits_vc_sel_0_9),
.io_in_3_bits_vc_sel_0_12 (io_req_3_0_bits_vc_sel_0_12),
.io_in_3_bits_vc_sel_0_13 (io_req_3_0_bits_vc_sel_0_13),
.io_in_3_bits_vc_sel_0_16 (io_req_3_0_bits_vc_sel_0_16),
.io_in_3_bits_vc_sel_0_17 (io_req_3_0_bits_vc_sel_0_17),
.io_in_3_bits_vc_sel_0_18 (io_req_3_0_bits_vc_sel_0_18),
.io_in_3_bits_vc_sel_0_19 (io_req_3_0_bits_vc_sel_0_19),
.io_in_3_bits_vc_sel_0_20 (io_req_3_0_bits_vc_sel_0_20),
.io_in_3_bits_vc_sel_0_21 (io_req_3_0_bits_vc_sel_0_21),
.io_in_3_bits_tail (io_req_3_0_bits_tail),
.io_in_4_ready (_arbs_5_io_in_4_ready),
.io_in_4_valid (arbs_5_io_in_4_valid), // @[SwitchAllocator.scala:95:37]
.io_in_4_bits_vc_sel_5_0 (io_req_4_0_bits_vc_sel_5_0),
.io_in_4_bits_vc_sel_4_0 (io_req_4_0_bits_vc_sel_4_0),
.io_in_4_bits_vc_sel_3_0 (io_req_4_0_bits_vc_sel_3_0),
.io_in_4_bits_vc_sel_2_0 (io_req_4_0_bits_vc_sel_2_0),
.io_in_4_bits_vc_sel_1_0 (io_req_4_0_bits_vc_sel_1_0),
.io_in_4_bits_vc_sel_0_2 (io_req_4_0_bits_vc_sel_0_2),
.io_in_4_bits_vc_sel_0_3 (io_req_4_0_bits_vc_sel_0_3),
.io_in_4_bits_vc_sel_0_8 (io_req_4_0_bits_vc_sel_0_8),
.io_in_4_bits_vc_sel_0_9 (io_req_4_0_bits_vc_sel_0_9),
.io_in_4_bits_vc_sel_0_12 (io_req_4_0_bits_vc_sel_0_12),
.io_in_4_bits_vc_sel_0_13 (io_req_4_0_bits_vc_sel_0_13),
.io_in_4_bits_vc_sel_0_16 (io_req_4_0_bits_vc_sel_0_16),
.io_in_4_bits_vc_sel_0_17 (io_req_4_0_bits_vc_sel_0_17),
.io_in_4_bits_vc_sel_0_18 (io_req_4_0_bits_vc_sel_0_18),
.io_in_4_bits_vc_sel_0_19 (io_req_4_0_bits_vc_sel_0_19),
.io_in_4_bits_vc_sel_0_20 (io_req_4_0_bits_vc_sel_0_20),
.io_in_4_bits_vc_sel_0_21 (io_req_4_0_bits_vc_sel_0_21),
.io_in_4_bits_tail (io_req_4_0_bits_tail),
.io_in_5_ready (_arbs_5_io_in_5_ready),
.io_in_5_valid (arbs_5_io_in_5_valid), // @[SwitchAllocator.scala:95:37]
.io_in_5_bits_vc_sel_5_0 (io_req_5_0_bits_vc_sel_5_0),
.io_in_5_bits_vc_sel_4_0 (io_req_5_0_bits_vc_sel_4_0),
.io_in_5_bits_vc_sel_3_0 (io_req_5_0_bits_vc_sel_3_0),
.io_in_5_bits_vc_sel_2_0 (io_req_5_0_bits_vc_sel_2_0),
.io_in_5_bits_vc_sel_1_0 (io_req_5_0_bits_vc_sel_1_0),
.io_in_5_bits_vc_sel_0_2 (io_req_5_0_bits_vc_sel_0_2),
.io_in_5_bits_vc_sel_0_3 (io_req_5_0_bits_vc_sel_0_3),
.io_in_5_bits_vc_sel_0_8 (io_req_5_0_bits_vc_sel_0_8),
.io_in_5_bits_vc_sel_0_9 (io_req_5_0_bits_vc_sel_0_9),
.io_in_5_bits_vc_sel_0_12 (io_req_5_0_bits_vc_sel_0_12),
.io_in_5_bits_vc_sel_0_13 (io_req_5_0_bits_vc_sel_0_13),
.io_in_5_bits_vc_sel_0_16 (io_req_5_0_bits_vc_sel_0_16),
.io_in_5_bits_vc_sel_0_17 (io_req_5_0_bits_vc_sel_0_17),
.io_in_5_bits_vc_sel_0_18 (io_req_5_0_bits_vc_sel_0_18),
.io_in_5_bits_vc_sel_0_19 (io_req_5_0_bits_vc_sel_0_19),
.io_in_5_bits_vc_sel_0_20 (io_req_5_0_bits_vc_sel_0_20),
.io_in_5_bits_vc_sel_0_21 (io_req_5_0_bits_vc_sel_0_21),
.io_in_5_bits_tail (io_req_5_0_bits_tail),
.io_out_0_valid (_arbs_5_io_out_0_valid),
.io_out_0_bits_vc_sel_5_0 (_arbs_5_io_out_0_bits_vc_sel_5_0),
.io_out_0_bits_vc_sel_4_0 (/* unused */),
.io_out_0_bits_vc_sel_3_0 (/* unused */),
.io_out_0_bits_vc_sel_2_0 (/* unused */),
.io_out_0_bits_vc_sel_1_0 (/* unused */),
.io_out_0_bits_vc_sel_0_2 (/* unused */),
.io_out_0_bits_vc_sel_0_3 (/* unused */),
.io_out_0_bits_vc_sel_0_8 (/* unused */),
.io_out_0_bits_vc_sel_0_9 (/* unused */),
.io_out_0_bits_vc_sel_0_12 (/* unused */),
.io_out_0_bits_vc_sel_0_13 (/* unused */),
.io_out_0_bits_vc_sel_0_16 (/* unused */),
.io_out_0_bits_vc_sel_0_17 (/* unused */),
.io_out_0_bits_vc_sel_0_18 (/* unused */),
.io_out_0_bits_vc_sel_0_19 (/* unused */),
.io_out_0_bits_vc_sel_0_20 (/* unused */),
.io_out_0_bits_vc_sel_0_21 (/* unused */),
.io_out_0_bits_tail (_arbs_5_io_out_0_bits_tail),
.io_chosen_oh_0 (_arbs_5_io_chosen_oh_0)
); // @[SwitchAllocator.scala:83:45]
assign io_req_5_0_ready = _arbs_0_io_in_5_ready & arbs_0_io_in_5_valid | _arbs_1_io_in_5_ready & arbs_1_io_in_5_valid | _arbs_2_io_in_5_ready & arbs_2_io_in_5_valid | _arbs_3_io_in_5_ready & arbs_3_io_in_5_valid | _arbs_4_io_in_5_ready & arbs_4_io_in_5_valid | _arbs_5_io_in_5_ready & arbs_5_io_in_5_valid; // @[Decoupled.scala:51:35]
assign io_req_4_0_ready = _arbs_0_io_in_4_ready & arbs_0_io_in_4_valid | _arbs_1_io_in_4_ready & arbs_1_io_in_4_valid | _arbs_2_io_in_4_ready & arbs_2_io_in_4_valid | _arbs_3_io_in_4_ready & arbs_3_io_in_4_valid | _arbs_4_io_in_4_ready & arbs_4_io_in_4_valid | _arbs_5_io_in_4_ready & arbs_5_io_in_4_valid; // @[Decoupled.scala:51:35]
assign io_req_3_0_ready = _arbs_0_io_in_3_ready & arbs_0_io_in_3_valid | _arbs_1_io_in_3_ready & arbs_1_io_in_3_valid | _arbs_2_io_in_3_ready & arbs_2_io_in_3_valid | _arbs_3_io_in_3_ready & arbs_3_io_in_3_valid | _arbs_4_io_in_3_ready & arbs_4_io_in_3_valid | _arbs_5_io_in_3_ready & arbs_5_io_in_3_valid; // @[Decoupled.scala:51:35]
assign io_req_2_0_ready = _arbs_0_io_in_2_ready & arbs_0_io_in_2_valid | _arbs_1_io_in_2_ready & arbs_1_io_in_2_valid | _arbs_2_io_in_2_ready & arbs_2_io_in_2_valid | _arbs_3_io_in_2_ready & arbs_3_io_in_2_valid | _arbs_4_io_in_2_ready & arbs_4_io_in_2_valid | _arbs_5_io_in_2_ready & arbs_5_io_in_2_valid; // @[Decoupled.scala:51:35]
assign io_req_1_0_ready = _arbs_0_io_in_1_ready & arbs_0_io_in_1_valid | _arbs_1_io_in_1_ready & arbs_1_io_in_1_valid | _arbs_2_io_in_1_ready & arbs_2_io_in_1_valid | _arbs_3_io_in_1_ready & arbs_3_io_in_1_valid | _arbs_4_io_in_1_ready & arbs_4_io_in_1_valid | _arbs_5_io_in_1_ready & arbs_5_io_in_1_valid; // @[Decoupled.scala:51:35]
assign io_req_0_0_ready = _arbs_1_io_in_0_ready & arbs_1_io_in_0_valid | _arbs_2_io_in_0_ready & arbs_2_io_in_0_valid | _arbs_3_io_in_0_ready & arbs_3_io_in_0_valid | _arbs_4_io_in_0_ready & arbs_4_io_in_0_valid | _arbs_5_io_in_0_ready & arbs_5_io_in_0_valid; // @[Decoupled.scala:51:35]
assign io_credit_alloc_5_0_alloc = io_credit_alloc_5_0_alloc_0; // @[SwitchAllocator.scala:64:7, :120:33]
assign io_credit_alloc_5_0_tail = io_credit_alloc_5_0_alloc_0 & _arbs_5_io_out_0_bits_tail; // @[SwitchAllocator.scala:64:7, :83:45, :116:44, :120:{33,67}, :122:21]
assign io_credit_alloc_4_0_alloc = io_credit_alloc_4_0_alloc_0; // @[SwitchAllocator.scala:64:7, :120:33]
assign io_credit_alloc_4_0_tail = io_credit_alloc_4_0_alloc_0 & _arbs_4_io_out_0_bits_tail; // @[SwitchAllocator.scala:64:7, :83:45, :116:44, :120:{33,67}, :122:21]
assign io_credit_alloc_3_0_alloc = io_credit_alloc_3_0_alloc_0; // @[SwitchAllocator.scala:64:7, :120:33]
assign io_credit_alloc_3_0_tail = io_credit_alloc_3_0_alloc_0 & _arbs_3_io_out_0_bits_tail; // @[SwitchAllocator.scala:64:7, :83:45, :116:44, :120:{33,67}, :122:21]
assign io_credit_alloc_2_0_alloc = io_credit_alloc_2_0_alloc_0; // @[SwitchAllocator.scala:64:7, :120:33]
assign io_credit_alloc_2_0_tail = io_credit_alloc_2_0_alloc_0 & _arbs_2_io_out_0_bits_tail; // @[SwitchAllocator.scala:64:7, :83:45, :116:44, :120:{33,67}, :122:21]
assign io_credit_alloc_1_0_alloc = io_credit_alloc_1_0_alloc_0; // @[SwitchAllocator.scala:64:7, :120:33]
assign io_credit_alloc_1_0_tail = io_credit_alloc_1_0_alloc_0 & _arbs_1_io_out_0_bits_tail; // @[SwitchAllocator.scala:64:7, :83:45, :116:44, :120:{33,67}, :122:21]
assign io_credit_alloc_0_2_alloc = _arbs_0_io_out_0_valid & _arbs_0_io_out_0_bits_vc_sel_0_2; // @[SwitchAllocator.scala:64:7, :83:45, :120:33]
assign io_credit_alloc_0_3_alloc = _arbs_0_io_out_0_valid & _arbs_0_io_out_0_bits_vc_sel_0_3; // @[SwitchAllocator.scala:64:7, :83:45, :120:33]
assign io_credit_alloc_0_8_alloc = _arbs_0_io_out_0_valid & _arbs_0_io_out_0_bits_vc_sel_0_8; // @[SwitchAllocator.scala:64:7, :83:45, :120:33]
assign io_credit_alloc_0_9_alloc = _arbs_0_io_out_0_valid & _arbs_0_io_out_0_bits_vc_sel_0_9; // @[SwitchAllocator.scala:64:7, :83:45, :120:33]
assign io_credit_alloc_0_12_alloc = _arbs_0_io_out_0_valid & _arbs_0_io_out_0_bits_vc_sel_0_12; // @[SwitchAllocator.scala:64:7, :83:45, :120:33]
assign io_credit_alloc_0_13_alloc = _arbs_0_io_out_0_valid & _arbs_0_io_out_0_bits_vc_sel_0_13; // @[SwitchAllocator.scala:64:7, :83:45, :120:33]
assign io_credit_alloc_0_16_alloc = _arbs_0_io_out_0_valid & _arbs_0_io_out_0_bits_vc_sel_0_16; // @[SwitchAllocator.scala:64:7, :83:45, :120:33]
assign io_credit_alloc_0_17_alloc = _arbs_0_io_out_0_valid & _arbs_0_io_out_0_bits_vc_sel_0_17; // @[SwitchAllocator.scala:64:7, :83:45, :120:33]
assign io_credit_alloc_0_18_alloc = _arbs_0_io_out_0_valid & _arbs_0_io_out_0_bits_vc_sel_0_18; // @[SwitchAllocator.scala:64:7, :83:45, :120:33]
assign io_credit_alloc_0_19_alloc = _arbs_0_io_out_0_valid & _arbs_0_io_out_0_bits_vc_sel_0_19; // @[SwitchAllocator.scala:64:7, :83:45, :120:33]
assign io_credit_alloc_0_20_alloc = _arbs_0_io_out_0_valid & _arbs_0_io_out_0_bits_vc_sel_0_20; // @[SwitchAllocator.scala:64:7, :83:45, :120:33]
assign io_credit_alloc_0_21_alloc = _arbs_0_io_out_0_valid & _arbs_0_io_out_0_bits_vc_sel_0_21; // @[SwitchAllocator.scala:64:7, :83:45, :120:33]
assign io_switch_sel_5_0_5_0 = arbs_5_io_in_5_valid & _arbs_5_io_chosen_oh_0[5] & _arbs_5_io_out_0_valid; // @[SwitchAllocator.scala:64:7, :83:45, :95:37, :108:{65,91,97}]
assign io_switch_sel_5_0_4_0 = arbs_5_io_in_4_valid & _arbs_5_io_chosen_oh_0[4] & _arbs_5_io_out_0_valid; // @[SwitchAllocator.scala:64:7, :83:45, :95:37, :108:{65,91,97}]
assign io_switch_sel_5_0_3_0 = arbs_5_io_in_3_valid & _arbs_5_io_chosen_oh_0[3] & _arbs_5_io_out_0_valid; // @[SwitchAllocator.scala:64:7, :83:45, :95:37, :108:{65,91,97}]
assign io_switch_sel_5_0_2_0 = arbs_5_io_in_2_valid & _arbs_5_io_chosen_oh_0[2] & _arbs_5_io_out_0_valid; // @[SwitchAllocator.scala:64:7, :83:45, :95:37, :108:{65,91,97}]
assign io_switch_sel_5_0_1_0 = arbs_5_io_in_1_valid & _arbs_5_io_chosen_oh_0[1] & _arbs_5_io_out_0_valid; // @[SwitchAllocator.scala:64:7, :83:45, :95:37, :108:{65,91,97}]
assign io_switch_sel_5_0_0_0 = arbs_5_io_in_0_valid & _arbs_5_io_chosen_oh_0[0] & _arbs_5_io_out_0_valid; // @[SwitchAllocator.scala:64:7, :83:45, :95:37, :108:{65,91,97}]
assign io_switch_sel_4_0_5_0 = arbs_4_io_in_5_valid & _arbs_4_io_chosen_oh_0[5] & _arbs_4_io_out_0_valid; // @[SwitchAllocator.scala:64:7, :83:45, :95:37, :108:{65,91,97}]
assign io_switch_sel_4_0_4_0 = arbs_4_io_in_4_valid & _arbs_4_io_chosen_oh_0[4] & _arbs_4_io_out_0_valid; // @[SwitchAllocator.scala:64:7, :83:45, :95:37, :108:{65,91,97}]
assign io_switch_sel_4_0_3_0 = arbs_4_io_in_3_valid & _arbs_4_io_chosen_oh_0[3] & _arbs_4_io_out_0_valid; // @[SwitchAllocator.scala:64:7, :83:45, :95:37, :108:{65,91,97}]
assign io_switch_sel_4_0_2_0 = arbs_4_io_in_2_valid & _arbs_4_io_chosen_oh_0[2] & _arbs_4_io_out_0_valid; // @[SwitchAllocator.scala:64:7, :83:45, :95:37, :108:{65,91,97}]
assign io_switch_sel_4_0_1_0 = arbs_4_io_in_1_valid & _arbs_4_io_chosen_oh_0[1] & _arbs_4_io_out_0_valid; // @[SwitchAllocator.scala:64:7, :83:45, :95:37, :108:{65,91,97}]
assign io_switch_sel_4_0_0_0 = arbs_4_io_in_0_valid & _arbs_4_io_chosen_oh_0[0] & _arbs_4_io_out_0_valid; // @[SwitchAllocator.scala:64:7, :83:45, :95:37, :108:{65,91,97}]
assign io_switch_sel_3_0_5_0 = arbs_3_io_in_5_valid & _arbs_3_io_chosen_oh_0[5] & _arbs_3_io_out_0_valid; // @[SwitchAllocator.scala:64:7, :83:45, :95:37, :108:{65,91,97}]
assign io_switch_sel_3_0_4_0 = arbs_3_io_in_4_valid & _arbs_3_io_chosen_oh_0[4] & _arbs_3_io_out_0_valid; // @[SwitchAllocator.scala:64:7, :83:45, :95:37, :108:{65,91,97}]
assign io_switch_sel_3_0_3_0 = arbs_3_io_in_3_valid & _arbs_3_io_chosen_oh_0[3] & _arbs_3_io_out_0_valid; // @[SwitchAllocator.scala:64:7, :83:45, :95:37, :108:{65,91,97}]
assign io_switch_sel_3_0_2_0 = arbs_3_io_in_2_valid & _arbs_3_io_chosen_oh_0[2] & _arbs_3_io_out_0_valid; // @[SwitchAllocator.scala:64:7, :83:45, :95:37, :108:{65,91,97}]
assign io_switch_sel_3_0_1_0 = arbs_3_io_in_1_valid & _arbs_3_io_chosen_oh_0[1] & _arbs_3_io_out_0_valid; // @[SwitchAllocator.scala:64:7, :83:45, :95:37, :108:{65,91,97}]
assign io_switch_sel_3_0_0_0 = arbs_3_io_in_0_valid & _arbs_3_io_chosen_oh_0[0] & _arbs_3_io_out_0_valid; // @[SwitchAllocator.scala:64:7, :83:45, :95:37, :108:{65,91,97}]
assign io_switch_sel_2_0_5_0 = arbs_2_io_in_5_valid & _arbs_2_io_chosen_oh_0[5] & _arbs_2_io_out_0_valid; // @[SwitchAllocator.scala:64:7, :83:45, :95:37, :108:{65,91,97}]
assign io_switch_sel_2_0_4_0 = arbs_2_io_in_4_valid & _arbs_2_io_chosen_oh_0[4] & _arbs_2_io_out_0_valid; // @[SwitchAllocator.scala:64:7, :83:45, :95:37, :108:{65,91,97}]
assign io_switch_sel_2_0_3_0 = arbs_2_io_in_3_valid & _arbs_2_io_chosen_oh_0[3] & _arbs_2_io_out_0_valid; // @[SwitchAllocator.scala:64:7, :83:45, :95:37, :108:{65,91,97}]
assign io_switch_sel_2_0_2_0 = arbs_2_io_in_2_valid & _arbs_2_io_chosen_oh_0[2] & _arbs_2_io_out_0_valid; // @[SwitchAllocator.scala:64:7, :83:45, :95:37, :108:{65,91,97}]
assign io_switch_sel_2_0_1_0 = arbs_2_io_in_1_valid & _arbs_2_io_chosen_oh_0[1] & _arbs_2_io_out_0_valid; // @[SwitchAllocator.scala:64:7, :83:45, :95:37, :108:{65,91,97}]
assign io_switch_sel_2_0_0_0 = arbs_2_io_in_0_valid & _arbs_2_io_chosen_oh_0[0] & _arbs_2_io_out_0_valid; // @[SwitchAllocator.scala:64:7, :83:45, :95:37, :108:{65,91,97}]
assign io_switch_sel_1_0_5_0 = arbs_1_io_in_5_valid & _arbs_1_io_chosen_oh_0[5] & _arbs_1_io_out_0_valid; // @[SwitchAllocator.scala:64:7, :83:45, :95:37, :108:{65,91,97}]
assign io_switch_sel_1_0_4_0 = arbs_1_io_in_4_valid & _arbs_1_io_chosen_oh_0[4] & _arbs_1_io_out_0_valid; // @[SwitchAllocator.scala:64:7, :83:45, :95:37, :108:{65,91,97}]
assign io_switch_sel_1_0_3_0 = arbs_1_io_in_3_valid & _arbs_1_io_chosen_oh_0[3] & _arbs_1_io_out_0_valid; // @[SwitchAllocator.scala:64:7, :83:45, :95:37, :108:{65,91,97}]
assign io_switch_sel_1_0_2_0 = arbs_1_io_in_2_valid & _arbs_1_io_chosen_oh_0[2] & _arbs_1_io_out_0_valid; // @[SwitchAllocator.scala:64:7, :83:45, :95:37, :108:{65,91,97}]
assign io_switch_sel_1_0_1_0 = arbs_1_io_in_1_valid & _arbs_1_io_chosen_oh_0[1] & _arbs_1_io_out_0_valid; // @[SwitchAllocator.scala:64:7, :83:45, :95:37, :108:{65,91,97}]
assign io_switch_sel_1_0_0_0 = arbs_1_io_in_0_valid & _arbs_1_io_chosen_oh_0[0] & _arbs_1_io_out_0_valid; // @[SwitchAllocator.scala:64:7, :83:45, :95:37, :108:{65,91,97}]
assign io_switch_sel_0_0_5_0 = arbs_0_io_in_5_valid & _arbs_0_io_chosen_oh_0[5] & _arbs_0_io_out_0_valid; // @[SwitchAllocator.scala:64:7, :83:45, :95:37, :108:{65,91,97}]
assign io_switch_sel_0_0_4_0 = arbs_0_io_in_4_valid & _arbs_0_io_chosen_oh_0[4] & _arbs_0_io_out_0_valid; // @[SwitchAllocator.scala:64:7, :83:45, :95:37, :108:{65,91,97}]
assign io_switch_sel_0_0_3_0 = arbs_0_io_in_3_valid & _arbs_0_io_chosen_oh_0[3] & _arbs_0_io_out_0_valid; // @[SwitchAllocator.scala:64:7, :83:45, :95:37, :108:{65,91,97}]
assign io_switch_sel_0_0_2_0 = arbs_0_io_in_2_valid & _arbs_0_io_chosen_oh_0[2] & _arbs_0_io_out_0_valid; // @[SwitchAllocator.scala:64:7, :83:45, :95:37, :108:{65,91,97}]
assign io_switch_sel_0_0_1_0 = arbs_0_io_in_1_valid & _arbs_0_io_chosen_oh_0[1] & _arbs_0_io_out_0_valid; // @[SwitchAllocator.scala:64:7, :83:45, :95:37, :108:{65,91,97}]
endmodule
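The io_switch_sel_* and io_credit_alloc_* assigns above all follow one pattern per (output, input) pair: a crossbar select fires only when that input requested the output, the output's arbiter chose that input (one-hot chosen_oh), and the arbiter actually produced a valid grant this cycle; credit allocation likewise gates the granted VC select with the arbiter's output valid. A rough Chisel sketch of the switch-select pattern (hypothetical names, not the actual SwitchAllocator source) is:

import chisel3._

// Illustrative sketch of the grant pattern seen in the generated assigns above.
class SwitchSelSketch(nInputs: Int, nOutputs: Int) extends Module {
  val io = IO(new Bundle {
    val reqValid  = Input(Vec(nOutputs, Vec(nInputs, Bool())))  // input i requests output o
    val chosenOH  = Input(Vec(nOutputs, UInt(nInputs.W)))       // per-output arbiter choice, one-hot over inputs
    val outValid  = Input(Vec(nOutputs, Bool()))                // per-output arbiter produced a grant
    val switchSel = Output(Vec(nOutputs, Vec(nInputs, Bool())))
  })
  for (o <- 0 until nOutputs; i <- 0 until nInputs) {
    // Same shape as: io_switch_sel_<o>_0_<i>_0 = arbs_<o>_io_in_<i>_valid & chosen_oh[<i>] & arbs_<o>_io_out_0_valid
    io.switchSel(o)(i) := io.reqValid(o)(i) && io.chosenOH(o)(i) && io.outValid(o)
  }
}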
Generate the Verilog code corresponding to the following Chisel files.
File ShiftReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
// Similar to the Chisel ShiftRegister but allows the user to suggest a
// name to the registers that get instantiated, and
// to provide a reset value.
object ShiftRegInit {
def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T =
(0 until n).foldRight(in) {
case (i, next) => {
val r = RegNext(next, init)
name.foreach { na => r.suggestName(s"${na}_${i}") }
r
}
}
}
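// A minimal usage sketch (hypothetical module, not part of the original file; relies on the
// chisel3._ import above): delay a 1-bit signal by three cycles with a reset value of false,
// giving the generated stage registers name hints valid_pipe_0 .. valid_pipe_2.
class ShiftRegInitExample extends Module {
  val io = IO(new Bundle {
    val in  = Input(Bool())
    val out = Output(Bool())
  })
  io.out := ShiftRegInit(io.in, n = 3, init = false.B, name = Some("valid_pipe"))
}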
/** These wrap behavioral
* shift registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
* The different types vary in their reset behavior:
* AsyncResetShiftReg -- Asynchronously reset register array
* A W(width) x D(depth) sized array is constructed from D instantiations of a
 *     W-wide register vector. Functionally identical to AsyncResetSynchronizerShiftReg,
* but only used for timing applications
*/
abstract class AbstractPipelineReg(w: Int = 1) extends Module {
val io = IO(new Bundle {
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
}
)
}
object AbstractPipelineReg {
def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = {
val chain = Module(gen)
name.foreach{ chain.suggestName(_) }
chain.io.d := in.asUInt
chain.io.q.asTypeOf(in)
}
}
class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) {
require(depth > 0, "Depth must be greater than 0.")
override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}"
val chain = List.tabulate(depth) { i =>
Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}")
}
chain.last.io.d := io.d
chain.last.io.en := true.B
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink.io.d := source.io.q
sink.io.en := true.B
}
io.q := chain.head.io.q
}
object AsyncResetShiftReg {
def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name)
def apply [T <: Data](in: T, depth: Int, name: Option[String]): T =
apply(in, depth, 0, name)
def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T =
apply(in, depth, init.litValue.toInt, name)
def apply [T <: Data](in: T, depth: Int, init: T): T =
apply (in, depth, init.litValue.toInt, None)
}
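// Editor's note: illustrative sketch added during editing, not part of the original file. A
// 4-bit value is pushed through a 3-deep, asynchronously reset pipeline; the instance and
// signal names are hypothetical.
class AsyncResetShiftRegExample extends Module {
  val io = IO(new Bundle {
    val d = Input(UInt(4.W))
    val q = Output(UInt(4.W))
  })
  // depth = 3, init = 0, suggested instance name "timing_pipe"
  io.q := AsyncResetShiftReg(io.d, 3, 0, Some("timing_pipe"))
}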
File SynchronizerReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util.{RegEnable, Cat}
/** These wrap behavioral
* shift and next registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
*
* These are built up of *ResetSynchronizerPrimitiveShiftReg,
* intended to be replaced by the integrator's metastable flops chains or replaced
* at this level if they have a multi-bit wide synchronizer primitive.
* The different types vary in their reset behavior:
* NonSyncResetSynchronizerShiftReg -- Register array which does not have a reset pin
* AsyncResetSynchronizerShiftReg -- Asynchronously reset register array, constructed from W instantiations of D deep
* 1-bit-wide shift registers.
* SyncResetSynchronizerShiftReg -- Synchronously reset register array, constructed similarly to AsyncResetSynchronizerShiftReg
*
* [Inferred]ResetSynchronizerShiftReg -- TBD reset type by chisel3 reset inference.
*
* ClockCrossingReg -- Not made up of SynchronizerPrimitiveShiftReg. This is for single-deep flops which cross
* Clock Domains.
*/
object SynchronizerResetType extends Enumeration {
val NonSync, Inferred, Sync, Async = Value
}
// Note: this should not be used directly.
// Use the companion object to generate this with the correct reset type mixin.
private class SynchronizerPrimitiveShiftReg(
sync: Int,
init: Boolean,
resetType: SynchronizerResetType.Value)
extends AbstractPipelineReg(1) {
val initInt = if (init) 1 else 0
val initPostfix = resetType match {
case SynchronizerResetType.NonSync => ""
case _ => s"_i${initInt}"
}
override def desiredName = s"${resetType.toString}ResetSynchronizerPrimitiveShiftReg_d${sync}${initPostfix}"
val chain = List.tabulate(sync) { i =>
val reg = if (resetType == SynchronizerResetType.NonSync) Reg(Bool()) else RegInit(init.B)
reg.suggestName(s"sync_$i")
}
chain.last := io.d.asBool
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink := source
}
io.q := chain.head.asUInt
}
private object SynchronizerPrimitiveShiftReg {
def apply (in: Bool, sync: Int, init: Boolean, resetType: SynchronizerResetType.Value): Bool = {
val gen: () => SynchronizerPrimitiveShiftReg = resetType match {
case SynchronizerResetType.NonSync =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType)
case SynchronizerResetType.Async =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireAsyncReset
case SynchronizerResetType.Sync =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireSyncReset
case SynchronizerResetType.Inferred =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType)
}
AbstractPipelineReg(gen(), in)
}
}
// Note: This module may end up with a non-AsyncReset type reset.
// But the Primitives within will always have AsyncReset type.
class AsyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int)
extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"AsyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
withReset(reset.asAsyncReset){
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Async)
}
}
io.q := Cat(output.reverse)
}
object AsyncResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
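// Editor's note: illustrative sketch added during editing, not part of the original file. A
// single-bit signal arriving from another clock domain is passed through a 3-deep,
// asynchronously reset synchronizer chain; module and signal names are hypothetical.
class AsyncSynchronizerExample extends Module {
  val io = IO(new Bundle {
    val async_in = Input(Bool())
    val sync_out = Output(Bool())
  })
  // Three synchronizer stages, reset value 0.
  io.sync_out := AsyncResetSynchronizerShiftReg(io.async_in, 3)
}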
// Note: This module may end up with a non-Bool type reset.
// But the Primitives within will always have Bool reset type.
@deprecated("SyncResetSynchronizerShiftReg is unnecessary with Chisel3 inferred resets. Use ResetSynchronizerShiftReg which will use the inferred reset type.", "rocket-chip 1.2")
class SyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"SyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
withReset(reset.asBool){
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Sync)
}
}
io.q := Cat(output.reverse)
}
object SyncResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
if (sync == 0) in else AbstractPipelineReg(new SyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
class ResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"ResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Inferred)
}
io.q := Cat(output.reverse)
}
object ResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
AbstractPipelineReg(new ResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
class SynchronizerShiftReg(w: Int = 1, sync: Int = 3) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"SynchronizerShiftReg_w${w}_d${sync}"
val output = Seq.tabulate(w) { i =>
SynchronizerPrimitiveShiftReg(io.d(i), sync, false, SynchronizerResetType.NonSync)
}
io.q := Cat(output.reverse)
}
object SynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, name: Option[String] = None): T =
if (sync == 0) in else AbstractPipelineReg(new SynchronizerShiftReg(in.getWidth, sync), in, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, None)
def apply [T <: Data](in: T): T =
apply (in, 3, None)
}
class ClockCrossingReg(w: Int = 1, doInit: Boolean) extends Module {
override def desiredName = s"ClockCrossingReg_w${w}"
val io = IO(new Bundle{
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
val en = Input(Bool())
})
val cdc_reg = if (doInit) RegEnable(io.d, 0.U(w.W), io.en) else RegEnable(io.d, io.en)
io.q := cdc_reg
}
object ClockCrossingReg {
def apply [T <: Data](in: T, en: Bool, doInit: Boolean, name: Option[String] = None): T = {
val cdc_reg = Module(new ClockCrossingReg(in.getWidth, doInit))
name.foreach{ cdc_reg.suggestName(_) }
cdc_reg.io.d := in.asUInt
cdc_reg.io.en := en
cdc_reg.io.q.asTypeOf(in)
}
}
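// Editor's note: illustrative sketch added during editing, not part of the original file. An
// 8-bit payload is captured in a single enable-gated register intended to sit on a
// clock-domain boundary; the names used here are hypothetical.
class ClockCrossingRegExample extends Module {
  val io = IO(new Bundle {
    val d  = Input(UInt(8.W))
    val en = Input(Bool())
    val q  = Output(UInt(8.W))
  })
  io.q := ClockCrossingReg(io.d, en = io.en, doInit = false, name = Some("cdc_data_reg"))
}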
| module AsyncResetSynchronizerShiftReg_w1_d3_i0_124( // @[SynchronizerReg.scala:80:7]
input clock, // @[SynchronizerReg.scala:80:7]
input reset, // @[SynchronizerReg.scala:80:7]
output io_q // @[ShiftReg.scala:36:14]
);
wire _output_T = reset; // @[SynchronizerReg.scala:86:21]
wire io_d = 1'h1; // @[SynchronizerReg.scala:80:7, :87:41]
wire _output_T_1 = 1'h1; // @[SynchronizerReg.scala:80:7, :87:41]
wire output_0; // @[ShiftReg.scala:48:24]
wire io_q_0; // @[SynchronizerReg.scala:80:7]
assign io_q_0 = output_0; // @[SynchronizerReg.scala:80:7]
AsyncResetSynchronizerPrimitiveShiftReg_d3_i0_204 output_chain ( // @[ShiftReg.scala:45:23]
.clock (clock),
.reset (_output_T), // @[SynchronizerReg.scala:86:21]
.io_q (output_0)
); // @[ShiftReg.scala:45:23]
assign io_q = io_q_0; // @[SynchronizerReg.scala:80:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File IngressUnit.scala:
package constellation.router
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.{Field, Parameters}
import freechips.rocketchip.util._
import constellation.channel._
class IngressUnit(
ingressNodeId: Int,
cParam: IngressChannelParams,
outParams: Seq[ChannelParams],
egressParams: Seq[EgressChannelParams],
combineRCVA: Boolean,
combineSAST: Boolean,
)
(implicit p: Parameters) extends AbstractInputUnit(cParam, outParams, egressParams)(p) {
class IngressUnitIO extends AbstractInputUnitIO(cParam, outParams, egressParams) {
val in = Flipped(Decoupled(new IngressFlit(cParam.payloadBits)))
}
val io = IO(new IngressUnitIO)
val route_buffer = Module(new Queue(new Flit(cParam.payloadBits), 2))
val route_q = Module(new Queue(new RouteComputerResp(outParams, egressParams), 2,
flow=combineRCVA))
assert(!(io.in.valid && !cParam.possibleFlows.toSeq.map(_.egressId.U === io.in.bits.egress_id).orR))
route_buffer.io.enq.bits.head := io.in.bits.head
route_buffer.io.enq.bits.tail := io.in.bits.tail
val flows = cParam.possibleFlows.toSeq
if (flows.size == 0) {
route_buffer.io.enq.bits.flow := DontCare
} else {
route_buffer.io.enq.bits.flow.ingress_node := cParam.destId.U
route_buffer.io.enq.bits.flow.ingress_node_id := ingressNodeId.U
route_buffer.io.enq.bits.flow.vnet_id := cParam.vNetId.U
route_buffer.io.enq.bits.flow.egress_node := Mux1H(
flows.map(_.egressId.U === io.in.bits.egress_id),
flows.map(_.egressNode.U)
)
route_buffer.io.enq.bits.flow.egress_node_id := Mux1H(
flows.map(_.egressId.U === io.in.bits.egress_id),
flows.map(_.egressNodeId.U)
)
}
route_buffer.io.enq.bits.payload := io.in.bits.payload
route_buffer.io.enq.bits.virt_channel_id := DontCare
io.router_req.bits.src_virt_id := 0.U
io.router_req.bits.flow := route_buffer.io.enq.bits.flow
val at_dest = route_buffer.io.enq.bits.flow.egress_node === nodeId.U
route_buffer.io.enq.valid := io.in.valid && (
io.router_req.ready || !io.in.bits.head || at_dest)
io.router_req.valid := io.in.valid && route_buffer.io.enq.ready && io.in.bits.head && !at_dest
io.in.ready := route_buffer.io.enq.ready && (
io.router_req.ready || !io.in.bits.head || at_dest)
route_q.io.enq.valid := io.router_req.fire
route_q.io.enq.bits := io.router_resp
when (io.in.fire && io.in.bits.head && at_dest) {
route_q.io.enq.valid := true.B
route_q.io.enq.bits.vc_sel.foreach(_.foreach(_ := false.B))
for (o <- 0 until nEgress) {
when (egressParams(o).egressId.U === io.in.bits.egress_id) {
route_q.io.enq.bits.vc_sel(o+nOutputs)(0) := true.B
}
}
}
assert(!(route_q.io.enq.valid && !route_q.io.enq.ready))
val vcalloc_buffer = Module(new Queue(new Flit(cParam.payloadBits), 2))
val vcalloc_q = Module(new Queue(new VCAllocResp(outParams, egressParams),
1, pipe=true))
vcalloc_buffer.io.enq.bits := route_buffer.io.deq.bits
io.vcalloc_req.bits.vc_sel := route_q.io.deq.bits.vc_sel
io.vcalloc_req.bits.flow := route_buffer.io.deq.bits.flow
io.vcalloc_req.bits.in_vc := 0.U
val head = route_buffer.io.deq.bits.head
val tail = route_buffer.io.deq.bits.tail
vcalloc_buffer.io.enq.valid := (route_buffer.io.deq.valid &&
(route_q.io.deq.valid || !head) &&
(io.vcalloc_req.ready || !head)
)
io.vcalloc_req.valid := (route_buffer.io.deq.valid && route_q.io.deq.valid &&
head && vcalloc_buffer.io.enq.ready && vcalloc_q.io.enq.ready)
route_buffer.io.deq.ready := (vcalloc_buffer.io.enq.ready &&
(route_q.io.deq.valid || !head) &&
(io.vcalloc_req.ready || !head) &&
(vcalloc_q.io.enq.ready || !head))
route_q.io.deq.ready := (route_buffer.io.deq.fire && tail)
vcalloc_q.io.enq.valid := io.vcalloc_req.fire
vcalloc_q.io.enq.bits := io.vcalloc_resp
assert(!(vcalloc_q.io.enq.valid && !vcalloc_q.io.enq.ready))
io.salloc_req(0).bits.vc_sel := vcalloc_q.io.deq.bits.vc_sel
io.salloc_req(0).bits.tail := vcalloc_buffer.io.deq.bits.tail
val c = (vcalloc_q.io.deq.bits.vc_sel.asUInt & io.out_credit_available.asUInt) =/= 0.U
val vcalloc_tail = vcalloc_buffer.io.deq.bits.tail
io.salloc_req(0).valid := vcalloc_buffer.io.deq.valid && vcalloc_q.io.deq.valid && c && !io.block
vcalloc_buffer.io.deq.ready := io.salloc_req(0).ready && vcalloc_q.io.deq.valid && c && !io.block
vcalloc_q.io.deq.ready := vcalloc_tail && vcalloc_buffer.io.deq.fire
val out_bundle = if (combineSAST) {
Wire(Valid(new SwitchBundle(outParams, egressParams)))
} else {
Reg(Valid(new SwitchBundle(outParams, egressParams)))
}
io.out(0) := out_bundle
out_bundle.valid := vcalloc_buffer.io.deq.fire
out_bundle.bits.flit := vcalloc_buffer.io.deq.bits
out_bundle.bits.flit.virt_channel_id := 0.U
val out_channel_oh = vcalloc_q.io.deq.bits.vc_sel.map(_.reduce(_||_)).toSeq
out_bundle.bits.out_virt_channel := Mux1H(out_channel_oh,
vcalloc_q.io.deq.bits.vc_sel.map(v => OHToUInt(v)).toSeq)
io.debug.va_stall := io.vcalloc_req.valid && !io.vcalloc_req.ready
io.debug.sa_stall := io.salloc_req(0).valid && !io.salloc_req(0).ready
// TODO: We should not generate input/ingress/output/egress units for untraversable channels
if (!cParam.traversable) {
io.in.ready := false.B
io.router_req.valid := false.B
io.router_req.bits := DontCare
io.vcalloc_req.valid := false.B
io.vcalloc_req.bits := DontCare
io.salloc_req.foreach(_.valid := false.B)
io.salloc_req.foreach(_.bits := DontCare)
io.out.foreach(_.valid := false.B)
io.out.foreach(_.bits := DontCare)
}
}
| module IngressUnit_3( // @[IngressUnit.scala:11:7]
input clock, // @[IngressUnit.scala:11:7]
input reset, // @[IngressUnit.scala:11:7]
input io_vcalloc_req_ready, // @[IngressUnit.scala:24:14]
output io_vcalloc_req_valid, // @[IngressUnit.scala:24:14]
output io_vcalloc_req_bits_vc_sel_2_0, // @[IngressUnit.scala:24:14]
output io_vcalloc_req_bits_vc_sel_1_0, // @[IngressUnit.scala:24:14]
output io_vcalloc_req_bits_vc_sel_0_0, // @[IngressUnit.scala:24:14]
output io_vcalloc_req_bits_vc_sel_0_1, // @[IngressUnit.scala:24:14]
output io_vcalloc_req_bits_vc_sel_0_2, // @[IngressUnit.scala:24:14]
output io_vcalloc_req_bits_vc_sel_0_3, // @[IngressUnit.scala:24:14]
output io_vcalloc_req_bits_vc_sel_0_4, // @[IngressUnit.scala:24:14]
output io_vcalloc_req_bits_vc_sel_0_5, // @[IngressUnit.scala:24:14]
output io_vcalloc_req_bits_vc_sel_0_6, // @[IngressUnit.scala:24:14]
output io_vcalloc_req_bits_vc_sel_0_7, // @[IngressUnit.scala:24:14]
input io_vcalloc_resp_vc_sel_2_0, // @[IngressUnit.scala:24:14]
input io_vcalloc_resp_vc_sel_1_0, // @[IngressUnit.scala:24:14]
input io_vcalloc_resp_vc_sel_0_0, // @[IngressUnit.scala:24:14]
input io_vcalloc_resp_vc_sel_0_1, // @[IngressUnit.scala:24:14]
input io_vcalloc_resp_vc_sel_0_2, // @[IngressUnit.scala:24:14]
input io_vcalloc_resp_vc_sel_0_3, // @[IngressUnit.scala:24:14]
input io_vcalloc_resp_vc_sel_0_4, // @[IngressUnit.scala:24:14]
input io_vcalloc_resp_vc_sel_0_5, // @[IngressUnit.scala:24:14]
input io_vcalloc_resp_vc_sel_0_6, // @[IngressUnit.scala:24:14]
input io_vcalloc_resp_vc_sel_0_7, // @[IngressUnit.scala:24:14]
input io_out_credit_available_2_0, // @[IngressUnit.scala:24:14]
input io_out_credit_available_1_0, // @[IngressUnit.scala:24:14]
input io_out_credit_available_0_0, // @[IngressUnit.scala:24:14]
input io_out_credit_available_0_1, // @[IngressUnit.scala:24:14]
input io_out_credit_available_0_2, // @[IngressUnit.scala:24:14]
input io_out_credit_available_0_3, // @[IngressUnit.scala:24:14]
input io_out_credit_available_0_4, // @[IngressUnit.scala:24:14]
input io_out_credit_available_0_5, // @[IngressUnit.scala:24:14]
input io_out_credit_available_0_6, // @[IngressUnit.scala:24:14]
input io_out_credit_available_0_7, // @[IngressUnit.scala:24:14]
input io_salloc_req_0_ready, // @[IngressUnit.scala:24:14]
output io_salloc_req_0_valid, // @[IngressUnit.scala:24:14]
output io_salloc_req_0_bits_vc_sel_2_0, // @[IngressUnit.scala:24:14]
output io_salloc_req_0_bits_vc_sel_1_0, // @[IngressUnit.scala:24:14]
output io_salloc_req_0_bits_vc_sel_0_0, // @[IngressUnit.scala:24:14]
output io_salloc_req_0_bits_vc_sel_0_1, // @[IngressUnit.scala:24:14]
output io_salloc_req_0_bits_vc_sel_0_2, // @[IngressUnit.scala:24:14]
output io_salloc_req_0_bits_vc_sel_0_3, // @[IngressUnit.scala:24:14]
output io_salloc_req_0_bits_vc_sel_0_4, // @[IngressUnit.scala:24:14]
output io_salloc_req_0_bits_vc_sel_0_5, // @[IngressUnit.scala:24:14]
output io_salloc_req_0_bits_vc_sel_0_6, // @[IngressUnit.scala:24:14]
output io_salloc_req_0_bits_vc_sel_0_7, // @[IngressUnit.scala:24:14]
output io_salloc_req_0_bits_tail, // @[IngressUnit.scala:24:14]
output io_out_0_valid, // @[IngressUnit.scala:24:14]
output io_out_0_bits_flit_head, // @[IngressUnit.scala:24:14]
output io_out_0_bits_flit_tail, // @[IngressUnit.scala:24:14]
output [72:0] io_out_0_bits_flit_payload, // @[IngressUnit.scala:24:14]
output [2:0] io_out_0_bits_flit_flow_vnet_id, // @[IngressUnit.scala:24:14]
output [4:0] io_out_0_bits_flit_flow_ingress_node, // @[IngressUnit.scala:24:14]
output [1:0] io_out_0_bits_flit_flow_ingress_node_id, // @[IngressUnit.scala:24:14]
output [4:0] io_out_0_bits_flit_flow_egress_node, // @[IngressUnit.scala:24:14]
output [1:0] io_out_0_bits_flit_flow_egress_node_id, // @[IngressUnit.scala:24:14]
output [2:0] io_out_0_bits_out_virt_channel, // @[IngressUnit.scala:24:14]
output io_in_ready, // @[IngressUnit.scala:24:14]
input io_in_valid, // @[IngressUnit.scala:24:14]
input io_in_bits_head, // @[IngressUnit.scala:24:14]
input io_in_bits_tail, // @[IngressUnit.scala:24:14]
input [72:0] io_in_bits_payload, // @[IngressUnit.scala:24:14]
input [4:0] io_in_bits_egress_id // @[IngressUnit.scala:24:14]
);
wire _GEN; // @[Decoupled.scala:51:35]
wire _vcalloc_q_io_enq_ready; // @[IngressUnit.scala:76:25]
wire _vcalloc_q_io_deq_valid; // @[IngressUnit.scala:76:25]
wire _vcalloc_q_io_deq_bits_vc_sel_2_0; // @[IngressUnit.scala:76:25]
wire _vcalloc_q_io_deq_bits_vc_sel_1_0; // @[IngressUnit.scala:76:25]
wire _vcalloc_q_io_deq_bits_vc_sel_0_0; // @[IngressUnit.scala:76:25]
wire _vcalloc_q_io_deq_bits_vc_sel_0_1; // @[IngressUnit.scala:76:25]
wire _vcalloc_q_io_deq_bits_vc_sel_0_2; // @[IngressUnit.scala:76:25]
wire _vcalloc_q_io_deq_bits_vc_sel_0_3; // @[IngressUnit.scala:76:25]
wire _vcalloc_q_io_deq_bits_vc_sel_0_4; // @[IngressUnit.scala:76:25]
wire _vcalloc_q_io_deq_bits_vc_sel_0_5; // @[IngressUnit.scala:76:25]
wire _vcalloc_q_io_deq_bits_vc_sel_0_6; // @[IngressUnit.scala:76:25]
wire _vcalloc_q_io_deq_bits_vc_sel_0_7; // @[IngressUnit.scala:76:25]
wire _vcalloc_buffer_io_enq_ready; // @[IngressUnit.scala:75:30]
wire _vcalloc_buffer_io_deq_valid; // @[IngressUnit.scala:75:30]
wire _vcalloc_buffer_io_deq_bits_head; // @[IngressUnit.scala:75:30]
wire _vcalloc_buffer_io_deq_bits_tail; // @[IngressUnit.scala:75:30]
wire [72:0] _vcalloc_buffer_io_deq_bits_payload; // @[IngressUnit.scala:75:30]
wire [2:0] _vcalloc_buffer_io_deq_bits_flow_vnet_id; // @[IngressUnit.scala:75:30]
wire [4:0] _vcalloc_buffer_io_deq_bits_flow_ingress_node; // @[IngressUnit.scala:75:30]
wire [1:0] _vcalloc_buffer_io_deq_bits_flow_ingress_node_id; // @[IngressUnit.scala:75:30]
wire [4:0] _vcalloc_buffer_io_deq_bits_flow_egress_node; // @[IngressUnit.scala:75:30]
wire [1:0] _vcalloc_buffer_io_deq_bits_flow_egress_node_id; // @[IngressUnit.scala:75:30]
wire _route_q_io_enq_ready; // @[IngressUnit.scala:27:23]
wire _route_q_io_deq_valid; // @[IngressUnit.scala:27:23]
wire _route_buffer_io_enq_ready; // @[IngressUnit.scala:26:28]
wire _route_buffer_io_deq_valid; // @[IngressUnit.scala:26:28]
wire _route_buffer_io_deq_bits_head; // @[IngressUnit.scala:26:28]
wire _route_buffer_io_deq_bits_tail; // @[IngressUnit.scala:26:28]
wire [72:0] _route_buffer_io_deq_bits_payload; // @[IngressUnit.scala:26:28]
wire [2:0] _route_buffer_io_deq_bits_flow_vnet_id; // @[IngressUnit.scala:26:28]
wire [4:0] _route_buffer_io_deq_bits_flow_ingress_node; // @[IngressUnit.scala:26:28]
wire [1:0] _route_buffer_io_deq_bits_flow_ingress_node_id; // @[IngressUnit.scala:26:28]
wire [4:0] _route_buffer_io_deq_bits_flow_egress_node; // @[IngressUnit.scala:26:28]
wire [1:0] _route_buffer_io_deq_bits_flow_egress_node_id; // @[IngressUnit.scala:26:28]
wire [2:0] _route_buffer_io_deq_bits_virt_channel_id; // @[IngressUnit.scala:26:28]
wire _route_buffer_io_enq_bits_flow_egress_node_id_T = io_in_bits_egress_id == 5'h18; // @[IngressUnit.scala:30:72]
wire _route_buffer_io_enq_bits_flow_egress_node_id_T_1 = io_in_bits_egress_id == 5'h15; // @[IngressUnit.scala:30:72]
wire _route_buffer_io_enq_bits_flow_egress_node_id_T_2 = io_in_bits_egress_id == 5'h12; // @[IngressUnit.scala:30:72]
wire _route_buffer_io_enq_bits_flow_egress_node_id_T_3 = io_in_bits_egress_id == 5'h1B; // @[IngressUnit.scala:30:72]
wire _route_buffer_io_enq_bits_flow_egress_node_id_T_4 = io_in_bits_egress_id == 5'h1E; // @[IngressUnit.scala:30:72]
wire [2:0] _route_buffer_io_enq_bits_flow_egress_node_T_10 = (_route_buffer_io_enq_bits_flow_egress_node_id_T ? 3'h6 : 3'h0) | (_route_buffer_io_enq_bits_flow_egress_node_id_T_1 ? 3'h5 : 3'h0); // @[Mux.scala:30:73]
wire [3:0] _route_buffer_io_enq_bits_flow_egress_node_T_13 = {1'h0, _route_buffer_io_enq_bits_flow_egress_node_T_10[2], _route_buffer_io_enq_bits_flow_egress_node_T_10[1:0] | {2{_route_buffer_io_enq_bits_flow_egress_node_id_T_2}}} | (_route_buffer_io_enq_bits_flow_egress_node_id_T_3 ? 4'h9 : 4'h0) | (_route_buffer_io_enq_bits_flow_egress_node_id_T_4 ? 4'hA : 4'h0); // @[Mux.scala:30:73]
assign _GEN = _route_buffer_io_enq_ready & io_in_valid & io_in_bits_head & _route_buffer_io_enq_bits_flow_egress_node_T_13 == 4'h1; // @[Mux.scala:30:73]
wire route_q_io_enq_valid = _GEN | io_in_valid & _route_buffer_io_enq_ready & io_in_bits_head & _route_buffer_io_enq_bits_flow_egress_node_T_13 != 4'h1; // @[Mux.scala:30:73]
wire io_vcalloc_req_valid_0 = _route_buffer_io_deq_valid & _route_q_io_deq_valid & _route_buffer_io_deq_bits_head & _vcalloc_buffer_io_enq_ready & _vcalloc_q_io_enq_ready; // @[IngressUnit.scala:26:28, :27:23, :75:30, :76:25, :91:{54,78}, :92:{10,41}]
wire route_buffer_io_deq_ready = _vcalloc_buffer_io_enq_ready & (_route_q_io_deq_valid | ~_route_buffer_io_deq_bits_head) & (io_vcalloc_req_ready | ~_route_buffer_io_deq_bits_head) & (_vcalloc_q_io_enq_ready | ~_route_buffer_io_deq_bits_head); // @[IngressUnit.scala:26:28, :27:23, :75:30, :76:25, :88:30, :93:61, :94:{27,37}, :95:{27,37}, :96:29]
wire vcalloc_q_io_enq_valid = io_vcalloc_req_ready & io_vcalloc_req_valid_0; // @[Decoupled.scala:51:35] |
Generate the Verilog code corresponding to the following Chisel files.
File Buffer.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import org.chipsalliance.diplomacy.lazymodule._
import freechips.rocketchip.diplomacy.BufferParams
class TLBufferNode (
a: BufferParams,
b: BufferParams,
c: BufferParams,
d: BufferParams,
e: BufferParams)(implicit valName: ValName) extends TLAdapterNode(
clientFn = { p => p.v1copy(minLatency = p.minLatency + b.latency + c.latency) },
managerFn = { p => p.v1copy(minLatency = p.minLatency + a.latency + d.latency) }
) {
override lazy val nodedebugstring = s"a:${a.toString}, b:${b.toString}, c:${c.toString}, d:${d.toString}, e:${e.toString}"
override def circuitIdentity = List(a,b,c,d,e).forall(_ == BufferParams.none)
}
class TLBuffer(
a: BufferParams,
b: BufferParams,
c: BufferParams,
d: BufferParams,
e: BufferParams)(implicit p: Parameters) extends LazyModule
{
def this(ace: BufferParams, bd: BufferParams)(implicit p: Parameters) = this(ace, bd, ace, bd, ace)
def this(abcde: BufferParams)(implicit p: Parameters) = this(abcde, abcde)
def this()(implicit p: Parameters) = this(BufferParams.default)
val node = new TLBufferNode(a, b, c, d, e)
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
def headBundle = node.out.head._2.bundle
override def desiredName = (Seq("TLBuffer") ++ node.out.headOption.map(_._2.bundle.shortName)).mkString("_")
(node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
out.a <> a(in .a)
in .d <> d(out.d)
if (edgeOut.manager.anySupportAcquireB && edgeOut.client.anySupportProbe) {
in .b <> b(out.b)
out.c <> c(in .c)
out.e <> e(in .e)
} else {
in.b.valid := false.B
in.c.ready := true.B
in.e.ready := true.B
out.b.ready := true.B
out.c.valid := false.B
out.e.valid := false.B
}
}
}
}
object TLBuffer
{
def apply() (implicit p: Parameters): TLNode = apply(BufferParams.default)
def apply(abcde: BufferParams) (implicit p: Parameters): TLNode = apply(abcde, abcde)
def apply(ace: BufferParams, bd: BufferParams)(implicit p: Parameters): TLNode = apply(ace, bd, ace, bd, ace)
def apply(
a: BufferParams,
b: BufferParams,
c: BufferParams,
d: BufferParams,
e: BufferParams)(implicit p: Parameters): TLNode =
{
val buffer = LazyModule(new TLBuffer(a, b, c, d, e))
buffer.node
}
def chain(depth: Int, name: Option[String] = None)(implicit p: Parameters): Seq[TLNode] = {
val buffers = Seq.fill(depth) { LazyModule(new TLBuffer()) }
name.foreach { n => buffers.zipWithIndex.foreach { case (b, i) => b.suggestName(s"${n}_${i}") } }
buffers.map(_.node)
}
def chainNode(depth: Int, name: Option[String] = None)(implicit p: Parameters): TLNode = {
chain(depth, name)
.reduceLeftOption(_ :*=* _)
.getOrElse(TLNameNode("no_buffer"))
}
}
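// Editor's note: illustrative sketch added during editing, not part of the original file. It
// shows the usual way TLBuffer is spliced into a diplomatic graph from inside a LazyModule;
// the identity nodes stand in for real client/manager nodes and are hypothetical.
class TLBufferUsageExample(implicit p: Parameters) extends LazyModule {
  val from_client = TLIdentityNode()
  val to_manager  = TLIdentityNode()
  // One buffer stage on all five channels between the two nodes.
  to_manager := TLBuffer(BufferParams.default) := from_client
  lazy val module = new LazyModuleImp(this) { }
}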
File LazyModuleImp.scala:
package org.chipsalliance.diplomacy.lazymodule
import chisel3.{withClockAndReset, Module, RawModule, Reset, _}
import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo}
import firrtl.passes.InlineAnnotation
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.nodes.Dangle
import scala.collection.immutable.SortedMap
/** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]].
*
* This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy.
*/
sealed trait LazyModuleImpLike extends RawModule {
/** [[LazyModule]] that contains this instance. */
val wrapper: LazyModule
/** IOs that will be automatically "punched" for this instance. */
val auto: AutoBundle
/** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */
protected[diplomacy] val dangles: Seq[Dangle]
// [[wrapper.module]] had better not be accessed while LazyModules are still being built!
require(
LazyModule.scope.isEmpty,
s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}"
)
/** Set module name. Defaults to the containing LazyModule's desiredName. */
override def desiredName: String = wrapper.desiredName
suggestName(wrapper.suggestedName)
/** [[Parameters]] for chisel [[Module]]s. */
implicit val p: Parameters = wrapper.p
  /** Instantiate this [[LazyModule]], returning the [[AutoBundle]] and any unconnected [[Dangle]]s from this module and
* submodules.
*/
protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = {
// 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]],
// 2. return [[Dangle]]s from each module.
val childDangles = wrapper.children.reverse.flatMap { c =>
implicit val sourceInfo: SourceInfo = c.info
c.cloneProto.map { cp =>
// If the child is a clone, then recursively set cloneProto of its children as well
def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = {
require(bases.size == clones.size)
(bases.zip(clones)).map { case (l, r) =>
require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}")
l.cloneProto = Some(r)
assignCloneProtos(l.children, r.children)
}
}
assignCloneProtos(c.children, cp.children)
// Clone the child module as a record, and get its [[AutoBundle]]
val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName)
val clonedAuto = clone("auto").asInstanceOf[AutoBundle]
// Get the empty [[Dangle]]'s of the cloned child
val rawDangles = c.cloneDangles()
require(rawDangles.size == clonedAuto.elements.size)
// Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s
val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) }
dangles
}.getOrElse {
// For non-clones, instantiate the child module
val mod = try {
Module(c.module)
} catch {
case e: ChiselException => {
println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}")
throw e
}
}
mod.dangles
}
}
// Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]].
// This will result in a sequence of [[Dangle]] from these [[BaseNode]]s.
val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate())
// Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]]
val allDangles = nodeDangles ++ childDangles
// Group [[allDangles]] by their [[source]].
val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*)
// For each [[source]] set of [[Dangle]]s of size 2, ensure that these
// can be connected as a source-sink pair (have opposite flipped value).
// Make the connection and mark them as [[done]].
val done = Set() ++ pairing.values.filter(_.size == 2).map {
case Seq(a, b) =>
require(a.flipped != b.flipped)
// @todo <> in chisel3 makes directionless connection.
if (a.flipped) {
a.data <> b.data
} else {
b.data <> a.data
}
a.source
case _ => None
}
// Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module.
val forward = allDangles.filter(d => !done(d.source))
// Generate [[AutoBundle]] IO from [[forward]].
val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*))
// Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]]
val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) =>
if (d.flipped) {
d.data <> io
} else {
io <> d.data
}
d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name)
}
// Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]].
wrapper.inModuleBody.reverse.foreach {
_()
}
if (wrapper.shouldBeInlined) {
chisel3.experimental.annotate(new ChiselAnnotation {
def toFirrtl = InlineAnnotation(toNamed)
})
}
// Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]].
(auto, dangles)
}
}
/** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike {
/** Instantiate hardware of this `Module`. */
val (auto, dangles) = instantiate()
}
/** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike {
// These wires are the default clock+reset for all LazyModule children.
// It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the
// [[LazyRawModuleImp]] children.
// Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly.
/** drive clock explicitly. */
val childClock: Clock = Wire(Clock())
/** drive reset explicitly. */
val childReset: Reset = Wire(Reset())
// the default is that these are disabled
childClock := false.B.asClock
childReset := chisel3.DontCare
def provideImplicitClockToLazyChildren: Boolean = false
val (auto, dangles) =
if (provideImplicitClockToLazyChildren) {
withClockAndReset(childClock, childReset) { instantiate() }
} else {
instantiate()
}
}
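// Editor's note: illustrative sketch added during editing, not part of the original file. A
// wrapper whose implementation is a LazyRawModuleImp has no implicit clock or reset, so it is
// expected to drive childClock/childReset itself (and opt in to forwarding them to its lazy
// children); all names here are hypothetical.
class RawClockingExample(implicit p: Parameters) extends LazyModule {
  lazy val module = new LazyRawModuleImp(this) {
    override def provideImplicitClockToLazyChildren: Boolean = true
    val clockIn = IO(Input(Clock()))
    val resetIn = IO(Input(AsyncReset()))
    childClock := clockIn
    childReset := resetIn
  }
}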
File MixedNode.scala:
package org.chipsalliance.diplomacy.nodes
import chisel3.{Data, DontCare, Wire}
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.{Field, Parameters}
import org.chipsalliance.diplomacy.ValName
import org.chipsalliance.diplomacy.sourceLine
/** One side metadata of a [[Dangle]].
*
* Describes one side of an edge going into or out of a [[BaseNode]].
*
* @param serial
* the global [[BaseNode.serial]] number of the [[BaseNode]] that this [[HalfEdge]] connects to.
* @param index
* the `index` in the [[BaseNode]]'s input or output port list that this [[HalfEdge]] belongs to.
*/
case class HalfEdge(serial: Int, index: Int) extends Ordered[HalfEdge] {
import scala.math.Ordered.orderingToOrdered
def compare(that: HalfEdge): Int = HalfEdge.unapply(this).compare(HalfEdge.unapply(that))
}
/** [[Dangle]] captures the `IO` information of a [[LazyModule]] and which two [[BaseNode]]s the [[Edges]]/[[Bundle]]
* connects.
*
* [[Dangle]]s are generated by [[BaseNode.instantiate]] using [[MixedNode.danglesOut]] and [[MixedNode.danglesIn]] ,
* [[LazyModuleImp.instantiate]] connects those that go to internal or explicit IO connections in a [[LazyModule]].
*
* @param source
* the source [[HalfEdge]] of this [[Dangle]], which captures the source [[BaseNode]] and the port `index` within
* that [[BaseNode]].
* @param sink
* sink [[HalfEdge]] of this [[Dangle]], which captures the sink [[BaseNode]] and the port `index` within that
* [[BaseNode]].
* @param flipped
* flip or not in [[AutoBundle.makeElements]]. If true this corresponds to `danglesOut`, if false it corresponds to
* `danglesIn`.
* @param dataOpt
* actual [[Data]] for the hardware connection. Can be empty if this belongs to a cloned module
*/
case class Dangle(source: HalfEdge, sink: HalfEdge, flipped: Boolean, name: String, dataOpt: Option[Data]) {
def data = dataOpt.get
}
/** [[Edges]] is a collection of parameters describing the functionality and connection for an interface, which is often
* derived from the interconnection protocol and can inform the parameterization of the hardware bundles that actually
* implement the protocol.
*/
case class Edges[EI, EO](in: Seq[EI], out: Seq[EO])
/** A field available in [[Parameters]] used to determine whether [[InwardNodeImp.monitor]] will be called. */
case object MonitorsEnabled extends Field[Boolean](true)
/** When rendering the edge in a graphical format, flip the order in which the edges' source and sink are presented.
*
* For example, when rendering graphML, yEd by default tries to put the source node vertically above the sink node, but
* [[RenderFlipped]] inverts this relationship. When a particular [[LazyModule]] contains both source nodes and sink
 * nodes, flipping the rendering of one node's edge will usually produce a more concise visual layout for the
* [[LazyModule]].
*/
case object RenderFlipped extends Field[Boolean](false)
/** The sealed node class in the package; all nodes are derived from it.
*
* @param inner
* Sink interface implementation.
* @param outer
* Source interface implementation.
* @param valName
* val name of this node.
* @tparam DI
 *   Downward-flowing parameters received on the inner side of the node. It is usually a collection of parameters
* describing the protocol parameters from a source. For an [[InwardNode]], it is determined by the connected
* [[OutwardNode]]. Since it can be connected to multiple sources, this parameter is always a Seq of source port
* parameters.
* @tparam UI
 *   Upward-flowing parameters generated by the inner side of the node. It is usually a collection of parameters describing
 *   the protocol parameters of a sink. For an [[InwardNode]], it is determined by the node itself.
* @tparam EI
 *   Edge Parameters describing a connection on the inner side of the node. It is usually a collection of transfers
* specified for a sink according to protocol.
* @tparam BI
* Bundle type used when connecting to the inner side of the node. It is a hardware interface of this sink interface.
 *   It should extend from [[chisel3.Data]], which represents the real hardware.
* @tparam DO
 *   Downward-flowing parameters generated on the outer side of the node. It is usually a collection of parameters
 *   describing the protocol parameters of a source. For an [[OutwardNode]], it is determined by the node itself.
* @tparam UO
 *   Upward-flowing parameters received by the outer side of the node. It is usually a collection of parameters describing
* the protocol parameters from a sink. For an [[OutwardNode]], it is determined by the connected [[InwardNode]].
* Since it can be connected to multiple sinks, this parameter is always a Seq of sink port parameters.
* @tparam EO
 *   Edge Parameters describing a connection on the outer side of the node. It is usually a collection of transfers
* specified for a source according to protocol.
* @tparam BO
* Bundle type used when connecting to the outer side of the node. It is a hardware interface of this source
 *   interface. It should extend from [[chisel3.Data]], which represents the real hardware.
*
* @note
* Call Graph of [[MixedNode]]
* - line `─`: source is process by a function and generate pass to others
* - Arrow `→`: target of arrow is generated by source
*
* {{{
* (from the other node)
* ┌─────────────────────────────────────────────────────────[[InwardNode.uiParams]]─────────────┐
* ↓ │
* (binding node when elaboration) [[OutwardNode.uoParams]]────────────────────────[[MixedNode.mapParamsU]]→──────────┐ │
* [[InwardNode.accPI]] │ │ │
* │ │ (based on protocol) │
* │ │ [[MixedNode.inner.edgeI]] │
* │ │ ↓ │
* ↓ │ │ │
* (immobilize after elaboration) (inward port from [[OutwardNode]]) │ ↓ │
* [[InwardNode.iBindings]]──┐ [[MixedNode.iDirectPorts]]────────────────────→[[MixedNode.iPorts]] [[InwardNode.uiParams]] │
* │ │ ↑ │ │ │
* │ │ │ [[OutwardNode.doParams]] │ │
* │ │ │ (from the other node) │ │
* │ │ │ │ │ │
* │ │ │ │ │ │
* │ │ │ └────────┬──────────────┤ │
* │ │ │ │ │ │
* │ │ │ │ (based on protocol) │
* │ │ │ │ [[MixedNode.inner.edgeI]] │
* │ │ │ │ │ │
* │ │ (from the other node) │ ↓ │
* │ └───[[OutwardNode.oPortMapping]] [[OutwardNode.oStar]] │ [[MixedNode.edgesIn]]───┐ │
* │ ↑ ↑ │ │ ↓ │
* │ │ │ │ │ [[MixedNode.in]] │
* │ │ │ │ ↓ ↑ │
* │ (solve star connection) │ │ │ [[MixedNode.bundleIn]]──┘ │
* ├───[[MixedNode.resolveStar]]→─┼─────────────────────────────┤ └────────────────────────────────────┐ │
* │ │ │ [[MixedNode.bundleOut]]─┐ │ │
* │ │ │ ↑ ↓ │ │
* │ │ │ │ [[MixedNode.out]] │ │
* │ ↓ ↓ │ ↑ │ │
* │ ┌─────[[InwardNode.iPortMapping]] [[InwardNode.iStar]] [[MixedNode.edgesOut]]──┘ │ │
* │ │ (from the other node) ↑ │ │
* │ │ │ │ │ │
* │ │ │ [[MixedNode.outer.edgeO]] │ │
* │ │ │ (based on protocol) │ │
* │ │ │ │ │ │
* │ │ │ ┌────────────────────────────────────────┤ │ │
* │ │ │ │ │ │ │
* │ │ │ │ │ │ │
* │ │ │ │ │ │ │
* (immobilize after elaboration)│ ↓ │ │ │ │
* [[OutwardNode.oBindings]]─┘ [[MixedNode.oDirectPorts]]───→[[MixedNode.oPorts]] [[OutwardNode.doParams]] │ │
* ↑ (inward port from [[OutwardNode]]) │ │ │ │
* │ ┌─────────────────────────────────────────┤ │ │ │
* │ │ │ │ │ │
* │ │ │ │ │ │
* [[OutwardNode.accPO]] │ ↓ │ │ │
* (binding node when elaboration) │ [[InwardNode.diParams]]─────→[[MixedNode.mapParamsD]]────────────────────────────┘ │ │
* │ ↑ │ │
* │ └──────────────────────────────────────────────────────────────────────────────────────────┘ │
* └──────────────────────────────────────────────────────────────────────────────────────────────────────────┘
* }}}
*/
abstract class MixedNode[DI, UI, EI, BI <: Data, DO, UO, EO, BO <: Data](
val inner: InwardNodeImp[DI, UI, EI, BI],
val outer: OutwardNodeImp[DO, UO, EO, BO]
)(
implicit valName: ValName)
extends BaseNode
with NodeHandle[DI, UI, EI, BI, DO, UO, EO, BO]
with InwardNode[DI, UI, BI]
with OutwardNode[DO, UO, BO] {
// Generate a [[NodeHandle]] with inward and outward node are both this node.
val inward = this
val outward = this
/** Debug info of nodes binding. */
def bindingInfo: String = s"""$iBindingInfo
|$oBindingInfo
|""".stripMargin
/** Debug info of ports connecting. */
def connectedPortsInfo: String = s"""${oPorts.size} outward ports connected: [${oPorts.map(_._2.name).mkString(",")}]
|${iPorts.size} inward ports connected: [${iPorts.map(_._2.name).mkString(",")}]
|""".stripMargin
/** Debug info of parameters propagations. */
def parametersInfo: String = s"""${doParams.size} downstream outward parameters: [${doParams.mkString(",")}]
|${uoParams.size} upstream outward parameters: [${uoParams.mkString(",")}]
|${diParams.size} downstream inward parameters: [${diParams.mkString(",")}]
|${uiParams.size} upstream inward parameters: [${uiParams.mkString(",")}]
|""".stripMargin
/** For a given node, converts [[OutwardNode.accPO]] and [[InwardNode.accPI]] to [[MixedNode.oPortMapping]] and
* [[MixedNode.iPortMapping]].
*
* Given counts of known inward and outward binding and inward and outward star bindings, return the resolved inward
* stars and outward stars.
*
* This method will also validate the arguments and throw a runtime error if the values are unsuitable for this type
* of node.
*
* @param iKnown
* Number of known-size ([[BIND_ONCE]]) input bindings.
* @param oKnown
* Number of known-size ([[BIND_ONCE]]) output bindings.
* @param iStar
* Number of unknown size ([[BIND_STAR]]) input bindings.
* @param oStar
* Number of unknown size ([[BIND_STAR]]) output bindings.
* @return
* A Tuple of the resolved number of input and output connections.
*/
protected[diplomacy] def resolveStar(iKnown: Int, oKnown: Int, iStar: Int, oStar: Int): (Int, Int)
/** Function to generate downward-flowing outward params from the downward-flowing input params and the current output
* ports.
*
* @param n
* The size of the output sequence to generate.
* @param p
* Sequence of downward-flowing input parameters of this node.
* @return
* A `n`-sized sequence of downward-flowing output edge parameters.
*/
protected[diplomacy] def mapParamsD(n: Int, p: Seq[DI]): Seq[DO]
/** Function to generate upward-flowing input parameters from the upward-flowing output parameters [[uiParams]].
*
* @param n
* Size of the output sequence.
* @param p
* Upward-flowing output edge parameters.
* @return
* A n-sized sequence of upward-flowing input edge parameters.
*/
protected[diplomacy] def mapParamsU(n: Int, p: Seq[UO]): Seq[UI]
/** @return
* The sink cardinality of the node, the number of outputs bound with [[BIND_QUERY]] summed with inputs bound with
* [[BIND_STAR]].
*/
protected[diplomacy] lazy val sinkCard: Int = oBindings.count(_._3 == BIND_QUERY) + iBindings.count(_._3 == BIND_STAR)
/** @return
* The source cardinality of this node, the number of inputs bound with [[BIND_QUERY]] summed with the number of
* output bindings bound with [[BIND_STAR]].
*/
protected[diplomacy] lazy val sourceCard: Int =
iBindings.count(_._3 == BIND_QUERY) + oBindings.count(_._3 == BIND_STAR)
/** @return list of nodes involved in flex bindings with this node. */
protected[diplomacy] lazy val flexes: Seq[BaseNode] =
oBindings.filter(_._3 == BIND_FLEX).map(_._2) ++ iBindings.filter(_._3 == BIND_FLEX).map(_._2)
/** Resolves the flex to be either source or sink and returns the offset where the [[BIND_STAR]] operators begin
* greedily taking up the remaining connections.
*
* @return
* A value >= 0 if it is sink cardinality, a negative value for source cardinality. The magnitude of the return
* value is not relevant.
*/
protected[diplomacy] lazy val flexOffset: Int = {
/** Recursively performs a depth-first search of the [[flexes]], [[BaseNode]]s connected to this node with flex
* operators. The algorithm bottoms out when we either get to a node we have already visited or when we get to a
* connection that is not a flex and can set the direction for us. Otherwise, recurse by visiting the `flexes` of
* each node in the current set and decide whether they should be added to the set or not.
*
* @return
* the mapping of [[BaseNode]] indexed by their serial numbers.
*/
def DFS(v: BaseNode, visited: Map[Int, BaseNode]): Map[Int, BaseNode] = {
if (visited.contains(v.serial) || !v.flexibleArityDirection) {
visited
} else {
v.flexes.foldLeft(visited + (v.serial -> v))((sum, n) => DFS(n, sum))
}
}
    /** Determine which [[BaseNode]]s are involved in resolving the flex connections to/from this node.
*
* @example
* {{{
* a :*=* b :*=* c
* d :*=* b
* e :*=* f
* }}}
*
* `flexSet` for `a`, `b`, `c`, or `d` will be `Set(a, b, c, d)` `flexSet` for `e` or `f` will be `Set(e,f)`
*/
val flexSet = DFS(this, Map()).values
/** The total number of :*= operators where we're on the left. */
val allSink = flexSet.map(_.sinkCard).sum
/** The total number of :=* operators used when we're on the right. */
val allSource = flexSet.map(_.sourceCard).sum
require(
allSink == 0 || allSource == 0,
s"The nodes ${flexSet.map(_.name)} which are inter-connected by :*=* have ${allSink} :*= operators and ${allSource} :=* operators connected to them, making it impossible to determine cardinality inference direction."
)
allSink - allSource
}
/** @return A value >= 0 if it is sink cardinality, a negative value for source cardinality. */
protected[diplomacy] def edgeArityDirection(n: BaseNode): Int = {
if (flexibleArityDirection) flexOffset
else if (n.flexibleArityDirection) n.flexOffset
else 0
}
/** For a node which is connected between two nodes, select the one that will influence the direction of the flex
* resolution.
*/
protected[diplomacy] def edgeAritySelect(n: BaseNode, l: => Int, r: => Int): Int = {
val dir = edgeArityDirection(n)
if (dir < 0) l
else if (dir > 0) r
else 1
}
/** Ensure that the same node is not visited twice in resolving `:*=`, etc operators. */
private var starCycleGuard = false
  /** Resolve all the star operators into concrete indices. As connections are being made, some may be "star"
   * connections which need to be resolved in some way to determine how many actual edges they correspond to. We also
   * need to build up the ranges of edges which correspond to each binding operator, so that we can apply the correct
* edge parameters and later build up correct bundle connections.
*
* [[oPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that oPort (binding
* operator). [[iPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that iPort
* (binding operator). [[oStar]]: `Int` the value to return for this node `N` for any `N :*= foo` or `N :*=* foo :*=
* bar` [[iStar]]: `Int` the value to return for this node `N` for any `foo :=* N` or `bar :=* foo :*=* N`
*/
protected[diplomacy] lazy val (
oPortMapping: Seq[(Int, Int)],
iPortMapping: Seq[(Int, Int)],
oStar: Int,
iStar: Int
) = {
try {
if (starCycleGuard) throw StarCycleException()
starCycleGuard = true
// For a given node N...
// Number of foo :=* N
// + Number of bar :=* foo :*=* N
val oStars = oBindings.count { case (_, n, b, _, _) =>
b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) < 0)
}
// Number of N :*= foo
// + Number of N :*=* foo :*= bar
val iStars = iBindings.count { case (_, n, b, _, _) =>
b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) > 0)
}
// 1 for foo := N
// + bar.iStar for bar :*= foo :*=* N
// + foo.iStar for foo :*= N
// + 0 for foo :=* N
val oKnown = oBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, 0, n.iStar)
case BIND_QUERY => n.iStar
case BIND_STAR => 0
}
}.sum
// 1 for N := foo
// + bar.oStar for N :*=* foo :=* bar
// + foo.oStar for N :=* foo
// + 0 for N :*= foo
val iKnown = iBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, n.oStar, 0)
case BIND_QUERY => n.oStar
case BIND_STAR => 0
}
}.sum
// Resolve star depends on the node subclass to implement the algorithm for this.
val (iStar, oStar) = resolveStar(iKnown, oKnown, iStars, oStars)
// Cumulative list of resolved outward binding range starting points
val oSum = oBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, oStar, n.iStar)
case BIND_QUERY => n.iStar
case BIND_STAR => oStar
}
}.scanLeft(0)(_ + _)
// Cumulative list of resolved inward binding range starting points
val iSum = iBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, n.oStar, iStar)
case BIND_QUERY => n.oStar
case BIND_STAR => iStar
}
}.scanLeft(0)(_ + _)
// Create ranges for each binding based on the running sums and return
// those along with resolved values for the star operations.
(oSum.init.zip(oSum.tail), iSum.init.zip(iSum.tail), oStar, iStar)
} catch {
case c: StarCycleException => throw c.copy(loop = context +: c.loop)
}
}
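  // Editor's note (added during editing, not part of the original file): as an assumed worked
  // example, a node N with bindings `N :*= a` and `b := N` ends up with iStars = 1, iKnown = 0,
  // oStars = 0 and oKnown = 1 above; resolveStar then decides how many edges the single :*=
  // stands for, and the running sums slice those edges into one range per binding operator.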
/** Sequence of inward ports.
*
* This should be called after all star bindings are resolved.
*
* Each element is: `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding.
* `n` Instance of inward node. `p` View of [[Parameters]] where this connection was made. `s` Source info where this
* connection was made in the source code.
*/
protected[diplomacy] lazy val oDirectPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] =
oBindings.flatMap { case (i, n, _, p, s) =>
// for each binding operator in this node, look at what it connects to
val (start, end) = n.iPortMapping(i)
(start until end).map { j => (j, n, p, s) }
}
/** Sequence of outward ports.
*
* This should be called after all star bindings are resolved.
*
* `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding. `n` Instance of
* outward node. `p` View of [[Parameters]] where this connection was made. `s` [[SourceInfo]] where this connection
* was made in the source code.
*/
protected[diplomacy] lazy val iDirectPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] =
iBindings.flatMap { case (i, n, _, p, s) =>
// query this port index range of this node in the other side of node.
val (start, end) = n.oPortMapping(i)
(start until end).map { j => (j, n, p, s) }
}
  // Ephemeral nodes (which have non-None iForward/oForward) have in_degree = out_degree
// Thus, there must exist an Eulerian path and the below algorithms terminate
@scala.annotation.tailrec
private def oTrace(
tuple: (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)
): (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo) = tuple match {
case (i, n, p, s) => n.iForward(i) match {
case None => (i, n, p, s)
case Some((j, m)) => oTrace((j, m, p, s))
}
}
@scala.annotation.tailrec
private def iTrace(
tuple: (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)
): (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo) = tuple match {
case (i, n, p, s) => n.oForward(i) match {
case None => (i, n, p, s)
case Some((j, m)) => iTrace((j, m, p, s))
}
}
/** Final output ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved.
*
* Each Port is a tuple of:
* - Numeric index of this binding in the [[InwardNode]] on the other end.
* - [[InwardNode]] on the other end of this binding.
* - A view of [[Parameters]] where the binding occurred.
* - [[SourceInfo]] for source-level error reporting.
*/
lazy val oPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] = oDirectPorts.map(oTrace)
/** Final input ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved.
*
* Each Port is a tuple of:
* - numeric index of this binding in [[OutwardNode]] on the other end.
* - [[OutwardNode]] on the other end of this binding.
* - a view of [[Parameters]] where the binding occurred.
* - [[SourceInfo]] for source-level error reporting.
*/
lazy val iPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] = iDirectPorts.map(iTrace)
private var oParamsCycleGuard = false
protected[diplomacy] lazy val diParams: Seq[DI] = iPorts.map { case (i, n, _, _) => n.doParams(i) }
protected[diplomacy] lazy val doParams: Seq[DO] = {
try {
if (oParamsCycleGuard) throw DownwardCycleException()
oParamsCycleGuard = true
val o = mapParamsD(oPorts.size, diParams)
require(
o.size == oPorts.size,
s"""Diplomacy has detected a problem with your graph:
|At the following node, the number of outward ports should equal the number of produced outward parameters.
|$context
|$connectedPortsInfo
|Downstreamed inward parameters: [${diParams.mkString(",")}]
|Produced outward parameters: [${o.mkString(",")}]
|""".stripMargin
)
o.map(outer.mixO(_, this))
} catch {
case c: DownwardCycleException => throw c.copy(loop = context +: c.loop)
}
}
private var iParamsCycleGuard = false
protected[diplomacy] lazy val uoParams: Seq[UO] = oPorts.map { case (o, n, _, _) => n.uiParams(o) }
protected[diplomacy] lazy val uiParams: Seq[UI] = {
try {
if (iParamsCycleGuard) throw UpwardCycleException()
iParamsCycleGuard = true
val i = mapParamsU(iPorts.size, uoParams)
require(
i.size == iPorts.size,
s"""Diplomacy has detected a problem with your graph:
|At the following node, the number of inward ports should equal the number of produced inward parameters.
|$context
|$connectedPortsInfo
|Upstreamed outward parameters: [${uoParams.mkString(",")}]
|Produced inward parameters: [${i.mkString(",")}]
|""".stripMargin
)
i.map(inner.mixI(_, this))
} catch {
case c: UpwardCycleException => throw c.copy(loop = context +: c.loop)
}
}
/** Outward edge parameters. */
protected[diplomacy] lazy val edgesOut: Seq[EO] =
(oPorts.zip(doParams)).map { case ((i, n, p, s), o) => outer.edgeO(o, n.uiParams(i), p, s) }
/** Inward edge parameters. */
protected[diplomacy] lazy val edgesIn: Seq[EI] =
(iPorts.zip(uiParams)).map { case ((o, n, p, s), i) => inner.edgeI(n.doParams(o), i, p, s) }
/** A tuple of the input edge parameters and output edge parameters for the edges bound to this node.
*
* If you need to access to the edges of a foreign Node, use this method (in/out create bundles).
*/
lazy val edges: Edges[EI, EO] = Edges(edgesIn, edgesOut)
/** Create actual Wires corresponding to the Bundles parameterized by the outward edges of this node. */
protected[diplomacy] lazy val bundleOut: Seq[BO] = edgesOut.map { e =>
val x = Wire(outer.bundleO(e)).suggestName(s"${valName.value}Out")
// TODO: Don't care unconnected forwarded diplomatic signals for compatibility issue,
// In the future, we should add an option to decide whether allowing unconnected in the LazyModule
x := DontCare
x
}
/** Create actual Wires corresponding to the Bundles parameterized by the inward edges of this node. */
protected[diplomacy] lazy val bundleIn: Seq[BI] = edgesIn.map { e =>
val x = Wire(inner.bundleI(e)).suggestName(s"${valName.value}In")
// TODO: Don't care unconnected forwarded diplomatic signals for compatibility issue,
// In the future, we should add an option to decide whether allowing unconnected in the LazyModule
x := DontCare
x
}
private def emptyDanglesOut: Seq[Dangle] = oPorts.zipWithIndex.map { case ((j, n, _, _), i) =>
Dangle(
source = HalfEdge(serial, i),
sink = HalfEdge(n.serial, j),
flipped = false,
name = wirePrefix + "out",
dataOpt = None
)
}
private def emptyDanglesIn: Seq[Dangle] = iPorts.zipWithIndex.map { case ((j, n, _, _), i) =>
Dangle(
source = HalfEdge(n.serial, j),
sink = HalfEdge(serial, i),
flipped = true,
name = wirePrefix + "in",
dataOpt = None
)
}
/** Create the [[Dangle]]s which describe the connections from this node output to other nodes inputs. */
protected[diplomacy] def danglesOut: Seq[Dangle] = emptyDanglesOut.zipWithIndex.map { case (d, i) =>
d.copy(dataOpt = Some(bundleOut(i)))
}
/** Create the [[Dangle]]s which describe the connections from this node input from other nodes outputs. */
protected[diplomacy] def danglesIn: Seq[Dangle] = emptyDanglesIn.zipWithIndex.map { case (d, i) =>
d.copy(dataOpt = Some(bundleIn(i)))
}
private[diplomacy] var instantiated = false
/** Gather Bundle and edge parameters of outward ports.
*
  * Accessors to the results of negotiation. Should only be used within
* [[LazyModuleImp]] code or after its instantiation has completed.
*/
def out: Seq[(BO, EO)] = {
require(
instantiated,
s"$name.out should not be called until after instantiation of its parent LazyModule.module has begun"
)
bundleOut.zip(edgesOut)
}
/** Gather Bundle and edge parameters of inward ports.
*
  * Accessors to the results of negotiation. Should only be used within
* [[LazyModuleImp]] code or after its instantiation has completed.
*/
def in: Seq[(BI, EI)] = {
require(
instantiated,
s"$name.in should not be called until after instantiation of its parent LazyModule.module has begun"
)
bundleIn.zip(edgesIn)
}
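// Hedged sketch (module and node names are illustrative): inside a LazyModuleImp
// these accessors pair each negotiated Bundle with its edge, so a simple
// pass-through body could be written as
//   (node.in.zip(node.out)).foreach { case ((i, _), (o, _)) => o <> i }
// Calling them before module evaluation has begun trips the require()s above.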
/** Actually instantiate this node during [[LazyModuleImp]] evaluation. Mark that it's safe to use the Bundle wires,
* instantiate monitors on all input ports if appropriate, and return all the dangles of this node.
*/
protected[diplomacy] def instantiate(): Seq[Dangle] = {
instantiated = true
if (!circuitIdentity) {
(iPorts.zip(in)).foreach { case ((_, _, p, _), (b, e)) => if (p(MonitorsEnabled)) inner.monitor(b, e) }
}
danglesOut ++ danglesIn
}
protected[diplomacy] def cloneDangles(): Seq[Dangle] = emptyDanglesOut ++ emptyDanglesIn
/** Connects the outward part of a node with the inward part of this node. */
protected[diplomacy] def bind(
h: OutwardNode[DI, UI, BI],
binding: NodeBinding
)(
implicit p: Parameters,
sourceInfo: SourceInfo
): Unit = {
val x = this // x := y
val y = h
sourceLine(sourceInfo, " at ", "")
val i = x.iPushed
val o = y.oPushed
y.oPush(
i,
x,
binding match {
case BIND_ONCE => BIND_ONCE
case BIND_FLEX => BIND_FLEX
case BIND_STAR => BIND_QUERY
case BIND_QUERY => BIND_STAR
}
)
x.iPush(o, y, binding)
}
/** Metadata for printing the node graph. */
def inputs: Seq[(OutwardNode[DI, UI, BI], RenderedEdge)] = (iPorts.zip(edgesIn)).map { case ((_, n, p, _), e) =>
val re = inner.render(e)
(n, re.copy(flipped = re.flipped != p(RenderFlipped)))
}
/** Metadata for printing the node graph */
def outputs: Seq[(InwardNode[DO, UO, BO], RenderedEdge)] = oPorts.map { case (i, n, _, _) => (n, n.inputs(i)._2) }
}
| module TLBuffer_a32d64s7k1z3u_1( // @[Buffer.scala:40:9]
input clock, // @[Buffer.scala:40:9]
input reset, // @[Buffer.scala:40:9]
output auto_in_a_ready, // @[LazyModuleImp.scala:107:25]
input auto_in_a_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_a_bits_param, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_a_bits_size, // @[LazyModuleImp.scala:107:25]
input [6:0] auto_in_a_bits_source, // @[LazyModuleImp.scala:107:25]
input [31:0] auto_in_a_bits_address, // @[LazyModuleImp.scala:107:25]
input [7:0] auto_in_a_bits_mask, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_in_a_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_in_a_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input auto_in_d_ready, // @[LazyModuleImp.scala:107:25]
output auto_in_d_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_in_d_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_in_d_bits_param, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_in_d_bits_size, // @[LazyModuleImp.scala:107:25]
output [6:0] auto_in_d_bits_source, // @[LazyModuleImp.scala:107:25]
output auto_in_d_bits_sink, // @[LazyModuleImp.scala:107:25]
output auto_in_d_bits_denied, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_in_d_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_in_d_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input auto_out_a_ready, // @[LazyModuleImp.scala:107:25]
output auto_out_a_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_a_bits_param, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_a_bits_size, // @[LazyModuleImp.scala:107:25]
output [6:0] auto_out_a_bits_source, // @[LazyModuleImp.scala:107:25]
output [31:0] auto_out_a_bits_address, // @[LazyModuleImp.scala:107:25]
output [7:0] auto_out_a_bits_mask, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_out_a_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_out_a_bits_corrupt, // @[LazyModuleImp.scala:107:25]
output auto_out_d_ready, // @[LazyModuleImp.scala:107:25]
input auto_out_d_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_out_d_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_out_d_bits_size, // @[LazyModuleImp.scala:107:25]
input [6:0] auto_out_d_bits_source, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_out_d_bits_data // @[LazyModuleImp.scala:107:25]
);
wire auto_in_a_valid_0 = auto_in_a_valid; // @[Buffer.scala:40:9]
wire [2:0] auto_in_a_bits_opcode_0 = auto_in_a_bits_opcode; // @[Buffer.scala:40:9]
wire [2:0] auto_in_a_bits_param_0 = auto_in_a_bits_param; // @[Buffer.scala:40:9]
wire [2:0] auto_in_a_bits_size_0 = auto_in_a_bits_size; // @[Buffer.scala:40:9]
wire [6:0] auto_in_a_bits_source_0 = auto_in_a_bits_source; // @[Buffer.scala:40:9]
wire [31:0] auto_in_a_bits_address_0 = auto_in_a_bits_address; // @[Buffer.scala:40:9]
wire [7:0] auto_in_a_bits_mask_0 = auto_in_a_bits_mask; // @[Buffer.scala:40:9]
wire [63:0] auto_in_a_bits_data_0 = auto_in_a_bits_data; // @[Buffer.scala:40:9]
wire auto_in_a_bits_corrupt_0 = auto_in_a_bits_corrupt; // @[Buffer.scala:40:9]
wire auto_in_d_ready_0 = auto_in_d_ready; // @[Buffer.scala:40:9]
wire auto_out_a_ready_0 = auto_out_a_ready; // @[Buffer.scala:40:9]
wire auto_out_d_valid_0 = auto_out_d_valid; // @[Buffer.scala:40:9]
wire [2:0] auto_out_d_bits_opcode_0 = auto_out_d_bits_opcode; // @[Buffer.scala:40:9]
wire [2:0] auto_out_d_bits_size_0 = auto_out_d_bits_size; // @[Buffer.scala:40:9]
wire [6:0] auto_out_d_bits_source_0 = auto_out_d_bits_source; // @[Buffer.scala:40:9]
wire [63:0] auto_out_d_bits_data_0 = auto_out_d_bits_data; // @[Buffer.scala:40:9]
wire auto_out_d_bits_sink = 1'h0; // @[Decoupled.scala:362:21]
wire auto_out_d_bits_denied = 1'h0; // @[Decoupled.scala:362:21]
wire auto_out_d_bits_corrupt = 1'h0; // @[Decoupled.scala:362:21]
wire nodeOut_d_bits_sink = 1'h0; // @[Decoupled.scala:362:21]
wire nodeOut_d_bits_denied = 1'h0; // @[Decoupled.scala:362:21]
wire nodeOut_d_bits_corrupt = 1'h0; // @[Decoupled.scala:362:21]
wire [1:0] auto_out_d_bits_param = 2'h0; // @[Decoupled.scala:362:21]
wire nodeIn_a_ready; // @[MixedNode.scala:551:17]
wire [1:0] nodeOut_d_bits_param = 2'h0; // @[Decoupled.scala:362:21]
wire nodeIn_a_valid = auto_in_a_valid_0; // @[Buffer.scala:40:9]
wire [2:0] nodeIn_a_bits_opcode = auto_in_a_bits_opcode_0; // @[Buffer.scala:40:9]
wire [2:0] nodeIn_a_bits_param = auto_in_a_bits_param_0; // @[Buffer.scala:40:9]
wire [2:0] nodeIn_a_bits_size = auto_in_a_bits_size_0; // @[Buffer.scala:40:9]
wire [6:0] nodeIn_a_bits_source = auto_in_a_bits_source_0; // @[Buffer.scala:40:9]
wire [31:0] nodeIn_a_bits_address = auto_in_a_bits_address_0; // @[Buffer.scala:40:9]
wire [7:0] nodeIn_a_bits_mask = auto_in_a_bits_mask_0; // @[Buffer.scala:40:9]
wire [63:0] nodeIn_a_bits_data = auto_in_a_bits_data_0; // @[Buffer.scala:40:9]
wire nodeIn_a_bits_corrupt = auto_in_a_bits_corrupt_0; // @[Buffer.scala:40:9]
wire nodeIn_d_ready = auto_in_d_ready_0; // @[Buffer.scala:40:9]
wire nodeIn_d_valid; // @[MixedNode.scala:551:17]
wire [2:0] nodeIn_d_bits_opcode; // @[MixedNode.scala:551:17]
wire [1:0] nodeIn_d_bits_param; // @[MixedNode.scala:551:17]
wire [2:0] nodeIn_d_bits_size; // @[MixedNode.scala:551:17]
wire [6:0] nodeIn_d_bits_source; // @[MixedNode.scala:551:17]
wire nodeIn_d_bits_sink; // @[MixedNode.scala:551:17]
wire nodeIn_d_bits_denied; // @[MixedNode.scala:551:17]
wire [63:0] nodeIn_d_bits_data; // @[MixedNode.scala:551:17]
wire nodeIn_d_bits_corrupt; // @[MixedNode.scala:551:17]
wire nodeOut_a_ready = auto_out_a_ready_0; // @[Buffer.scala:40:9]
wire nodeOut_a_valid; // @[MixedNode.scala:542:17]
wire [2:0] nodeOut_a_bits_opcode; // @[MixedNode.scala:542:17]
wire [2:0] nodeOut_a_bits_param; // @[MixedNode.scala:542:17]
wire [2:0] nodeOut_a_bits_size; // @[MixedNode.scala:542:17]
wire [6:0] nodeOut_a_bits_source; // @[MixedNode.scala:542:17]
wire [31:0] nodeOut_a_bits_address; // @[MixedNode.scala:542:17]
wire [7:0] nodeOut_a_bits_mask; // @[MixedNode.scala:542:17]
wire [63:0] nodeOut_a_bits_data; // @[MixedNode.scala:542:17]
wire nodeOut_a_bits_corrupt; // @[MixedNode.scala:542:17]
wire nodeOut_d_ready; // @[MixedNode.scala:542:17]
wire nodeOut_d_valid = auto_out_d_valid_0; // @[Buffer.scala:40:9]
wire [2:0] nodeOut_d_bits_opcode = auto_out_d_bits_opcode_0; // @[Buffer.scala:40:9]
wire [2:0] nodeOut_d_bits_size = auto_out_d_bits_size_0; // @[Buffer.scala:40:9]
wire [6:0] nodeOut_d_bits_source = auto_out_d_bits_source_0; // @[Buffer.scala:40:9]
wire [63:0] nodeOut_d_bits_data = auto_out_d_bits_data_0; // @[Buffer.scala:40:9]
wire auto_in_a_ready_0; // @[Buffer.scala:40:9]
wire [2:0] auto_in_d_bits_opcode_0; // @[Buffer.scala:40:9]
wire [1:0] auto_in_d_bits_param_0; // @[Buffer.scala:40:9]
wire [2:0] auto_in_d_bits_size_0; // @[Buffer.scala:40:9]
wire [6:0] auto_in_d_bits_source_0; // @[Buffer.scala:40:9]
wire auto_in_d_bits_sink_0; // @[Buffer.scala:40:9]
wire auto_in_d_bits_denied_0; // @[Buffer.scala:40:9]
wire [63:0] auto_in_d_bits_data_0; // @[Buffer.scala:40:9]
wire auto_in_d_bits_corrupt_0; // @[Buffer.scala:40:9]
wire auto_in_d_valid_0; // @[Buffer.scala:40:9]
wire [2:0] auto_out_a_bits_opcode_0; // @[Buffer.scala:40:9]
wire [2:0] auto_out_a_bits_param_0; // @[Buffer.scala:40:9]
wire [2:0] auto_out_a_bits_size_0; // @[Buffer.scala:40:9]
wire [6:0] auto_out_a_bits_source_0; // @[Buffer.scala:40:9]
wire [31:0] auto_out_a_bits_address_0; // @[Buffer.scala:40:9]
wire [7:0] auto_out_a_bits_mask_0; // @[Buffer.scala:40:9]
wire [63:0] auto_out_a_bits_data_0; // @[Buffer.scala:40:9]
wire auto_out_a_bits_corrupt_0; // @[Buffer.scala:40:9]
wire auto_out_a_valid_0; // @[Buffer.scala:40:9]
wire auto_out_d_ready_0; // @[Buffer.scala:40:9]
assign auto_in_a_ready_0 = nodeIn_a_ready; // @[Buffer.scala:40:9]
assign auto_in_d_valid_0 = nodeIn_d_valid; // @[Buffer.scala:40:9]
assign auto_in_d_bits_opcode_0 = nodeIn_d_bits_opcode; // @[Buffer.scala:40:9]
assign auto_in_d_bits_param_0 = nodeIn_d_bits_param; // @[Buffer.scala:40:9]
assign auto_in_d_bits_size_0 = nodeIn_d_bits_size; // @[Buffer.scala:40:9]
assign auto_in_d_bits_source_0 = nodeIn_d_bits_source; // @[Buffer.scala:40:9]
assign auto_in_d_bits_sink_0 = nodeIn_d_bits_sink; // @[Buffer.scala:40:9]
assign auto_in_d_bits_denied_0 = nodeIn_d_bits_denied; // @[Buffer.scala:40:9]
assign auto_in_d_bits_data_0 = nodeIn_d_bits_data; // @[Buffer.scala:40:9]
assign auto_in_d_bits_corrupt_0 = nodeIn_d_bits_corrupt; // @[Buffer.scala:40:9]
assign auto_out_a_valid_0 = nodeOut_a_valid; // @[Buffer.scala:40:9]
assign auto_out_a_bits_opcode_0 = nodeOut_a_bits_opcode; // @[Buffer.scala:40:9]
assign auto_out_a_bits_param_0 = nodeOut_a_bits_param; // @[Buffer.scala:40:9]
assign auto_out_a_bits_size_0 = nodeOut_a_bits_size; // @[Buffer.scala:40:9]
assign auto_out_a_bits_source_0 = nodeOut_a_bits_source; // @[Buffer.scala:40:9]
assign auto_out_a_bits_address_0 = nodeOut_a_bits_address; // @[Buffer.scala:40:9]
assign auto_out_a_bits_mask_0 = nodeOut_a_bits_mask; // @[Buffer.scala:40:9]
assign auto_out_a_bits_data_0 = nodeOut_a_bits_data; // @[Buffer.scala:40:9]
assign auto_out_a_bits_corrupt_0 = nodeOut_a_bits_corrupt; // @[Buffer.scala:40:9]
assign auto_out_d_ready_0 = nodeOut_d_ready; // @[Buffer.scala:40:9]
Queue2_TLBundleA_a32d64s7k1z3u nodeOut_a_q ( // @[Decoupled.scala:362:21]
.clock (clock),
.reset (reset),
.io_enq_ready (nodeIn_a_ready),
.io_enq_valid (nodeIn_a_valid), // @[MixedNode.scala:551:17]
.io_enq_bits_opcode (nodeIn_a_bits_opcode), // @[MixedNode.scala:551:17]
.io_enq_bits_param (nodeIn_a_bits_param), // @[MixedNode.scala:551:17]
.io_enq_bits_size (nodeIn_a_bits_size), // @[MixedNode.scala:551:17]
.io_enq_bits_source (nodeIn_a_bits_source), // @[MixedNode.scala:551:17]
.io_enq_bits_address (nodeIn_a_bits_address), // @[MixedNode.scala:551:17]
.io_enq_bits_mask (nodeIn_a_bits_mask), // @[MixedNode.scala:551:17]
.io_enq_bits_data (nodeIn_a_bits_data), // @[MixedNode.scala:551:17]
.io_enq_bits_corrupt (nodeIn_a_bits_corrupt), // @[MixedNode.scala:551:17]
.io_deq_ready (nodeOut_a_ready), // @[MixedNode.scala:542:17]
.io_deq_valid (nodeOut_a_valid),
.io_deq_bits_opcode (nodeOut_a_bits_opcode),
.io_deq_bits_param (nodeOut_a_bits_param),
.io_deq_bits_size (nodeOut_a_bits_size),
.io_deq_bits_source (nodeOut_a_bits_source),
.io_deq_bits_address (nodeOut_a_bits_address),
.io_deq_bits_mask (nodeOut_a_bits_mask),
.io_deq_bits_data (nodeOut_a_bits_data),
.io_deq_bits_corrupt (nodeOut_a_bits_corrupt)
); // @[Decoupled.scala:362:21]
Queue2_TLBundleD_a32d64s7k1z3u nodeIn_d_q ( // @[Decoupled.scala:362:21]
.clock (clock),
.reset (reset),
.io_enq_ready (nodeOut_d_ready),
.io_enq_valid (nodeOut_d_valid), // @[MixedNode.scala:542:17]
.io_enq_bits_opcode (nodeOut_d_bits_opcode), // @[MixedNode.scala:542:17]
.io_enq_bits_size (nodeOut_d_bits_size), // @[MixedNode.scala:542:17]
.io_enq_bits_source (nodeOut_d_bits_source), // @[MixedNode.scala:542:17]
.io_enq_bits_data (nodeOut_d_bits_data), // @[MixedNode.scala:542:17]
.io_deq_ready (nodeIn_d_ready), // @[MixedNode.scala:551:17]
.io_deq_valid (nodeIn_d_valid),
.io_deq_bits_opcode (nodeIn_d_bits_opcode),
.io_deq_bits_param (nodeIn_d_bits_param),
.io_deq_bits_size (nodeIn_d_bits_size),
.io_deq_bits_source (nodeIn_d_bits_source),
.io_deq_bits_sink (nodeIn_d_bits_sink),
.io_deq_bits_denied (nodeIn_d_bits_denied),
.io_deq_bits_data (nodeIn_d_bits_data),
.io_deq_bits_corrupt (nodeIn_d_bits_corrupt)
); // @[Decoupled.scala:362:21]
assign auto_in_a_ready = auto_in_a_ready_0; // @[Buffer.scala:40:9]
assign auto_in_d_valid = auto_in_d_valid_0; // @[Buffer.scala:40:9]
assign auto_in_d_bits_opcode = auto_in_d_bits_opcode_0; // @[Buffer.scala:40:9]
assign auto_in_d_bits_param = auto_in_d_bits_param_0; // @[Buffer.scala:40:9]
assign auto_in_d_bits_size = auto_in_d_bits_size_0; // @[Buffer.scala:40:9]
assign auto_in_d_bits_source = auto_in_d_bits_source_0; // @[Buffer.scala:40:9]
assign auto_in_d_bits_sink = auto_in_d_bits_sink_0; // @[Buffer.scala:40:9]
assign auto_in_d_bits_denied = auto_in_d_bits_denied_0; // @[Buffer.scala:40:9]
assign auto_in_d_bits_data = auto_in_d_bits_data_0; // @[Buffer.scala:40:9]
assign auto_in_d_bits_corrupt = auto_in_d_bits_corrupt_0; // @[Buffer.scala:40:9]
assign auto_out_a_valid = auto_out_a_valid_0; // @[Buffer.scala:40:9]
assign auto_out_a_bits_opcode = auto_out_a_bits_opcode_0; // @[Buffer.scala:40:9]
assign auto_out_a_bits_param = auto_out_a_bits_param_0; // @[Buffer.scala:40:9]
assign auto_out_a_bits_size = auto_out_a_bits_size_0; // @[Buffer.scala:40:9]
assign auto_out_a_bits_source = auto_out_a_bits_source_0; // @[Buffer.scala:40:9]
assign auto_out_a_bits_address = auto_out_a_bits_address_0; // @[Buffer.scala:40:9]
assign auto_out_a_bits_mask = auto_out_a_bits_mask_0; // @[Buffer.scala:40:9]
assign auto_out_a_bits_data = auto_out_a_bits_data_0; // @[Buffer.scala:40:9]
assign auto_out_a_bits_corrupt = auto_out_a_bits_corrupt_0; // @[Buffer.scala:40:9]
assign auto_out_d_ready = auto_out_d_ready_0; // @[Buffer.scala:40:9]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File IngressUnit.scala:
package constellation.router
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.{Field, Parameters}
import freechips.rocketchip.util._
import constellation.channel._
class IngressUnit(
ingressNodeId: Int,
cParam: IngressChannelParams,
outParams: Seq[ChannelParams],
egressParams: Seq[EgressChannelParams],
combineRCVA: Boolean,
combineSAST: Boolean,
)
(implicit p: Parameters) extends AbstractInputUnit(cParam, outParams, egressParams)(p) {
class IngressUnitIO extends AbstractInputUnitIO(cParam, outParams, egressParams) {
val in = Flipped(Decoupled(new IngressFlit(cParam.payloadBits)))
}
val io = IO(new IngressUnitIO)
val route_buffer = Module(new Queue(new Flit(cParam.payloadBits), 2))
val route_q = Module(new Queue(new RouteComputerResp(outParams, egressParams), 2,
flow=combineRCVA))
assert(!(io.in.valid && !cParam.possibleFlows.toSeq.map(_.egressId.U === io.in.bits.egress_id).orR))
route_buffer.io.enq.bits.head := io.in.bits.head
route_buffer.io.enq.bits.tail := io.in.bits.tail
val flows = cParam.possibleFlows.toSeq
if (flows.size == 0) {
route_buffer.io.enq.bits.flow := DontCare
} else {
route_buffer.io.enq.bits.flow.ingress_node := cParam.destId.U
route_buffer.io.enq.bits.flow.ingress_node_id := ingressNodeId.U
route_buffer.io.enq.bits.flow.vnet_id := cParam.vNetId.U
route_buffer.io.enq.bits.flow.egress_node := Mux1H(
flows.map(_.egressId.U === io.in.bits.egress_id),
flows.map(_.egressNode.U)
)
route_buffer.io.enq.bits.flow.egress_node_id := Mux1H(
flows.map(_.egressId.U === io.in.bits.egress_id),
flows.map(_.egressNodeId.U)
)
}
route_buffer.io.enq.bits.payload := io.in.bits.payload
route_buffer.io.enq.bits.virt_channel_id := DontCare
io.router_req.bits.src_virt_id := 0.U
io.router_req.bits.flow := route_buffer.io.enq.bits.flow
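// If the flit already egresses at this router's node, no route computation is
// needed; the route_q entry is filled in directly in the when-block below.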
val at_dest = route_buffer.io.enq.bits.flow.egress_node === nodeId.U
route_buffer.io.enq.valid := io.in.valid && (
io.router_req.ready || !io.in.bits.head || at_dest)
io.router_req.valid := io.in.valid && route_buffer.io.enq.ready && io.in.bits.head && !at_dest
io.in.ready := route_buffer.io.enq.ready && (
io.router_req.ready || !io.in.bits.head || at_dest)
route_q.io.enq.valid := io.router_req.fire
route_q.io.enq.bits := io.router_resp
when (io.in.fire && io.in.bits.head && at_dest) {
route_q.io.enq.valid := true.B
route_q.io.enq.bits.vc_sel.foreach(_.foreach(_ := false.B))
for (o <- 0 until nEgress) {
when (egressParams(o).egressId.U === io.in.bits.egress_id) {
route_q.io.enq.bits.vc_sel(o+nOutputs)(0) := true.B
}
}
}
assert(!(route_q.io.enq.valid && !route_q.io.enq.ready))
val vcalloc_buffer = Module(new Queue(new Flit(cParam.payloadBits), 2))
val vcalloc_q = Module(new Queue(new VCAllocResp(outParams, egressParams),
1, pipe=true))
vcalloc_buffer.io.enq.bits := route_buffer.io.deq.bits
io.vcalloc_req.bits.vc_sel := route_q.io.deq.bits.vc_sel
io.vcalloc_req.bits.flow := route_buffer.io.deq.bits.flow
io.vcalloc_req.bits.in_vc := 0.U
val head = route_buffer.io.deq.bits.head
val tail = route_buffer.io.deq.bits.tail
vcalloc_buffer.io.enq.valid := (route_buffer.io.deq.valid &&
(route_q.io.deq.valid || !head) &&
(io.vcalloc_req.ready || !head)
)
io.vcalloc_req.valid := (route_buffer.io.deq.valid && route_q.io.deq.valid &&
head && vcalloc_buffer.io.enq.ready && vcalloc_q.io.enq.ready)
route_buffer.io.deq.ready := (vcalloc_buffer.io.enq.ready &&
(route_q.io.deq.valid || !head) &&
(io.vcalloc_req.ready || !head) &&
(vcalloc_q.io.enq.ready || !head))
route_q.io.deq.ready := (route_buffer.io.deq.fire && tail)
vcalloc_q.io.enq.valid := io.vcalloc_req.fire
vcalloc_q.io.enq.bits := io.vcalloc_resp
assert(!(vcalloc_q.io.enq.valid && !vcalloc_q.io.enq.ready))
io.salloc_req(0).bits.vc_sel := vcalloc_q.io.deq.bits.vc_sel
io.salloc_req(0).bits.tail := vcalloc_buffer.io.deq.bits.tail
val c = (vcalloc_q.io.deq.bits.vc_sel.asUInt & io.out_credit_available.asUInt) =/= 0.U
val vcalloc_tail = vcalloc_buffer.io.deq.bits.tail
io.salloc_req(0).valid := vcalloc_buffer.io.deq.valid && vcalloc_q.io.deq.valid && c && !io.block
vcalloc_buffer.io.deq.ready := io.salloc_req(0).ready && vcalloc_q.io.deq.valid && c && !io.block
vcalloc_q.io.deq.ready := vcalloc_tail && vcalloc_buffer.io.deq.fire
val out_bundle = if (combineSAST) {
Wire(Valid(new SwitchBundle(outParams, egressParams)))
} else {
Reg(Valid(new SwitchBundle(outParams, egressParams)))
}
io.out(0) := out_bundle
out_bundle.valid := vcalloc_buffer.io.deq.fire
out_bundle.bits.flit := vcalloc_buffer.io.deq.bits
out_bundle.bits.flit.virt_channel_id := 0.U
val out_channel_oh = vcalloc_q.io.deq.bits.vc_sel.map(_.reduce(_||_)).toSeq
out_bundle.bits.out_virt_channel := Mux1H(out_channel_oh,
vcalloc_q.io.deq.bits.vc_sel.map(v => OHToUInt(v)).toSeq)
io.debug.va_stall := io.vcalloc_req.valid && !io.vcalloc_req.ready
io.debug.sa_stall := io.salloc_req(0).valid && !io.salloc_req(0).ready
// TODO: We should not generate input/ingress/output/egress units for untraversable channels
if (!cParam.traversable) {
io.in.ready := false.B
io.router_req.valid := false.B
io.router_req.bits := DontCare
io.vcalloc_req.valid := false.B
io.vcalloc_req.bits := DontCare
io.salloc_req.foreach(_.valid := false.B)
io.salloc_req.foreach(_.bits := DontCare)
io.out.foreach(_.valid := false.B)
io.out.foreach(_.bits := DontCare)
}
}
| module IngressUnit_13( // @[IngressUnit.scala:11:7]
input clock, // @[IngressUnit.scala:11:7]
input reset, // @[IngressUnit.scala:11:7]
output [3:0] io_router_req_bits_flow_egress_node, // @[IngressUnit.scala:24:14]
output [1:0] io_router_req_bits_flow_egress_node_id, // @[IngressUnit.scala:24:14]
input io_router_resp_vc_sel_3_0, // @[IngressUnit.scala:24:14]
input io_router_resp_vc_sel_3_1, // @[IngressUnit.scala:24:14]
input io_router_resp_vc_sel_3_2, // @[IngressUnit.scala:24:14]
input io_router_resp_vc_sel_2_0, // @[IngressUnit.scala:24:14]
input io_router_resp_vc_sel_2_1, // @[IngressUnit.scala:24:14]
input io_router_resp_vc_sel_2_2, // @[IngressUnit.scala:24:14]
input io_router_resp_vc_sel_1_0, // @[IngressUnit.scala:24:14]
input io_router_resp_vc_sel_1_1, // @[IngressUnit.scala:24:14]
input io_router_resp_vc_sel_1_2, // @[IngressUnit.scala:24:14]
input io_router_resp_vc_sel_0_0, // @[IngressUnit.scala:24:14]
input io_router_resp_vc_sel_0_1, // @[IngressUnit.scala:24:14]
input io_router_resp_vc_sel_0_2, // @[IngressUnit.scala:24:14]
input io_vcalloc_req_ready, // @[IngressUnit.scala:24:14]
output io_vcalloc_req_valid, // @[IngressUnit.scala:24:14]
output io_vcalloc_req_bits_vc_sel_5_0, // @[IngressUnit.scala:24:14]
output io_vcalloc_req_bits_vc_sel_4_0, // @[IngressUnit.scala:24:14]
output io_vcalloc_req_bits_vc_sel_3_0, // @[IngressUnit.scala:24:14]
output io_vcalloc_req_bits_vc_sel_3_1, // @[IngressUnit.scala:24:14]
output io_vcalloc_req_bits_vc_sel_3_2, // @[IngressUnit.scala:24:14]
output io_vcalloc_req_bits_vc_sel_2_0, // @[IngressUnit.scala:24:14]
output io_vcalloc_req_bits_vc_sel_2_1, // @[IngressUnit.scala:24:14]
output io_vcalloc_req_bits_vc_sel_2_2, // @[IngressUnit.scala:24:14]
output io_vcalloc_req_bits_vc_sel_1_0, // @[IngressUnit.scala:24:14]
output io_vcalloc_req_bits_vc_sel_1_1, // @[IngressUnit.scala:24:14]
output io_vcalloc_req_bits_vc_sel_1_2, // @[IngressUnit.scala:24:14]
output io_vcalloc_req_bits_vc_sel_0_0, // @[IngressUnit.scala:24:14]
output io_vcalloc_req_bits_vc_sel_0_1, // @[IngressUnit.scala:24:14]
output io_vcalloc_req_bits_vc_sel_0_2, // @[IngressUnit.scala:24:14]
input io_vcalloc_resp_vc_sel_5_0, // @[IngressUnit.scala:24:14]
input io_vcalloc_resp_vc_sel_4_0, // @[IngressUnit.scala:24:14]
input io_vcalloc_resp_vc_sel_3_0, // @[IngressUnit.scala:24:14]
input io_vcalloc_resp_vc_sel_3_1, // @[IngressUnit.scala:24:14]
input io_vcalloc_resp_vc_sel_3_2, // @[IngressUnit.scala:24:14]
input io_vcalloc_resp_vc_sel_2_0, // @[IngressUnit.scala:24:14]
input io_vcalloc_resp_vc_sel_2_1, // @[IngressUnit.scala:24:14]
input io_vcalloc_resp_vc_sel_2_2, // @[IngressUnit.scala:24:14]
input io_vcalloc_resp_vc_sel_1_0, // @[IngressUnit.scala:24:14]
input io_vcalloc_resp_vc_sel_1_1, // @[IngressUnit.scala:24:14]
input io_vcalloc_resp_vc_sel_1_2, // @[IngressUnit.scala:24:14]
input io_vcalloc_resp_vc_sel_0_0, // @[IngressUnit.scala:24:14]
input io_vcalloc_resp_vc_sel_0_1, // @[IngressUnit.scala:24:14]
input io_vcalloc_resp_vc_sel_0_2, // @[IngressUnit.scala:24:14]
input io_out_credit_available_5_0, // @[IngressUnit.scala:24:14]
input io_out_credit_available_4_0, // @[IngressUnit.scala:24:14]
input io_out_credit_available_3_0, // @[IngressUnit.scala:24:14]
input io_out_credit_available_3_1, // @[IngressUnit.scala:24:14]
input io_out_credit_available_3_2, // @[IngressUnit.scala:24:14]
input io_out_credit_available_2_0, // @[IngressUnit.scala:24:14]
input io_out_credit_available_2_1, // @[IngressUnit.scala:24:14]
input io_out_credit_available_2_2, // @[IngressUnit.scala:24:14]
input io_out_credit_available_1_0, // @[IngressUnit.scala:24:14]
input io_out_credit_available_1_2, // @[IngressUnit.scala:24:14]
input io_out_credit_available_0_0, // @[IngressUnit.scala:24:14]
input io_salloc_req_0_ready, // @[IngressUnit.scala:24:14]
output io_salloc_req_0_valid, // @[IngressUnit.scala:24:14]
output io_salloc_req_0_bits_vc_sel_5_0, // @[IngressUnit.scala:24:14]
output io_salloc_req_0_bits_vc_sel_4_0, // @[IngressUnit.scala:24:14]
output io_salloc_req_0_bits_vc_sel_3_0, // @[IngressUnit.scala:24:14]
output io_salloc_req_0_bits_vc_sel_3_1, // @[IngressUnit.scala:24:14]
output io_salloc_req_0_bits_vc_sel_3_2, // @[IngressUnit.scala:24:14]
output io_salloc_req_0_bits_vc_sel_2_0, // @[IngressUnit.scala:24:14]
output io_salloc_req_0_bits_vc_sel_2_1, // @[IngressUnit.scala:24:14]
output io_salloc_req_0_bits_vc_sel_2_2, // @[IngressUnit.scala:24:14]
output io_salloc_req_0_bits_vc_sel_1_0, // @[IngressUnit.scala:24:14]
output io_salloc_req_0_bits_vc_sel_1_1, // @[IngressUnit.scala:24:14]
output io_salloc_req_0_bits_vc_sel_1_2, // @[IngressUnit.scala:24:14]
output io_salloc_req_0_bits_vc_sel_0_0, // @[IngressUnit.scala:24:14]
output io_salloc_req_0_bits_vc_sel_0_1, // @[IngressUnit.scala:24:14]
output io_salloc_req_0_bits_vc_sel_0_2, // @[IngressUnit.scala:24:14]
output io_salloc_req_0_bits_tail, // @[IngressUnit.scala:24:14]
output io_out_0_valid, // @[IngressUnit.scala:24:14]
output io_out_0_bits_flit_head, // @[IngressUnit.scala:24:14]
output io_out_0_bits_flit_tail, // @[IngressUnit.scala:24:14]
output [144:0] io_out_0_bits_flit_payload, // @[IngressUnit.scala:24:14]
output [1:0] io_out_0_bits_flit_flow_vnet_id, // @[IngressUnit.scala:24:14]
output [3:0] io_out_0_bits_flit_flow_ingress_node, // @[IngressUnit.scala:24:14]
output [2:0] io_out_0_bits_flit_flow_ingress_node_id, // @[IngressUnit.scala:24:14]
output [3:0] io_out_0_bits_flit_flow_egress_node, // @[IngressUnit.scala:24:14]
output [1:0] io_out_0_bits_flit_flow_egress_node_id, // @[IngressUnit.scala:24:14]
output [1:0] io_out_0_bits_out_virt_channel, // @[IngressUnit.scala:24:14]
output io_in_ready, // @[IngressUnit.scala:24:14]
input io_in_valid, // @[IngressUnit.scala:24:14]
input io_in_bits_head, // @[IngressUnit.scala:24:14]
input io_in_bits_tail, // @[IngressUnit.scala:24:14]
input [144:0] io_in_bits_payload, // @[IngressUnit.scala:24:14]
input [3:0] io_in_bits_egress_id // @[IngressUnit.scala:24:14]
);
wire _vcalloc_q_io_enq_ready; // @[IngressUnit.scala:76:25]
wire _vcalloc_q_io_deq_valid; // @[IngressUnit.scala:76:25]
wire _vcalloc_q_io_deq_bits_vc_sel_5_0; // @[IngressUnit.scala:76:25]
wire _vcalloc_q_io_deq_bits_vc_sel_4_0; // @[IngressUnit.scala:76:25]
wire _vcalloc_q_io_deq_bits_vc_sel_3_0; // @[IngressUnit.scala:76:25]
wire _vcalloc_q_io_deq_bits_vc_sel_3_1; // @[IngressUnit.scala:76:25]
wire _vcalloc_q_io_deq_bits_vc_sel_3_2; // @[IngressUnit.scala:76:25]
wire _vcalloc_q_io_deq_bits_vc_sel_2_0; // @[IngressUnit.scala:76:25]
wire _vcalloc_q_io_deq_bits_vc_sel_2_1; // @[IngressUnit.scala:76:25]
wire _vcalloc_q_io_deq_bits_vc_sel_2_2; // @[IngressUnit.scala:76:25]
wire _vcalloc_q_io_deq_bits_vc_sel_1_0; // @[IngressUnit.scala:76:25]
wire _vcalloc_q_io_deq_bits_vc_sel_1_1; // @[IngressUnit.scala:76:25]
wire _vcalloc_q_io_deq_bits_vc_sel_1_2; // @[IngressUnit.scala:76:25]
wire _vcalloc_q_io_deq_bits_vc_sel_0_0; // @[IngressUnit.scala:76:25]
wire _vcalloc_q_io_deq_bits_vc_sel_0_1; // @[IngressUnit.scala:76:25]
wire _vcalloc_q_io_deq_bits_vc_sel_0_2; // @[IngressUnit.scala:76:25]
wire _vcalloc_buffer_io_enq_ready; // @[IngressUnit.scala:75:30]
wire _vcalloc_buffer_io_deq_valid; // @[IngressUnit.scala:75:30]
wire _vcalloc_buffer_io_deq_bits_tail; // @[IngressUnit.scala:75:30]
wire _route_q_io_enq_ready; // @[IngressUnit.scala:27:23]
wire _route_q_io_deq_valid; // @[IngressUnit.scala:27:23]
wire _route_buffer_io_enq_ready; // @[IngressUnit.scala:26:28]
wire _route_buffer_io_deq_valid; // @[IngressUnit.scala:26:28]
wire _route_buffer_io_deq_bits_head; // @[IngressUnit.scala:26:28]
wire _route_buffer_io_deq_bits_tail; // @[IngressUnit.scala:26:28]
wire [144:0] _route_buffer_io_deq_bits_payload; // @[IngressUnit.scala:26:28]
wire [1:0] _route_buffer_io_deq_bits_flow_vnet_id; // @[IngressUnit.scala:26:28]
wire [3:0] _route_buffer_io_deq_bits_flow_ingress_node; // @[IngressUnit.scala:26:28]
wire [2:0] _route_buffer_io_deq_bits_flow_ingress_node_id; // @[IngressUnit.scala:26:28]
wire [3:0] _route_buffer_io_deq_bits_flow_egress_node; // @[IngressUnit.scala:26:28]
wire [1:0] _route_buffer_io_deq_bits_flow_egress_node_id; // @[IngressUnit.scala:26:28]
wire [1:0] _route_buffer_io_deq_bits_virt_channel_id; // @[IngressUnit.scala:26:28]
wire _route_buffer_io_enq_bits_flow_egress_node_id_T = io_in_bits_egress_id == 4'h2; // @[IngressUnit.scala:30:72]
wire _route_buffer_io_enq_bits_flow_egress_node_id_T_37 = io_in_bits_egress_id == 4'h1; // @[IngressUnit.scala:30:72]
wire _route_buffer_io_enq_bits_flow_egress_node_id_T_2 = io_in_bits_egress_id == 4'hC; // @[IngressUnit.scala:30:72]
wire _route_buffer_io_enq_bits_flow_egress_node_id_T_3 = io_in_bits_egress_id == 4'h7; // @[IngressUnit.scala:30:72]
wire _route_buffer_io_enq_bits_flow_egress_node_id_T_4 = io_in_bits_egress_id == 4'h5; // @[IngressUnit.scala:30:72]
wire _route_buffer_io_enq_bits_flow_egress_node_id_T_5 = io_in_bits_egress_id == 4'h6; // @[IngressUnit.scala:30:72]
wire _route_buffer_io_enq_bits_flow_egress_node_id_T_7 = io_in_bits_egress_id == 4'h8; // @[IngressUnit.scala:30:72]
wire _route_buffer_io_enq_bits_flow_egress_node_id_T_8 = io_in_bits_egress_id == 4'h9; // @[IngressUnit.scala:30:72]
wire _route_buffer_io_enq_bits_flow_egress_node_id_T_9 = io_in_bits_egress_id == 4'hB; // @[IngressUnit.scala:30:72]
wire _route_buffer_io_enq_bits_flow_egress_node_id_T_10 = io_in_bits_egress_id == 4'h4; // @[IngressUnit.scala:30:72]
wire _route_buffer_io_enq_bits_flow_egress_node_id_T_11 = io_in_bits_egress_id == 4'h3; // @[IngressUnit.scala:30:72]
wire _route_buffer_io_enq_bits_flow_egress_node_id_T_12 = io_in_bits_egress_id == 4'hA; // @[IngressUnit.scala:30:72]
wire [3:0] _route_buffer_io_enq_bits_flow_egress_node_T_28 = {4{_route_buffer_io_enq_bits_flow_egress_node_id_T_2}} | {_route_buffer_io_enq_bits_flow_egress_node_id_T_3, 2'h0, _route_buffer_io_enq_bits_flow_egress_node_id_T}; // @[Mux.scala:30:73]
wire [3:0] _route_buffer_io_enq_bits_flow_egress_node_T_34 = {_route_buffer_io_enq_bits_flow_egress_node_T_28[3], _route_buffer_io_enq_bits_flow_egress_node_T_28[2:0] | {_route_buffer_io_enq_bits_flow_egress_node_id_T_4, 2'h0} | {3{_route_buffer_io_enq_bits_flow_egress_node_id_T_5}}} | (_route_buffer_io_enq_bits_flow_egress_node_id_T_7 ? 4'hB : 4'h0) | (_route_buffer_io_enq_bits_flow_egress_node_id_T_8 ? 4'hC : 4'h0) | (_route_buffer_io_enq_bits_flow_egress_node_id_T_9 ? 4'hE : 4'h0); // @[Mux.scala:30:73]
wire [3:0] _route_buffer_io_enq_bits_flow_egress_node_T_37 = {_route_buffer_io_enq_bits_flow_egress_node_T_34[3:2], _route_buffer_io_enq_bits_flow_egress_node_T_34[1:0] | {2{_route_buffer_io_enq_bits_flow_egress_node_id_T_10}} | {_route_buffer_io_enq_bits_flow_egress_node_id_T_11, 1'h0}} | (_route_buffer_io_enq_bits_flow_egress_node_id_T_12 ? 4'hD : 4'h0); // @[Mux.scala:30:73]
wire [1:0] route_buffer_io_enq_bits_flow_egress_node_id = {1'h0, _route_buffer_io_enq_bits_flow_egress_node_id_T_37}; // @[IngressUnit.scala:30:72, :45:50]
wire _GEN = _route_buffer_io_enq_ready & io_in_valid & io_in_bits_head & _route_buffer_io_enq_bits_flow_egress_node_T_37 == 4'h5; // @[Mux.scala:30:73]
wire route_q_io_enq_valid = _GEN | io_in_valid & _route_buffer_io_enq_ready & io_in_bits_head & _route_buffer_io_enq_bits_flow_egress_node_T_37 != 4'h5; // @[Mux.scala:30:73]
wire io_vcalloc_req_valid_0 = _route_buffer_io_deq_valid & _route_q_io_deq_valid & _route_buffer_io_deq_bits_head & _vcalloc_buffer_io_enq_ready & _vcalloc_q_io_enq_ready; // @[IngressUnit.scala:26:28, :27:23, :75:30, :76:25, :91:{54,78}, :92:{10,41}]
wire route_buffer_io_deq_ready = _vcalloc_buffer_io_enq_ready & (_route_q_io_deq_valid | ~_route_buffer_io_deq_bits_head) & (io_vcalloc_req_ready | ~_route_buffer_io_deq_bits_head) & (_vcalloc_q_io_enq_ready | ~_route_buffer_io_deq_bits_head); // @[IngressUnit.scala:26:28, :27:23, :75:30, :76:25, :88:30, :93:61, :94:{27,37}, :95:{27,37}, :96:29]
wire vcalloc_q_io_enq_valid = io_vcalloc_req_ready & io_vcalloc_req_valid_0; // @[Decoupled.scala:51:35] |
Generate the Verilog code corresponding to the following Chisel files.
File GenericFIR.scala:
//// See LICENSE for license details.
//
package chipyard.example
import chisel3._
import chisel3.util._
import dspblocks._
import dsptools.numbers._
import freechips.rocketchip.amba.axi4stream._
import org.chipsalliance.cde.config.{Parameters, Field, Config}
import freechips.rocketchip.diplomacy._
import freechips.rocketchip.tilelink._
import freechips.rocketchip.subsystem._
import fixedpoint._
import fixedpoint.{fromIntToBinaryPoint, fromSIntToFixedPoint, fromUIntToFixedPoint}
// FIR params
case class GenericFIRParams(
writeAddress: BigInt = 0x2000,
readAddress: BigInt = 0x2100,
depth: Int
)
case object GenericFIRKey extends Field[Option[GenericFIRParams]](None)
class GenericFIRCellBundle[T<:Data:Ring](genIn:T, genOut:T) extends Bundle {
val data: T = genIn.cloneType
val carry: T = genOut.cloneType
}
object GenericFIRCellBundle {
def apply[T<:Data:Ring](genIn:T, genOut:T): GenericFIRCellBundle[T] = new GenericFIRCellBundle(genIn, genOut)
}
class GenericFIRCellIO[T<:Data:Ring](genIn:T, genOut:T) extends Bundle {
val coeff = Input(genIn.cloneType)
val in = Flipped(Decoupled(GenericFIRCellBundle(genIn, genOut)))
val out = Decoupled(GenericFIRCellBundle(genIn, genOut))
}
object GenericFIRCellIO {
def apply[T<:Data:Ring](genIn:T, genOut:T): GenericFIRCellIO[T] = new GenericFIRCellIO(genIn, genOut)
}
class GenericFIRBundle[T<:Data:Ring](proto: T) extends Bundle {
val data: T = proto.cloneType
}
object GenericFIRBundle {
def apply[T<:Data:Ring](proto: T): GenericFIRBundle[T] = new GenericFIRBundle(proto)
}
class GenericFIRIO[T<:Data:Ring](genIn:T, genOut:T) extends Bundle {
val in = Flipped(Decoupled(GenericFIRBundle(genIn)))
val out = Decoupled(GenericFIRBundle(genOut))
}
object GenericFIRIO {
def apply[T<:Data:Ring](genIn:T, genOut:T): GenericFIRIO[T] = new GenericFIRIO(genIn, genOut)
}
// A generic FIR filter
// DOC include start: GenericFIR chisel
class GenericFIR[T<:Data:Ring](genIn:T, genOut:T, coeffs: => Seq[T]) extends Module {
val io = IO(GenericFIRIO(genIn, genOut))
// Construct a vector of genericFIRDirectCells
val directCells = Seq.fill(coeffs.length){ Module(new GenericFIRDirectCell(genIn, genOut)).io }
// Construct the direct FIR chain
for ((cell, coeff) <- directCells.zip(coeffs)) {
cell.coeff := coeff
}
// Connect input to first cell
directCells.head.in.bits.data := io.in.bits.data
directCells.head.in.bits.carry := Ring[T].zero
directCells.head.in.valid := io.in.valid
io.in.ready := directCells.head.in.ready
// Connect adjacent cells
// Note that .tail() returns a collection that consists of all
// elements in the initial collection minus the first one.
// This means that we zip together directCells[0, n] and
// directCells[1, n]. However, since zip ignores unmatched elements,
// the resulting zip is (directCells[0], directCells[1]) ...
// (directCells[n-1], directCells[n])
for ((current, next) <- directCells.zip(directCells.tail)) {
next.in.bits := current.out.bits
next.in.valid := current.out.valid
current.out.ready := next.in.ready
}
// Connect output to last cell
io.out.bits.data := directCells.last.out.bits.carry
directCells.last.out.ready := io.out.ready
io.out.valid := directCells.last.out.valid
}
// DOC include end: GenericFIR chisel
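// Plain-Scala illustration of the zip-with-tail pattern used above (the values
// are made up): zipping a Seq with its tail pairs each element with its successor.
//   val cells = Seq("c0", "c1", "c2", "c3")
//   cells.zip(cells.tail) // Seq(("c0","c1"), ("c1","c2"), ("c2","c3"))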
// A generic FIR direct cell used to construct a larger direct FIR chain
//
// in ----- [z^-1]-- out
// |
// coeff ----[*]
// |
// carryIn --[+]-- carryOut
//
// DOC include start: GenericFIRDirectCell chisel
class GenericFIRDirectCell[T<:Data:Ring](genIn: T, genOut: T) extends Module {
val io = IO(GenericFIRCellIO(genIn, genOut))
// Registers to delay the input and the valid to propagate with calculations
val hasNewData = RegInit(0.U)
val inputReg = Reg(genIn.cloneType)
// Passthrough ready
io.in.ready := io.out.ready
// When a new transaction is ready on the input, we will have new data to output
// next cycle. Take this data in
when (io.in.fire) {
hasNewData := 1.U
inputReg := io.in.bits.data
}
// We should output data when our cell has new data to output and is ready to
// receive new data. This ensures that every cell in the chain passes its data
// on at the same time
io.out.valid := hasNewData & io.in.fire
io.out.bits.data := inputReg
// Compute carry
// This uses the ring implementation for + and *, i.e.
// (a * b) maps to (Ring[T].prod(a, b)) for whichever T you use
io.out.bits.carry := inputReg * io.coeff + io.in.bits.carry
}
// DOC include end: GenericFIRDirectCell chisel
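// Worked numeric example of the carry computation above (values are made up).
// With FixedPoint(8.W, 3.BP) the raw bits encode value * 2^3, so 1.5 is raw 12
// and 0.25 is raw 2. For inputReg = 1.5, coeff = 0.25, carryIn = 1.0:
//   carryOut = 1.5 * 0.25 + 1.0 = 1.375, i.e. raw (12 * 2 + (8 << 3)) >> 3 = 11.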
// DOC include start: GenericFIRBlock chisel
abstract class GenericFIRBlock[D, U, EO, EI, B<:Data, T<:Data:Ring]
(
genIn: T,
genOut: T,
coeffs: => Seq[T]
)(implicit p: Parameters) extends DspBlock[D, U, EO, EI, B] {
val streamNode = AXI4StreamIdentityNode()
val mem = None
lazy val module = new LazyModuleImp(this) {
require(streamNode.in.length == 1)
require(streamNode.out.length == 1)
val in = streamNode.in.head._1
val out = streamNode.out.head._1
// instantiate generic fir
val fir = Module(new GenericFIR(genIn, genOut, coeffs))
// Attach ready and valid to outside interface
in.ready := fir.io.in.ready
fir.io.in.valid := in.valid
fir.io.out.ready := out.ready
out.valid := fir.io.out.valid
// cast UInt to T
fir.io.in.bits := in.bits.data.asTypeOf(GenericFIRBundle(genIn))
// cast T to UInt
out.bits.data := fir.io.out.bits.asUInt
}
}
// DOC include end: GenericFIRBlock chisel
// DOC include start: TLGenericFIRBlock chisel
class TLGenericFIRBlock[T<:Data:Ring]
(
val genIn: T,
val genOut: T,
coeffs: => Seq[T]
)(implicit p: Parameters) extends
GenericFIRBlock[TLClientPortParameters, TLManagerPortParameters, TLEdgeOut, TLEdgeIn, TLBundle, T](
genIn, genOut, coeffs
) with TLDspBlock
// DOC include end: TLGenericFIRBlock chisel
// DOC include start: TLGenericFIRChain chisel
class TLGenericFIRChain[T<:Data:Ring] (genIn: T, genOut: T, coeffs: => Seq[T], params: GenericFIRParams)(implicit p: Parameters)
extends TLChain(Seq(
TLWriteQueue(params.depth, AddressSet(params.writeAddress, 0xff))(_),
{ implicit p: Parameters =>
val fir = LazyModule(new TLGenericFIRBlock(genIn, genOut, coeffs))
fir
},
TLReadQueue(params.depth, AddressSet(params.readAddress, 0xff))(_)
))
// DOC include end: TLGenericFIRChain chisel
// DOC include start: CanHavePeripheryStreamingFIR chisel
trait CanHavePeripheryStreamingFIR extends BaseSubsystem {
val streamingFIR = p(GenericFIRKey) match {
case Some(params) => {
val pbus = locateTLBusWrapper(PBUS)
val domain = pbus.generateSynchronousDomain.suggestName("fir_domain")
val streamingFIR = domain { LazyModule(new TLGenericFIRChain(
genIn = FixedPoint(8.W, 3.BP),
genOut = FixedPoint(8.W, 3.BP),
coeffs = Seq(1.U.asFixedPoint(0.BP), 2.U.asFixedPoint(0.BP), 3.U.asFixedPoint(0.BP)),
params = params)) }
pbus.coupleTo("streamingFIR") { domain { streamingFIR.mem.get := TLFIFOFixer() := TLFragmenter(pbus.beatBytes, pbus.blockBytes) } := _ }
Some(streamingFIR)
}
case None => None
}
}
// DOC include end: CanHavePeripheryStreamingFIR chisel
/**
* Mixin to add FIR to rocket config
*/
// DOC include start: WithStreamingFIR
class WithStreamingFIR extends Config((site, here, up) => {
case GenericFIRKey => Some(GenericFIRParams(depth = 8))
})
// DOC include end: WithStreamingFIR
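// Hedged usage sketch (the outer config class name is illustrative): the mixin
// composes into a Chipyard config like any other fragment, e.g.
//   class StreamingFIRRocketConfig extends Config(
//     new chipyard.example.WithStreamingFIR ++ new chipyard.RocketConfig)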
| module GenericFIRDirectCell( // @[GenericFIR.scala:107:7]
input clock, // @[GenericFIR.scala:107:7]
input reset, // @[GenericFIR.scala:107:7]
input [7:0] io_coeff, // @[GenericFIR.scala:108:14]
output io_in_ready, // @[GenericFIR.scala:108:14]
input io_in_valid, // @[GenericFIR.scala:108:14]
input [7:0] io_in_bits_data, // @[GenericFIR.scala:108:14]
input [7:0] io_in_bits_carry, // @[GenericFIR.scala:108:14]
input io_out_ready, // @[GenericFIR.scala:108:14]
output io_out_valid, // @[GenericFIR.scala:108:14]
output [7:0] io_out_bits_data, // @[GenericFIR.scala:108:14]
output [7:0] io_out_bits_carry // @[GenericFIR.scala:108:14]
);
reg hasNewData; // @[GenericFIR.scala:111:27]
reg [7:0] inputReg; // @[GenericFIR.scala:112:21]
wire _io_out_valid_T = io_out_ready & io_in_valid; // @[Decoupled.scala:51:35]
wire [10:0] _io_out_bits_carry_T_1 = {{3{inputReg[7]}}, inputReg} * {{3{io_coeff[7]}}, io_coeff} + {io_in_bits_carry, 3'h0}; // @[FixedPointTypeClass.scala:20:58, :42:59]
always @(posedge clock) begin // @[GenericFIR.scala:107:7]
if (reset) // @[GenericFIR.scala:107:7]
hasNewData <= 1'h0; // @[GenericFIR.scala:107:7, :111:27]
else // @[GenericFIR.scala:107:7]
hasNewData <= _io_out_valid_T | hasNewData; // @[Decoupled.scala:51:35]
if (_io_out_valid_T) // @[Decoupled.scala:51:35]
inputReg <= io_in_bits_data; // @[GenericFIR.scala:112:21]
  end // always @(posedge)
  // NOTE: the tail of this module was missing; the assigns below are
  // reconstructed from the Chisel source above, and the carry bit-slice is assumed.
  assign io_in_ready = io_out_ready;
  assign io_out_valid = hasNewData & _io_out_valid_T;
  assign io_out_bits_data = inputReg;
  assign io_out_bits_carry = _io_out_bits_carry_T_1[10:3];
endmodule
Generate the Verilog code corresponding to the following Chisel files.
File Monitor.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceLine
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import freechips.rocketchip.diplomacy.EnableMonitors
import freechips.rocketchip.formal.{MonitorDirection, IfThen, Property, PropertyClass, TestplanTestType, TLMonitorStrictMode}
import freechips.rocketchip.util.PlusArg
case class TLMonitorArgs(edge: TLEdge)
abstract class TLMonitorBase(args: TLMonitorArgs) extends Module
{
val io = IO(new Bundle {
val in = Input(new TLBundle(args.edge.bundle))
})
def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit
legalize(io.in, args.edge, reset)
}
object TLMonitor {
def apply(enable: Boolean, node: TLNode)(implicit p: Parameters): TLNode = {
if (enable) {
EnableMonitors { implicit p => node := TLEphemeralNode()(ValName("monitor")) }
} else { node }
}
}
class TLMonitor(args: TLMonitorArgs, monitorDir: MonitorDirection = MonitorDirection.Monitor) extends TLMonitorBase(args)
{
require (args.edge.params(TLMonitorStrictMode) || (! args.edge.params(TestplanTestType).formal))
val cover_prop_class = PropertyClass.Default
//Like assert but can flip to being an assumption for formal verification
def monAssert(cond: Bool, message: String): Unit =
if (monitorDir == MonitorDirection.Monitor) {
assert(cond, message)
} else {
Property(monitorDir, cond, message, PropertyClass.Default)
}
def assume(cond: Bool, message: String): Unit =
if (monitorDir == MonitorDirection.Monitor) {
assert(cond, message)
} else {
Property(monitorDir.flip, cond, message, PropertyClass.Default)
}
def extra = {
args.edge.sourceInfo match {
case SourceLine(filename, line, col) => s" (connected at $filename:$line:$col)"
case _ => ""
}
}
def visible(address: UInt, source: UInt, edge: TLEdge) =
edge.client.clients.map { c =>
!c.sourceId.contains(source) ||
c.visibility.map(_.contains(address)).reduce(_ || _)
}.reduce(_ && _)
def legalizeFormatA(bundle: TLBundleA, edge: TLEdge): Unit = {
//switch this flag to turn on diplomacy in error messages
def diplomacyInfo = if (true) "" else "\nThe diplomacy information for the edge is as follows:\n" + edge.formatEdge + "\n"
monAssert (TLMessages.isA(bundle.opcode), "'A' channel has invalid opcode" + extra)
// Reuse these subexpressions to save some firrtl lines
val source_ok = edge.client.contains(bundle.source)
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val mask = edge.full_mask(bundle)
monAssert (visible(edge.address(bundle), bundle.source, edge), "'A' channel carries an address illegal for the specified bank visibility")
    //The monitor doesn't check for acquire T vs acquire B; it assumes that acquire B implies acquire T and only checks for acquire B
//TODO: check for acquireT?
when (bundle.opcode === TLMessages.AcquireBlock) {
monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock from a client which does not support Probe" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel AcquireBlock carries invalid source ID" + diplomacyInfo + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquireBlock smaller than a beat" + extra)
monAssert (is_aligned, "'A' channel AcquireBlock address not aligned to size" + extra)
monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquireBlock carries invalid grow param" + extra)
monAssert (~bundle.mask === 0.U, "'A' channel AcquireBlock contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel AcquireBlock is corrupt" + extra)
}
when (bundle.opcode === TLMessages.AcquirePerm) {
monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm from a client which does not support Probe" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel AcquirePerm carries invalid source ID" + diplomacyInfo + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquirePerm smaller than a beat" + extra)
monAssert (is_aligned, "'A' channel AcquirePerm address not aligned to size" + extra)
monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquirePerm carries invalid grow param" + extra)
monAssert (bundle.param =/= TLPermissions.NtoB, "'A' channel AcquirePerm requests NtoB" + extra)
monAssert (~bundle.mask === 0.U, "'A' channel AcquirePerm contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel AcquirePerm is corrupt" + extra)
}
when (bundle.opcode === TLMessages.Get) {
monAssert (edge.master.emitsGet(bundle.source, bundle.size), "'A' channel carries Get type which master claims it can't emit" + diplomacyInfo + extra)
monAssert (edge.slave.supportsGetSafe(edge.address(bundle), bundle.size, None), "'A' channel carries Get type which slave claims it can't support" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel Get carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Get address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel Get carries invalid param" + extra)
monAssert (bundle.mask === mask, "'A' channel Get contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel Get is corrupt" + extra)
}
when (bundle.opcode === TLMessages.PutFullData) {
monAssert (edge.master.emitsPutFull(bundle.source, bundle.size) && edge.slave.supportsPutFullSafe(edge.address(bundle), bundle.size), "'A' channel carries PutFull type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel PutFull carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel PutFull address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel PutFull carries invalid param" + extra)
monAssert (bundle.mask === mask, "'A' channel PutFull contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.PutPartialData) {
monAssert (edge.master.emitsPutPartial(bundle.source, bundle.size) && edge.slave.supportsPutPartialSafe(edge.address(bundle), bundle.size), "'A' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel PutPartial carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel PutPartial address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel PutPartial carries invalid param" + extra)
monAssert ((bundle.mask & ~mask) === 0.U, "'A' channel PutPartial contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.ArithmeticData) {
monAssert (edge.master.emitsArithmetic(bundle.source, bundle.size) && edge.slave.supportsArithmeticSafe(edge.address(bundle), bundle.size), "'A' channel carries Arithmetic type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Arithmetic carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Arithmetic address not aligned to size" + extra)
monAssert (TLAtomics.isArithmetic(bundle.param), "'A' channel Arithmetic carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Arithmetic contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.LogicalData) {
monAssert (edge.master.emitsLogical(bundle.source, bundle.size) && edge.slave.supportsLogicalSafe(edge.address(bundle), bundle.size), "'A' channel carries Logical type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Logical carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Logical address not aligned to size" + extra)
monAssert (TLAtomics.isLogical(bundle.param), "'A' channel Logical carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Logical contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.Hint) {
monAssert (edge.master.emitsHint(bundle.source, bundle.size) && edge.slave.supportsHintSafe(edge.address(bundle), bundle.size), "'A' channel carries Hint type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Hint carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Hint address not aligned to size" + extra)
monAssert (TLHints.isHints(bundle.param), "'A' channel Hint carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Hint contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel Hint is corrupt" + extra)
}
}
def legalizeFormatB(bundle: TLBundleB, edge: TLEdge): Unit = {
monAssert (TLMessages.isB(bundle.opcode), "'B' channel has invalid opcode" + extra)
monAssert (visible(edge.address(bundle), bundle.source, edge), "'B' channel carries an address illegal for the specified bank visibility")
// Reuse these subexpressions to save some firrtl lines
val address_ok = edge.manager.containsSafe(edge.address(bundle))
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val mask = edge.full_mask(bundle)
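    // A 'B' message must use the first (lowest) source ID of the owning client;
    // reconstruct that ID from the client list and compare it against bundle.source.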
val legal_source = Mux1H(edge.client.find(bundle.source), edge.client.clients.map(c => c.sourceId.start.U)) === bundle.source
when (bundle.opcode === TLMessages.Probe) {
assume (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'B' channel carries Probe type which is unexpected using diplomatic parameters" + extra)
assume (address_ok, "'B' channel Probe carries unmanaged address" + extra)
assume (legal_source, "'B' channel Probe carries source that is not first source" + extra)
assume (is_aligned, "'B' channel Probe address not aligned to size" + extra)
assume (TLPermissions.isCap(bundle.param), "'B' channel Probe carries invalid cap param" + extra)
assume (bundle.mask === mask, "'B' channel Probe contains invalid mask" + extra)
assume (!bundle.corrupt, "'B' channel Probe is corrupt" + extra)
}
when (bundle.opcode === TLMessages.Get) {
monAssert (edge.master.supportsGet(edge.source(bundle), bundle.size) && edge.slave.emitsGetSafe(edge.address(bundle), bundle.size), "'B' channel carries Get type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel Get carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Get carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Get address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel Get carries invalid param" + extra)
monAssert (bundle.mask === mask, "'B' channel Get contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'B' channel Get is corrupt" + extra)
}
when (bundle.opcode === TLMessages.PutFullData) {
monAssert (edge.master.supportsPutFull(edge.source(bundle), bundle.size) && edge.slave.emitsPutFullSafe(edge.address(bundle), bundle.size), "'B' channel carries PutFull type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel PutFull carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel PutFull carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel PutFull address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel PutFull carries invalid param" + extra)
monAssert (bundle.mask === mask, "'B' channel PutFull contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.PutPartialData) {
monAssert (edge.master.supportsPutPartial(edge.source(bundle), bundle.size) && edge.slave.emitsPutPartialSafe(edge.address(bundle), bundle.size), "'B' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel PutPartial carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel PutPartial carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel PutPartial address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel PutPartial carries invalid param" + extra)
monAssert ((bundle.mask & ~mask) === 0.U, "'B' channel PutPartial contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.ArithmeticData) {
monAssert (edge.master.supportsArithmetic(edge.source(bundle), bundle.size) && edge.slave.emitsArithmeticSafe(edge.address(bundle), bundle.size), "'B' channel carries Arithmetic type unsupported by master" + extra)
monAssert (address_ok, "'B' channel Arithmetic carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Arithmetic carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Arithmetic address not aligned to size" + extra)
monAssert (TLAtomics.isArithmetic(bundle.param), "'B' channel Arithmetic carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'B' channel Arithmetic contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.LogicalData) {
monAssert (edge.master.supportsLogical(edge.source(bundle), bundle.size) && edge.slave.emitsLogicalSafe(edge.address(bundle), bundle.size), "'B' channel carries Logical type unsupported by client" + extra)
monAssert (address_ok, "'B' channel Logical carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Logical carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Logical address not aligned to size" + extra)
monAssert (TLAtomics.isLogical(bundle.param), "'B' channel Logical carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'B' channel Logical contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.Hint) {
monAssert (edge.master.supportsHint(edge.source(bundle), bundle.size) && edge.slave.emitsHintSafe(edge.address(bundle), bundle.size), "'B' channel carries Hint type unsupported by client" + extra)
monAssert (address_ok, "'B' channel Hint carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Hint carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Hint address not aligned to size" + extra)
monAssert (bundle.mask === mask, "'B' channel Hint contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'B' channel Hint is corrupt" + extra)
}
}
def legalizeFormatC(bundle: TLBundleC, edge: TLEdge): Unit = {
monAssert (TLMessages.isC(bundle.opcode), "'C' channel has invalid opcode" + extra)
val source_ok = edge.client.contains(bundle.source)
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val address_ok = edge.manager.containsSafe(edge.address(bundle))
monAssert (visible(edge.address(bundle), bundle.source, edge), "'C' channel carries an address illegal for the specified bank visibility")
when (bundle.opcode === TLMessages.ProbeAck) {
monAssert (address_ok, "'C' channel ProbeAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel ProbeAck carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAck smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ProbeAck address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAck carries invalid report param" + extra)
monAssert (!bundle.corrupt, "'C' channel ProbeAck is corrupt" + extra)
}
when (bundle.opcode === TLMessages.ProbeAckData) {
monAssert (address_ok, "'C' channel ProbeAckData carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel ProbeAckData carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAckData smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ProbeAckData address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAckData carries invalid report param" + extra)
}
when (bundle.opcode === TLMessages.Release) {
monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries Release type unsupported by manager" + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra)
monAssert (source_ok, "'C' channel Release carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel Release smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel Release address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel Release carries invalid report param" + extra)
monAssert (!bundle.corrupt, "'C' channel Release is corrupt" + extra)
}
when (bundle.opcode === TLMessages.ReleaseData) {
monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData type unsupported by manager" + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData from a client which does not support Probe" + extra)
monAssert (source_ok, "'C' channel ReleaseData carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ReleaseData smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ReleaseData address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ReleaseData carries invalid report param" + extra)
}
when (bundle.opcode === TLMessages.AccessAck) {
monAssert (address_ok, "'C' channel AccessAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel AccessAck carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel AccessAck address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel AccessAck carries invalid param" + extra)
monAssert (!bundle.corrupt, "'C' channel AccessAck is corrupt" + extra)
}
when (bundle.opcode === TLMessages.AccessAckData) {
monAssert (address_ok, "'C' channel AccessAckData carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel AccessAckData carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel AccessAckData address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel AccessAckData carries invalid param" + extra)
}
when (bundle.opcode === TLMessages.HintAck) {
monAssert (address_ok, "'C' channel HintAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel HintAck carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel HintAck address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel HintAck carries invalid param" + extra)
monAssert (!bundle.corrupt, "'C' channel HintAck is corrupt" + extra)
}
}
def legalizeFormatD(bundle: TLBundleD, edge: TLEdge): Unit = {
assume (TLMessages.isD(bundle.opcode), "'D' channel has invalid opcode" + extra)
val source_ok = edge.client.contains(bundle.source)
val sink_ok = bundle.sink < edge.manager.endSinkId.U
val deny_put_ok = edge.manager.mayDenyPut.B
val deny_get_ok = edge.manager.mayDenyGet.B
when (bundle.opcode === TLMessages.ReleaseAck) {
assume (source_ok, "'D' channel ReleaseAck carries invalid source ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel ReleaseAck smaller than a beat" + extra)
assume (bundle.param === 0.U, "'D' channel ReleaseeAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel ReleaseAck is corrupt" + extra)
assume (!bundle.denied, "'D' channel ReleaseAck is denied" + extra)
}
when (bundle.opcode === TLMessages.Grant) {
assume (source_ok, "'D' channel Grant carries invalid source ID" + extra)
assume (sink_ok, "'D' channel Grant carries invalid sink ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel Grant smaller than a beat" + extra)
assume (TLPermissions.isCap(bundle.param), "'D' channel Grant carries invalid cap param" + extra)
assume (bundle.param =/= TLPermissions.toN, "'D' channel Grant carries toN param" + extra)
assume (!bundle.corrupt, "'D' channel Grant is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel Grant is denied" + extra)
}
when (bundle.opcode === TLMessages.GrantData) {
assume (source_ok, "'D' channel GrantData carries invalid source ID" + extra)
assume (sink_ok, "'D' channel GrantData carries invalid sink ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel GrantData smaller than a beat" + extra)
assume (TLPermissions.isCap(bundle.param), "'D' channel GrantData carries invalid cap param" + extra)
assume (bundle.param =/= TLPermissions.toN, "'D' channel GrantData carries toN param" + extra)
assume (!bundle.denied || bundle.corrupt, "'D' channel GrantData is denied but not corrupt" + extra)
assume (deny_get_ok || !bundle.denied, "'D' channel GrantData is denied" + extra)
}
when (bundle.opcode === TLMessages.AccessAck) {
assume (source_ok, "'D' channel AccessAck carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel AccessAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel AccessAck is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel AccessAck is denied" + extra)
}
when (bundle.opcode === TLMessages.AccessAckData) {
assume (source_ok, "'D' channel AccessAckData carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel AccessAckData carries invalid param" + extra)
assume (!bundle.denied || bundle.corrupt, "'D' channel AccessAckData is denied but not corrupt" + extra)
assume (deny_get_ok || !bundle.denied, "'D' channel AccessAckData is denied" + extra)
}
when (bundle.opcode === TLMessages.HintAck) {
assume (source_ok, "'D' channel HintAck carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel HintAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel HintAck is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel HintAck is denied" + extra)
}
}
def legalizeFormatE(bundle: TLBundleE, edge: TLEdge): Unit = {
val sink_ok = bundle.sink < edge.manager.endSinkId.U
monAssert (sink_ok, "'E' channels carries invalid sink ID" + extra)
}
def legalizeFormat(bundle: TLBundle, edge: TLEdge) = {
when (bundle.a.valid) { legalizeFormatA(bundle.a.bits, edge) }
when (bundle.d.valid) { legalizeFormatD(bundle.d.bits, edge) }
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
when (bundle.b.valid) { legalizeFormatB(bundle.b.bits, edge) }
when (bundle.c.valid) { legalizeFormatC(bundle.c.bits, edge) }
when (bundle.e.valid) { legalizeFormatE(bundle.e.bits, edge) }
} else {
monAssert (!bundle.b.valid, "'B' channel valid and not TL-C" + extra)
monAssert (!bundle.c.valid, "'C' channel valid and not TL-C" + extra)
monAssert (!bundle.e.valid, "'E' channel valid and not TL-C" + extra)
}
}
def legalizeMultibeatA(a: DecoupledIO[TLBundleA], edge: TLEdge): Unit = {
val a_first = edge.first(a.bits, a.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (a.valid && !a_first) {
monAssert (a.bits.opcode === opcode, "'A' channel opcode changed within multibeat operation" + extra)
monAssert (a.bits.param === param, "'A' channel param changed within multibeat operation" + extra)
monAssert (a.bits.size === size, "'A' channel size changed within multibeat operation" + extra)
monAssert (a.bits.source === source, "'A' channel source changed within multibeat operation" + extra)
monAssert (a.bits.address === address, "'A' channel address changed within multibeat operation" + extra)
}
when (a.fire && a_first) {
opcode := a.bits.opcode
param := a.bits.param
size := a.bits.size
source := a.bits.source
address := a.bits.address
}
}
def legalizeMultibeatB(b: DecoupledIO[TLBundleB], edge: TLEdge): Unit = {
val b_first = edge.first(b.bits, b.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (b.valid && !b_first) {
monAssert (b.bits.opcode === opcode, "'B' channel opcode changed within multibeat operation" + extra)
monAssert (b.bits.param === param, "'B' channel param changed within multibeat operation" + extra)
monAssert (b.bits.size === size, "'B' channel size changed within multibeat operation" + extra)
monAssert (b.bits.source === source, "'B' channel source changed within multibeat operation" + extra)
monAssert (b.bits.address === address, "'B' channel address changed within multibeat operation" + extra)
}
when (b.fire && b_first) {
opcode := b.bits.opcode
param := b.bits.param
size := b.bits.size
source := b.bits.source
address := b.bits.address
}
}
def legalizeADSourceFormal(bundle: TLBundle, edge: TLEdge): Unit = {
// Symbolic variable
val sym_source = Wire(UInt(edge.client.endSourceId.W))
// TODO: Connect sym_source to a fixed value for simulation and to a
// free wire in formal
sym_source := 0.U
// Type casting Int to UInt
val maxSourceId = Wire(UInt(edge.client.endSourceId.W))
maxSourceId := edge.client.endSourceId.U
// Delayed version of sym_source
val sym_source_d = Reg(UInt(edge.client.endSourceId.W))
sym_source_d := sym_source
// These will be constraints for FV setup
Property(
MonitorDirection.Monitor,
(sym_source === sym_source_d),
"sym_source should remain stable",
PropertyClass.Default)
Property(
MonitorDirection.Monitor,
(sym_source <= maxSourceId),
"sym_source should take legal value",
PropertyClass.Default)
val my_resp_pend = RegInit(false.B)
val my_opcode = Reg(UInt())
val my_size = Reg(UInt())
val a_first = bundle.a.valid && edge.first(bundle.a.bits, bundle.a.fire)
val d_first = bundle.d.valid && edge.first(bundle.d.bits, bundle.d.fire)
val my_a_first_beat = a_first && (bundle.a.bits.source === sym_source)
val my_d_first_beat = d_first && (bundle.d.bits.source === sym_source)
val my_clr_resp_pend = (bundle.d.fire && my_d_first_beat)
val my_set_resp_pend = (bundle.a.fire && my_a_first_beat && !my_clr_resp_pend)
when (my_set_resp_pend) {
my_resp_pend := true.B
} .elsewhen (my_clr_resp_pend) {
my_resp_pend := false.B
}
when (my_a_first_beat) {
my_opcode := bundle.a.bits.opcode
my_size := bundle.a.bits.size
}
val my_resp_size = Mux(my_a_first_beat, bundle.a.bits.size, my_size)
val my_resp_opcode = Mux(my_a_first_beat, bundle.a.bits.opcode, my_opcode)
val my_resp_opcode_legal = Wire(Bool())
when ((my_resp_opcode === TLMessages.Get) || (my_resp_opcode === TLMessages.ArithmeticData) ||
(my_resp_opcode === TLMessages.LogicalData)) {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAckData)
} .elsewhen ((my_resp_opcode === TLMessages.PutFullData) || (my_resp_opcode === TLMessages.PutPartialData)) {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAck)
} .otherwise {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.HintAck)
}
monAssert (IfThen(my_resp_pend, !my_a_first_beat),
"Request message should not be sent with a source ID for which a response message " +
"is already pending (not received until the current cycle) for a prior request message " +
"with the same source ID" + extra)
assume (IfThen(my_clr_resp_pend, (my_set_resp_pend || my_resp_pend)),
"Response message should be accepted with a source ID only if a request message with the " +
"same source ID has been accepted or is being accepted in the current cycle" + extra)
assume (IfThen(my_d_first_beat, (my_a_first_beat || my_resp_pend)),
"Response message should be sent with a source ID only if a request message with the " +
"same source ID has been accepted or is being sent in the current cycle" + extra)
assume (IfThen(my_d_first_beat, (bundle.d.bits.size === my_resp_size)),
"If d_valid is 1, then d_size should be the same as a_size of the corresponding request " +
"message" + extra)
assume (IfThen(my_d_first_beat, my_resp_opcode_legal),
"If d_valid is 1, then d_opcode should correspond with a_opcode of the corresponding " +
"request message" + extra)
}
def legalizeMultibeatC(c: DecoupledIO[TLBundleC], edge: TLEdge): Unit = {
val c_first = edge.first(c.bits, c.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (c.valid && !c_first) {
monAssert (c.bits.opcode === opcode, "'C' channel opcode changed within multibeat operation" + extra)
monAssert (c.bits.param === param, "'C' channel param changed within multibeat operation" + extra)
monAssert (c.bits.size === size, "'C' channel size changed within multibeat operation" + extra)
monAssert (c.bits.source === source, "'C' channel source changed within multibeat operation" + extra)
monAssert (c.bits.address === address, "'C' channel address changed within multibeat operation" + extra)
}
when (c.fire && c_first) {
opcode := c.bits.opcode
param := c.bits.param
size := c.bits.size
source := c.bits.source
address := c.bits.address
}
}
def legalizeMultibeatD(d: DecoupledIO[TLBundleD], edge: TLEdge): Unit = {
val d_first = edge.first(d.bits, d.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val sink = Reg(UInt())
val denied = Reg(Bool())
when (d.valid && !d_first) {
assume (d.bits.opcode === opcode, "'D' channel opcode changed within multibeat operation" + extra)
assume (d.bits.param === param, "'D' channel param changed within multibeat operation" + extra)
assume (d.bits.size === size, "'D' channel size changed within multibeat operation" + extra)
assume (d.bits.source === source, "'D' channel source changed within multibeat operation" + extra)
assume (d.bits.sink === sink, "'D' channel sink changed within multibeat operation" + extra)
assume (d.bits.denied === denied, "'D' channel denied changed within multibeat operation" + extra)
}
when (d.fire && d_first) {
opcode := d.bits.opcode
param := d.bits.param
size := d.bits.size
source := d.bits.source
sink := d.bits.sink
denied := d.bits.denied
}
}
def legalizeMultibeat(bundle: TLBundle, edge: TLEdge): Unit = {
legalizeMultibeatA(bundle.a, edge)
legalizeMultibeatD(bundle.d, edge)
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
legalizeMultibeatB(bundle.b, edge)
legalizeMultibeatC(bundle.c, edge)
}
}
// This is left in for almond, which doesn't adhere to the TileLink protocol
@deprecated("Use legalizeADSource instead if possible","")
def legalizeADSourceOld(bundle: TLBundle, edge: TLEdge): Unit = {
val inflight = RegInit(0.U(edge.client.endSourceId.W))
val a_first = edge.first(bundle.a.bits, bundle.a.fire)
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
val a_set = WireInit(0.U(edge.client.endSourceId.W))
when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) {
a_set := UIntToOH(bundle.a.bits.source)
assert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra)
}
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
assume((a_set | inflight)(bundle.d.bits.source), "'D' channel acknowledged for nothing inflight" + extra)
}
if (edge.manager.minLatency > 0) {
assume(a_set =/= d_clr || !a_set.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra)
}
inflight := (inflight | a_set) & ~d_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
assert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeADSource(bundle: TLBundle, edge: TLEdge): Unit = {
val a_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset)
val a_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything
val log_a_opcode_bus_size = log2Ceil(a_opcode_bus_size)
val log_a_size_bus_size = log2Ceil(a_size_bus_size)
def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits
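// Worked example (illustrative, assuming sizeBits = 3 so a_size_bus_size = 4): each source owns a
// (1 << log_a_size_bus_size) = 4-bit slot of inflight_sizes. Recording a_size = 2 stores
// (2 << 1) | 1 = 0b0101, so bit 0 of the slot marks it as in use; the lookup below masks the slot
// with size_to_numfullbits(4.U) = 0b1111 and shifts right by 1 to recover the original size.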
val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) // size up to avoid width error
inflight.suggestName("inflight")
val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
inflight_opcodes.suggestName("inflight_opcodes")
val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
inflight_sizes.suggestName("inflight_sizes")
val a_first = edge.first(bundle.a.bits, bundle.a.fire)
a_first.suggestName("a_first")
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
d_first.suggestName("d_first")
val a_set = WireInit(0.U(edge.client.endSourceId.W))
val a_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
a_set.suggestName("a_set")
a_set_wo_ready.suggestName("a_set_wo_ready")
val a_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
a_opcodes_set.suggestName("a_opcodes_set")
val a_sizes_set = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
a_sizes_set.suggestName("a_sizes_set")
val a_opcode_lookup = WireInit(0.U((a_opcode_bus_size - 1).W))
a_opcode_lookup.suggestName("a_opcode_lookup")
a_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_a_opcode_bus_size.U) & size_to_numfullbits(1.U << log_a_opcode_bus_size.U)) >> 1.U
val a_size_lookup = WireInit(0.U((1 << log_a_size_bus_size).W))
a_size_lookup.suggestName("a_size_lookup")
a_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_a_size_bus_size.U) & size_to_numfullbits(1.U << log_a_size_bus_size.U)) >> 1.U
val responseMap = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.Grant, TLMessages.Grant))
val responseMapSecondOption = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.GrantData, TLMessages.Grant))
val a_opcodes_set_interm = WireInit(0.U(a_opcode_bus_size.W))
a_opcodes_set_interm.suggestName("a_opcodes_set_interm")
val a_sizes_set_interm = WireInit(0.U(a_size_bus_size.W))
a_sizes_set_interm.suggestName("a_sizes_set_interm")
when (bundle.a.valid && a_first && edge.isRequest(bundle.a.bits)) {
a_set_wo_ready := UIntToOH(bundle.a.bits.source)
}
when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) {
a_set := UIntToOH(bundle.a.bits.source)
a_opcodes_set_interm := (bundle.a.bits.opcode << 1.U) | 1.U
a_sizes_set_interm := (bundle.a.bits.size << 1.U) | 1.U
a_opcodes_set := (a_opcodes_set_interm) << (bundle.a.bits.source << log_a_opcode_bus_size.U)
a_sizes_set := (a_sizes_set_interm) << (bundle.a.bits.source << log_a_size_bus_size.U)
monAssert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra)
}
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
d_clr.suggestName("d_clr")
d_clr_wo_ready.suggestName("d_clr_wo_ready")
val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
d_opcodes_clr.suggestName("d_opcodes_clr")
val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
d_sizes_clr.suggestName("d_sizes_clr")
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr_wo_ready := UIntToOH(bundle.d.bits.source)
}
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
d_opcodes_clr := size_to_numfullbits(1.U << log_a_opcode_bus_size.U) << (bundle.d.bits.source << log_a_opcode_bus_size.U)
d_sizes_clr := size_to_numfullbits(1.U << log_a_size_bus_size.U) << (bundle.d.bits.source << log_a_size_bus_size.U)
}
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
val same_cycle_resp = bundle.a.valid && a_first && edge.isRequest(bundle.a.bits) && (bundle.a.bits.source === bundle.d.bits.source)
assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra)
when (same_cycle_resp) {
assume((bundle.d.bits.opcode === responseMap(bundle.a.bits.opcode)) ||
(bundle.d.bits.opcode === responseMapSecondOption(bundle.a.bits.opcode)), "'D' channel contains improper opcode response" + extra)
assume((bundle.a.bits.size === bundle.d.bits.size), "'D' channel contains improper response size" + extra)
} .otherwise {
assume((bundle.d.bits.opcode === responseMap(a_opcode_lookup)) ||
(bundle.d.bits.opcode === responseMapSecondOption(a_opcode_lookup)), "'D' channel contains improper opcode response" + extra)
assume((bundle.d.bits.size === a_size_lookup), "'D' channel contains improper response size" + extra)
}
}
when(bundle.d.valid && d_first && a_first && bundle.a.valid && (bundle.a.bits.source === bundle.d.bits.source) && !d_release_ack) {
assume((!bundle.d.ready) || bundle.a.ready, "ready check")
}
if (edge.manager.minLatency > 0) {
assume(a_set_wo_ready =/= d_clr_wo_ready || !a_set_wo_ready.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra)
}
inflight := (inflight | a_set) & ~d_clr
inflight_opcodes := (inflight_opcodes | a_opcodes_set) & ~d_opcodes_clr
inflight_sizes := (inflight_sizes | a_sizes_set) & ~d_sizes_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeCDSource(bundle: TLBundle, edge: TLEdge): Unit = {
val c_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset)
val c_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything
val log_c_opcode_bus_size = log2Ceil(c_opcode_bus_size)
val log_c_size_bus_size = log2Ceil(c_size_bus_size)
def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits
val inflight = RegInit(0.U((2 max edge.client.endSourceId).W))
val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
inflight.suggestName("inflight")
inflight_opcodes.suggestName("inflight_opcodes")
inflight_sizes.suggestName("inflight_sizes")
val c_first = edge.first(bundle.c.bits, bundle.c.fire)
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
c_first.suggestName("c_first")
d_first.suggestName("d_first")
val c_set = WireInit(0.U(edge.client.endSourceId.W))
val c_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
val c_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val c_sizes_set = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
c_set.suggestName("c_set")
c_set_wo_ready.suggestName("c_set_wo_ready")
c_opcodes_set.suggestName("c_opcodes_set")
c_sizes_set.suggestName("c_sizes_set")
val c_opcode_lookup = WireInit(0.U((1 << log_c_opcode_bus_size).W))
val c_size_lookup = WireInit(0.U((1 << log_c_size_bus_size).W))
c_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_c_opcode_bus_size.U) & size_to_numfullbits(1.U << log_c_opcode_bus_size.U)) >> 1.U
c_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_c_size_bus_size.U) & size_to_numfullbits(1.U << log_c_size_bus_size.U)) >> 1.U
c_opcode_lookup.suggestName("c_opcode_lookup")
c_size_lookup.suggestName("c_size_lookup")
val c_opcodes_set_interm = WireInit(0.U(c_opcode_bus_size.W))
val c_sizes_set_interm = WireInit(0.U(c_size_bus_size.W))
c_opcodes_set_interm.suggestName("c_opcodes_set_interm")
c_sizes_set_interm.suggestName("c_sizes_set_interm")
when (bundle.c.valid && c_first && edge.isRequest(bundle.c.bits)) {
c_set_wo_ready := UIntToOH(bundle.c.bits.source)
}
when (bundle.c.fire && c_first && edge.isRequest(bundle.c.bits)) {
c_set := UIntToOH(bundle.c.bits.source)
c_opcodes_set_interm := (bundle.c.bits.opcode << 1.U) | 1.U
c_sizes_set_interm := (bundle.c.bits.size << 1.U) | 1.U
c_opcodes_set := (c_opcodes_set_interm) << (bundle.c.bits.source << log_c_opcode_bus_size.U)
c_sizes_set := (c_sizes_set_interm) << (bundle.c.bits.source << log_c_size_bus_size.U)
monAssert(!inflight(bundle.c.bits.source), "'C' channel re-used a source ID" + extra)
}
val c_probe_ack = bundle.c.bits.opcode === TLMessages.ProbeAck || bundle.c.bits.opcode === TLMessages.ProbeAckData
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
d_clr.suggestName("d_clr")
d_clr_wo_ready.suggestName("d_clr_wo_ready")
d_opcodes_clr.suggestName("d_opcodes_clr")
d_sizes_clr.suggestName("d_sizes_clr")
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
d_clr_wo_ready := UIntToOH(bundle.d.bits.source)
}
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
d_opcodes_clr := size_to_numfullbits(1.U << log_c_opcode_bus_size.U) << (bundle.d.bits.source << log_c_opcode_bus_size.U)
d_sizes_clr := size_to_numfullbits(1.U << log_c_size_bus_size.U) << (bundle.d.bits.source << log_c_size_bus_size.U)
}
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
val same_cycle_resp = bundle.c.valid && c_first && edge.isRequest(bundle.c.bits) && (bundle.c.bits.source === bundle.d.bits.source)
assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra)
when (same_cycle_resp) {
assume((bundle.d.bits.size === bundle.c.bits.size), "'D' channel contains improper response size" + extra)
} .otherwise {
assume((bundle.d.bits.size === c_size_lookup), "'D' channel contains improper response size" + extra)
}
}
when(bundle.d.valid && d_first && c_first && bundle.c.valid && (bundle.c.bits.source === bundle.d.bits.source) && d_release_ack && !c_probe_ack) {
assume((!bundle.d.ready) || bundle.c.ready, "ready check")
}
if (edge.manager.minLatency > 0) {
when (c_set_wo_ready.orR) {
assume(c_set_wo_ready =/= d_clr_wo_ready, s"'C' and 'D' concurrent, despite minlatency > 0" + extra)
}
}
inflight := (inflight | c_set) & ~d_clr
inflight_opcodes := (inflight_opcodes | c_opcodes_set) & ~d_opcodes_clr
inflight_sizes := (inflight_sizes | c_sizes_set) & ~d_sizes_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.c.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeDESink(bundle: TLBundle, edge: TLEdge): Unit = {
val inflight = RegInit(0.U(edge.manager.endSinkId.W))
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
val e_first = true.B
val d_set = WireInit(0.U(edge.manager.endSinkId.W))
when (bundle.d.fire && d_first && edge.isRequest(bundle.d.bits)) {
d_set := UIntToOH(bundle.d.bits.sink)
assume(!inflight(bundle.d.bits.sink), "'D' channel re-used a sink ID" + extra)
}
val e_clr = WireInit(0.U(edge.manager.endSinkId.W))
when (bundle.e.fire && e_first && edge.isResponse(bundle.e.bits)) {
e_clr := UIntToOH(bundle.e.bits.sink)
monAssert((d_set | inflight)(bundle.e.bits.sink), "'E' channel acknowledged for nothing inflight" + extra)
}
// edge.client.minLatency applies to BC, not DE
inflight := (inflight | d_set) & ~e_clr
}
def legalizeUnique(bundle: TLBundle, edge: TLEdge): Unit = {
val sourceBits = log2Ceil(edge.client.endSourceId)
val tooBig = 14 // >16kB worth of flight information gets to be too much
if (sourceBits > tooBig) {
println(s"WARNING: TLMonitor instantiated on a bus with source bits (${sourceBits}) > ${tooBig}; A=>D transaction flight will not be checked")
} else {
if (args.edge.params(TestplanTestType).simulation) {
if (args.edge.params(TLMonitorStrictMode)) {
legalizeADSource(bundle, edge)
legalizeCDSource(bundle, edge)
} else {
legalizeADSourceOld(bundle, edge)
}
}
if (args.edge.params(TestplanTestType).formal) {
legalizeADSourceFormal(bundle, edge)
}
}
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
// legalizeBCSourceAddress(bundle, edge) // too much state needed to synthesize...
val sinkBits = log2Ceil(edge.manager.endSinkId)
if (sinkBits > tooBig) {
println(s"WARNING: TLMonitor instantiated on a bus with sink bits (${sinkBits}) > ${tooBig}; D=>E transaction flight will not be checked")
} else {
legalizeDESink(bundle, edge)
}
}
}
def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit = {
legalizeFormat (bundle, edge)
legalizeMultibeat (bundle, edge)
legalizeUnique (bundle, edge)
}
}
File Misc.scala:
// See LICENSE.Berkeley for license details.
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util._
import chisel3.util.random.LFSR
import org.chipsalliance.cde.config.Parameters
import scala.math._
class ParameterizedBundle(implicit p: Parameters) extends Bundle
trait Clocked extends Bundle {
val clock = Clock()
val reset = Bool()
}
object DecoupledHelper {
def apply(rvs: Bool*) = new DecoupledHelper(rvs)
}
class DecoupledHelper(val rvs: Seq[Bool]) {
def fire(exclude: Bool, includes: Bool*) = {
require(rvs.contains(exclude), "Excluded Bool not present in DecoupledHelper! Note that DecoupledHelper uses referential equality for exclusion! If you don't want to exclude anything, use fire()!")
(rvs.filter(_ ne exclude) ++ includes).reduce(_ && _)
}
def fire() = {
rvs.reduce(_ && _)
}
}
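// Illustrative usage sketch (hypothetical signals `enq`, `deq`, and `tokensFree`): gather all the
// conditions required for a transfer, then derive each handshake by excluding that signal's own
// term from the AND, avoiding combinational loops.
//   val helper = DecoupledHelper(enq.valid, deq.ready, tokensFree)
//   deq.valid := helper.fire(deq.ready)
//   enq.ready := helper.fire(enq.valid)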
object MuxT {
def apply[T <: Data, U <: Data](cond: Bool, con: (T, U), alt: (T, U)): (T, U) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2))
def apply[T <: Data, U <: Data, W <: Data](cond: Bool, con: (T, U, W), alt: (T, U, W)): (T, U, W) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3))
def apply[T <: Data, U <: Data, W <: Data, X <: Data](cond: Bool, con: (T, U, W, X), alt: (T, U, W, X)): (T, U, W, X) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3), Mux(cond, con._4, alt._4))
}
/** Creates a cascade of n MuxTs to search for a key value. */
object MuxTLookup {
def apply[S <: UInt, T <: Data, U <: Data](key: S, default: (T, U), mapping: Seq[(S, (T, U))]): (T, U) = {
var res = default
for ((k, v) <- mapping.reverse)
res = MuxT(k === key, v, res)
res
}
def apply[S <: UInt, T <: Data, U <: Data, W <: Data](key: S, default: (T, U, W), mapping: Seq[(S, (T, U, W))]): (T, U, W) = {
var res = default
for ((k, v) <- mapping.reverse)
res = MuxT(k === key, v, res)
res
}
}
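// Illustrative usage sketch (hypothetical opcodes LOAD_OP / STORE_OP): select a pair of control
// signals by key, falling back to the default when no entry matches.
//   val (wen, lgSize) = MuxTLookup(opcode, (false.B, 0.U),
//     Seq(LOAD_OP  -> (false.B, 2.U),
//         STORE_OP -> (true.B,  2.U)))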
object ValidMux {
def apply[T <: Data](v1: ValidIO[T], v2: ValidIO[T]*): ValidIO[T] = {
apply(v1 +: v2.toSeq)
}
def apply[T <: Data](valids: Seq[ValidIO[T]]): ValidIO[T] = {
val out = Wire(Valid(valids.head.bits.cloneType))
out.valid := valids.map(_.valid).reduce(_ || _)
out.bits := MuxCase(valids.head.bits,
valids.map(v => (v.valid -> v.bits)))
out
}
}
object Str
{
def apply(s: String): UInt = {
var i = BigInt(0)
require(s.forall(validChar _))
for (c <- s)
i = (i << 8) | c
i.U((s.length*8).W)
}
def apply(x: Char): UInt = {
require(validChar(x))
x.U(8.W)
}
def apply(x: UInt): UInt = apply(x, 10)
def apply(x: UInt, radix: Int): UInt = {
val rad = radix.U
val w = x.getWidth
require(w > 0)
var q = x
var s = digit(q % rad)
for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
q = q / rad
s = Cat(Mux((radix == 10).B && q === 0.U, Str(' '), digit(q % rad)), s)
}
s
}
def apply(x: SInt): UInt = apply(x, 10)
def apply(x: SInt, radix: Int): UInt = {
val neg = x < 0.S
val abs = x.abs.asUInt
if (radix != 10) {
Cat(Mux(neg, Str('-'), Str(' ')), Str(abs, radix))
} else {
val rad = radix.U
val w = abs.getWidth
require(w > 0)
var q = abs
var s = digit(q % rad)
var needSign = neg
for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
q = q / rad
val placeSpace = q === 0.U
val space = Mux(needSign, Str('-'), Str(' '))
needSign = needSign && !placeSpace
s = Cat(Mux(placeSpace, space, digit(q % rad)), s)
}
Cat(Mux(needSign, Str('-'), Str(' ')), s)
}
}
private def digit(d: UInt): UInt = Mux(d < 10.U, Str('0')+d, Str(('a'-10).toChar)+d)(7,0)
private def validChar(x: Char) = x == (x & 0xFF)
}
object Split
{
def apply(x: UInt, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n0), x.extract(n0-1,0))
}
def apply(x: UInt, n1: Int, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0))
}
def apply(x: UInt, n2: Int, n1: Int, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n2), x.extract(n2-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0))
}
}
object Random
{
def apply(mod: Int, random: UInt): UInt = {
if (isPow2(mod)) random.extract(log2Ceil(mod)-1,0)
else PriorityEncoder(partition(apply(1 << log2Up(mod*8), random), mod))
}
def apply(mod: Int): UInt = apply(mod, randomizer)
def oneHot(mod: Int, random: UInt): UInt = {
if (isPow2(mod)) UIntToOH(random(log2Up(mod)-1,0))
else PriorityEncoderOH(partition(apply(1 << log2Up(mod*8), random), mod)).asUInt
}
def oneHot(mod: Int): UInt = oneHot(mod, randomizer)
private def randomizer = LFSR(16)
private def partition(value: UInt, slices: Int) =
Seq.tabulate(slices)(i => value < (((i + 1) << value.getWidth) / slices).U)
}
object Majority {
def apply(in: Set[Bool]): Bool = {
val n = (in.size >> 1) + 1
val clauses = in.subsets(n).map(_.reduce(_ && _))
clauses.reduce(_ || _)
}
def apply(in: Seq[Bool]): Bool = apply(in.toSet)
def apply(in: UInt): Bool = apply(in.asBools.toSet)
}
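// Illustrative example: Majority("b101".U) is true.B, since two of the three bits are set; with an
// even number of inputs, strictly more than half must be set.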
object PopCountAtLeast {
private def two(x: UInt): (Bool, Bool) = x.getWidth match {
case 1 => (x.asBool, false.B)
case n =>
val half = x.getWidth / 2
val (leftOne, leftTwo) = two(x(half - 1, 0))
val (rightOne, rightTwo) = two(x(x.getWidth - 1, half))
(leftOne || rightOne, leftTwo || rightTwo || (leftOne && rightOne))
}
def apply(x: UInt, n: Int): Bool = n match {
case 0 => true.B
case 1 => x.orR
case 2 => two(x)._2
case 3 => PopCount(x) >= n.U
}
}
// This gets used everywhere, so make the smallest circuit possible ...
// Given an address and size, create a mask of beatBytes size
// eg: (0x3, 0, 4) => 0001, (0x3, 1, 4) => 0011, (0x3, 2, 4) => 1111
// groupBy applies an interleaved OR reduction; groupBy=2 takes 0010 => 01
object MaskGen {
def apply(addr_lo: UInt, lgSize: UInt, beatBytes: Int, groupBy: Int = 1): UInt = {
require (groupBy >= 1 && beatBytes >= groupBy)
require (isPow2(beatBytes) && isPow2(groupBy))
val lgBytes = log2Ceil(beatBytes)
val sizeOH = UIntToOH(lgSize | 0.U(log2Up(beatBytes).W), log2Up(beatBytes)) | (groupBy*2 - 1).U
def helper(i: Int): Seq[(Bool, Bool)] = {
if (i == 0) {
Seq((lgSize >= lgBytes.asUInt, true.B))
} else {
val sub = helper(i-1)
val size = sizeOH(lgBytes - i)
val bit = addr_lo(lgBytes - i)
val nbit = !bit
Seq.tabulate (1 << i) { j =>
val (sub_acc, sub_eq) = sub(j/2)
val eq = sub_eq && (if (j % 2 == 1) bit else nbit)
val acc = sub_acc || (size && eq)
(acc, eq)
}
}
}
if (groupBy == beatBytes) 1.U else
Cat(helper(lgBytes-log2Ceil(groupBy)).map(_._1).reverse)
}
}
File PlusArg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.experimental._
import chisel3.util.HasBlackBoxResource
@deprecated("This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05")
case class PlusArgInfo(default: BigInt, docstring: String)
/** Case class for PlusArg information
*
* @tparam A scala type of the PlusArg value
* @param default optional default value
* @param docstring text to include in the help
* @param doctype description of the Verilog type of the PlusArg value (e.g. STRING, INT)
*/
private case class PlusArgContainer[A](default: Option[A], docstring: String, doctype: String)
/** Typeclass for converting a type to a doctype string
* @tparam A some type
*/
trait Doctypeable[A] {
/** Return the doctype string for some option */
def toDoctype(a: Option[A]): String
}
/** Object containing implementations of the Doctypeable typeclass */
object Doctypes {
/** Converts an Int => "INT" */
implicit val intToDoctype = new Doctypeable[Int] { def toDoctype(a: Option[Int]) = "INT" }
/** Converts a BigInt => "INT" */
implicit val bigIntToDoctype = new Doctypeable[BigInt] { def toDoctype(a: Option[BigInt]) = "INT" }
/** Converts a String => "STRING" */
implicit val stringToDoctype = new Doctypeable[String] { def toDoctype(a: Option[String]) = "STRING" }
}
class plusarg_reader(val format: String, val default: BigInt, val docstring: String, val width: Int) extends BlackBox(Map(
"FORMAT" -> StringParam(format),
"DEFAULT" -> IntParam(default),
"WIDTH" -> IntParam(width)
)) with HasBlackBoxResource {
val io = IO(new Bundle {
val out = Output(UInt(width.W))
})
addResource("/vsrc/plusarg_reader.v")
}
/* This wrapper class has no outputs, making it clear it is a simulation-only construct */
class PlusArgTimeout(val format: String, val default: BigInt, val docstring: String, val width: Int) extends Module {
val io = IO(new Bundle {
val count = Input(UInt(width.W))
})
val max = Module(new plusarg_reader(format, default, docstring, width)).io.out
when (max > 0.U) {
assert (io.count < max, s"Timeout exceeded: $docstring")
}
}
import Doctypes._
object PlusArg
{
/** PlusArg("foo") will return 42.U if the simulation is run with +foo=42
* Do not use this as an initial register value. The value is set in an
* initial block and thus accessing it from another initial is racy.
* Add a docstring to document the arg, which can be dumped in an elaboration
* pass.
*/
def apply(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32): UInt = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new plusarg_reader(name + "=%d", default, docstring, width)).io.out
}
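// Illustrative usage (hypothetical plusarg name "max_cycles"):
//   val max_cycles = PlusArg("max_cycles", default = 0, docstring = "Stop after this many cycles; 0 disables")
// Running the simulator with +max_cycles=100000 overrides the default at runtime.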
/** PlusArg.timeout(name, default, docstring)(count) will use chisel.assert
* to kill the simulation when count exceeds the specified integer argument.
* Default 0 will never assert.
*/
def timeout(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32)(count: UInt): Unit = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new PlusArgTimeout(name + "=%d", default, docstring, width)).io.count := count
}
}
object PlusArgArtefacts {
private var artefacts: Map[String, PlusArgContainer[_]] = Map.empty
/* Add a new PlusArg */
@deprecated(
"Use `Some(BigInt)` to specify a `default` value. This will be removed in Rocket Chip 2020.08",
"Rocket Chip 2020.05"
)
def append(name: String, default: BigInt, docstring: String): Unit = append(name, Some(default), docstring)
/** Add a new PlusArg
*
* @tparam A scala type of the PlusArg value
* @param name name for the PlusArg
* @param default optional default value
* @param docstring text to include in the help
*/
def append[A : Doctypeable](name: String, default: Option[A], docstring: String): Unit =
artefacts = artefacts ++
Map(name -> PlusArgContainer(default, docstring, implicitly[Doctypeable[A]].toDoctype(default)))
/* From plus args, generate help text */
private def serializeHelp_cHeader(tab: String = ""): String = artefacts
.map{ case(arg, info) =>
s"""|$tab+$arg=${info.doctype}\\n\\
|$tab${" "*20}${info.docstring}\\n\\
|""".stripMargin ++ info.default.map{ case default =>
s"$tab${" "*22}(default=${default})\\n\\\n"}.getOrElse("")
}.toSeq.mkString("\\n\\\n") ++ "\""
/* From plus args, generate a char array of their names */
private def serializeArray_cHeader(tab: String = ""): String = {
val prettyTab = tab + " " * 44 // Length of 'static const ...'
s"${tab}static const char * verilog_plusargs [] = {\\\n" ++
artefacts
.map{ case(arg, _) => s"""$prettyTab"$arg",\\\n""" }
.mkString("")++
s"${prettyTab}0};"
}
/* Generate C code to be included in emulator.cc that helps with
* argument parsing based on available Verilog PlusArgs */
def serialize_cHeader(): String =
s"""|#define PLUSARG_USAGE_OPTIONS \"EMULATOR VERILOG PLUSARGS\\n\\
|${serializeHelp_cHeader(" "*7)}
|${serializeArray_cHeader()}
|""".stripMargin
}
File package.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip
import chisel3._
import chisel3.util._
import scala.math.min
import scala.collection.{immutable, mutable}
package object util {
implicit class UnzippableOption[S, T](val x: Option[(S, T)]) {
def unzip = (x.map(_._1), x.map(_._2))
}
implicit class UIntIsOneOf(private val x: UInt) extends AnyVal {
def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR
def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq)
}
implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal {
/** Like Vec.apply(idx), but tolerates indices of mismatched width */
def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0))
}
implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal {
def apply(idx: UInt): T = {
if (x.size <= 1) {
x.head
} else if (!isPow2(x.size)) {
// For non-power-of-2 seqs, reflect elements to simplify decoder
(x ++ x.takeRight(x.size & -x.size)).toSeq(idx)
} else {
// Ignore MSBs of idx
val truncIdx =
if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx
else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0)
x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) }
}
}
def extract(idx: UInt): T = VecInit(x).extract(idx)
def asUInt: UInt = Cat(x.map(_.asUInt).reverse)
def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n)
def rotate(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n)
def rotateRight(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
}
// allow bitwise ops on Seq[Bool] just like UInt
implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal {
def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b }
def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b }
def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b }
def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x
def >> (n: Int): Seq[Bool] = x drop n
def unary_~ : Seq[Bool] = x.map(!_)
def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_)
def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_)
def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_)
private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B)
}
implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal {
def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable))
def getElements: Seq[Element] = x match {
case e: Element => Seq(e)
case a: Aggregate => a.getElements.flatMap(_.getElements)
}
}
/** Any Data subtype that has a Bool member named valid. */
type DataCanBeValid = Data { val valid: Bool }
implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal {
def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable)
}
implicit class StringToAugmentedString(private val x: String) extends AnyVal {
/** converts from camel case to underscores, also removing all spaces */
def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") {
case (acc, c) if c.isUpper => acc + "_" + c.toLower
case (acc, c) if c == ' ' => acc
case (acc, c) => acc + c
}
/** converts spaces or underscores to hyphens, also lowering case */
def kebab: String = x.toLowerCase map {
case ' ' => '-'
case '_' => '-'
case c => c
}
def named(name: Option[String]): String = {
x + name.map("_named_" + _ ).getOrElse("_with_no_name")
}
def named(name: String): String = named(Some(name))
}
implicit def uintToBitPat(x: UInt): BitPat = BitPat(x)
implicit def wcToUInt(c: WideCounter): UInt = c.value
implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal {
def sextTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x)
}
def padTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(0.U((n - x.getWidth).W), x)
}
// shifts left by n if n >= 0, or right by -n if n < 0
def << (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << n(w-1, 0)
Mux(n(w), shifted >> (1 << w), shifted)
}
// shifts right by n if n >= 0, or left by -n if n < 0
def >> (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << (1 << w) >> n(w-1, 0)
Mux(n(w), shifted, shifted >> (1 << w))
}
// Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts
def extract(hi: Int, lo: Int): UInt = {
require(hi >= lo-1)
if (hi == lo-1) 0.U
else x(hi, lo)
}
// Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts
def extractOption(hi: Int, lo: Int): Option[UInt] = {
require(hi >= lo-1)
if (hi == lo-1) None
else Some(x(hi, lo))
}
// like x & ~y, but first truncate or zero-extend y to x's width
def andNot(y: UInt): UInt = x & ~(y | (x & 0.U))
def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n)
def rotateRight(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r))
}
}
def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n))
def rotateLeft(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r))
}
}
// compute (this + y) % n, given (this < n) and (y < n)
def addWrap(y: UInt, n: Int): UInt = {
val z = x +& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0)
}
// compute (this - y) % n, given (this < n) and (y < n)
def subWrap(y: UInt, n: Int): UInt = {
val z = x -& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0)
}
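// Illustrative examples with n = 5: 3.U.addWrap(4.U, 5) = 2.U and 1.U.subWrap(3.U, 5) = 3.U.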
def grouped(width: Int): Seq[UInt] =
(0 until x.getWidth by width).map(base => x(base + width - 1, base))
def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds
def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x)
// Like >=, but prevents x-prop for ('x >= 0)
def >== (y: UInt): Bool = x >= y || y === 0.U
}
implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal {
def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y)
def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y)
}
implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal {
def toInt: Int = if (x) 1 else 0
// this one's snagged from scalaz
def option[T](z: => T): Option[T] = if (x) Some(z) else None
}
implicit class IntToAugmentedInt(private val x: Int) extends AnyVal {
// exact log2
def log2: Int = {
require(isPow2(x))
log2Ceil(x)
}
}
def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x)
def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x))
def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0)
def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1)
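// Illustrative examples of the OH1 (one-hot minus one) encoding used above:
//   UIntToOH1(3.U, 8)    = "b00000111".U (three ones in the low bits)
//   OH1ToOH("b0111".U)   = "b1000".U
//   OH1ToUInt("b0111".U) = 3.U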
def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None
// Fill 1s from low bits to high bits
def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth)
def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0))
helper(1, x)(width-1, 0)
}
// Fill 1s from high bits to low bits
def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth)
def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x >> s))
helper(1, x)(width-1, 0)
}
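// Illustrative examples: leftOR("b00100".U(5.W)) = "b11100".U (set bits propagate toward the MSB),
// while rightOR("b00100".U(5.W)) = "b00111".U (set bits propagate toward the LSB).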
def OptimizationBarrier[T <: Data](in: T): T = {
val barrier = Module(new Module {
val io = IO(new Bundle {
val x = Input(chiselTypeOf(in))
val y = Output(chiselTypeOf(in))
})
io.y := io.x
override def desiredName = s"OptimizationBarrier_${in.typeName}"
})
barrier.io.x := in
barrier.io.y
}
/** Similar to Seq.groupBy except this returns a Seq instead of a Map
* Useful for deterministic code generation
*/
def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = {
val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]]
for (x <- xs) {
val key = f(x)
val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A])
l += x
}
map.view.map({ case (k, vs) => k -> vs.toList }).toList
}
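// Illustrative example: groupByIntoSeq(Seq(1, 2, 3, 4))(_ % 2) returns
// Seq(1 -> Seq(1, 3), 0 -> Seq(2, 4)), keeping keys in first-seen order (unlike Map-based groupBy).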
def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match {
case 1 => List.fill(n)(in.head)
case x if x == n => in
case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in")
}
// HeterogeneousBag moved to standalone diplomacy
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts)
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag
}
File Bundles.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import freechips.rocketchip.util._
import scala.collection.immutable.ListMap
import chisel3.util.Decoupled
import chisel3.util.DecoupledIO
import chisel3.reflect.DataMirror
abstract class TLBundleBase(val params: TLBundleParameters) extends Bundle
// common combos in lazy policy:
// Put + Acquire
// Release + AccessAck
object TLMessages
{
// A B C D E
def PutFullData = 0.U // . . => AccessAck
def PutPartialData = 1.U // . . => AccessAck
def ArithmeticData = 2.U // . . => AccessAckData
def LogicalData = 3.U // . . => AccessAckData
def Get = 4.U // . . => AccessAckData
def Hint = 5.U // . . => HintAck
def AcquireBlock = 6.U // . => Grant[Data]
def AcquirePerm = 7.U // . => Grant[Data]
def Probe = 6.U // . => ProbeAck[Data]
def AccessAck = 0.U // . .
def AccessAckData = 1.U // . .
def HintAck = 2.U // . .
def ProbeAck = 4.U // .
def ProbeAckData = 5.U // .
def Release = 6.U // . => ReleaseAck
def ReleaseData = 7.U // . => ReleaseAck
def Grant = 4.U // . => GrantAck
def GrantData = 5.U // . => GrantAck
def ReleaseAck = 6.U // .
def GrantAck = 0.U // .
def isA(x: UInt) = x <= AcquirePerm
def isB(x: UInt) = x <= Probe
def isC(x: UInt) = x <= ReleaseData
def isD(x: UInt) = x <= ReleaseAck
def adResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, Grant, Grant)
def bcResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, ProbeAck, ProbeAck)
def a = Seq( ("PutFullData",TLPermissions.PermMsgReserved),
("PutPartialData",TLPermissions.PermMsgReserved),
("ArithmeticData",TLAtomics.ArithMsg),
("LogicalData",TLAtomics.LogicMsg),
("Get",TLPermissions.PermMsgReserved),
("Hint",TLHints.HintsMsg),
("AcquireBlock",TLPermissions.PermMsgGrow),
("AcquirePerm",TLPermissions.PermMsgGrow))
def b = Seq( ("PutFullData",TLPermissions.PermMsgReserved),
("PutPartialData",TLPermissions.PermMsgReserved),
("ArithmeticData",TLAtomics.ArithMsg),
("LogicalData",TLAtomics.LogicMsg),
("Get",TLPermissions.PermMsgReserved),
("Hint",TLHints.HintsMsg),
("Probe",TLPermissions.PermMsgCap))
def c = Seq( ("AccessAck",TLPermissions.PermMsgReserved),
("AccessAckData",TLPermissions.PermMsgReserved),
("HintAck",TLPermissions.PermMsgReserved),
("Invalid Opcode",TLPermissions.PermMsgReserved),
("ProbeAck",TLPermissions.PermMsgReport),
("ProbeAckData",TLPermissions.PermMsgReport),
("Release",TLPermissions.PermMsgReport),
("ReleaseData",TLPermissions.PermMsgReport))
def d = Seq( ("AccessAck",TLPermissions.PermMsgReserved),
("AccessAckData",TLPermissions.PermMsgReserved),
("HintAck",TLPermissions.PermMsgReserved),
("Invalid Opcode",TLPermissions.PermMsgReserved),
("Grant",TLPermissions.PermMsgCap),
("GrantData",TLPermissions.PermMsgCap),
("ReleaseAck",TLPermissions.PermMsgReserved))
}
/**
* The three primary TileLink permissions are:
* (T)runk: the agent is (or is on inwards path to) the global point of serialization.
* (B)ranch: the agent is on an outwards path to a client holding a read-only (cached) copy.
* (N)one: the agent holds no permissions on the block.
* These permissions are permuted by transfer operations in various ways.
* Operations can cap permissions, request for them to be grown or shrunk,
* or for a report on their current status.
*/
object TLPermissions
{
val aWidth = 2
val bdWidth = 2
val cWidth = 3
// Cap types (Grant = new permissions, Probe = permissions <= target)
def toT = 0.U(bdWidth.W)
def toB = 1.U(bdWidth.W)
def toN = 2.U(bdWidth.W)
def isCap(x: UInt) = x <= toN
// Grow types (Acquire = permissions >= target)
def NtoB = 0.U(aWidth.W)
def NtoT = 1.U(aWidth.W)
def BtoT = 2.U(aWidth.W)
def isGrow(x: UInt) = x <= BtoT
// Shrink types (ProbeAck, Release)
def TtoB = 0.U(cWidth.W)
def TtoN = 1.U(cWidth.W)
def BtoN = 2.U(cWidth.W)
def isShrink(x: UInt) = x <= BtoN
// Report types (ProbeAck, Release)
def TtoT = 3.U(cWidth.W)
def BtoB = 4.U(cWidth.W)
def NtoN = 5.U(cWidth.W)
def isReport(x: UInt) = x <= NtoN
def PermMsgGrow:Seq[String] = Seq("Grow NtoB", "Grow NtoT", "Grow BtoT")
def PermMsgCap:Seq[String] = Seq("Cap toT", "Cap toB", "Cap toN")
def PermMsgReport:Seq[String] = Seq("Shrink TtoB", "Shrink TtoN", "Shrink BtoN", "Report TtoT", "Report BtoB", "Report NtoN")
def PermMsgReserved:Seq[String] = Seq("Reserved")
}
object TLAtomics
{
val width = 3
// Arithmetic types
def MIN = 0.U(width.W)
def MAX = 1.U(width.W)
def MINU = 2.U(width.W)
def MAXU = 3.U(width.W)
def ADD = 4.U(width.W)
def isArithmetic(x: UInt) = x <= ADD
// Logical types
def XOR = 0.U(width.W)
def OR = 1.U(width.W)
def AND = 2.U(width.W)
def SWAP = 3.U(width.W)
def isLogical(x: UInt) = x <= SWAP
def ArithMsg:Seq[String] = Seq("MIN", "MAX", "MINU", "MAXU", "ADD")
def LogicMsg:Seq[String] = Seq("XOR", "OR", "AND", "SWAP")
}
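// Illustrative sketch (editor addition): the arithmetic and logical parameter
// encodings above overlap (both start at 0), so the A/B opcode is needed to
// tell MIN apart from XOR.  The port names are assumptions.
class AtomicParamExample extends Module {
  val io = IO(new Bundle {
    val opcode = Input(UInt(3.W))
    val param  = Input(UInt(TLAtomics.width.W))
    val isSwap = Output(Bool())
  })
  io.isSwap := io.opcode === TLMessages.LogicalData && io.param === TLAtomics.SWAP
}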
object TLHints
{
val width = 1
def PREFETCH_READ = 0.U(width.W)
def PREFETCH_WRITE = 1.U(width.W)
def isHints(x: UInt) = x <= PREFETCH_WRITE
def HintsMsg:Seq[String] = Seq("PrefetchRead", "PrefetchWrite")
}
sealed trait TLChannel extends TLBundleBase {
val channelName: String
}
sealed trait TLDataChannel extends TLChannel
sealed trait TLAddrChannel extends TLDataChannel
final class TLBundleA(params: TLBundleParameters)
extends TLBundleBase(params) with TLAddrChannel
{
override def typeName = s"TLBundleA_${params.shortName}"
val channelName = "'A' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(List(TLAtomics.width, TLPermissions.aWidth, TLHints.width).max.W) // amo_opcode || grow perms || hint
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // from
val address = UInt(params.addressBits.W) // to
val user = BundleMap(params.requestFields)
val echo = BundleMap(params.echoFields)
// variable fields during multibeat:
val mask = UInt((params.dataBits/8).W)
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleB(params: TLBundleParameters)
extends TLBundleBase(params) with TLAddrChannel
{
override def typeName = s"TLBundleB_${params.shortName}"
val channelName = "'B' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(TLPermissions.bdWidth.W) // cap perms
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // to
val address = UInt(params.addressBits.W) // from
// variable fields during multibeat:
val mask = UInt((params.dataBits/8).W)
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleC(params: TLBundleParameters)
extends TLBundleBase(params) with TLAddrChannel
{
override def typeName = s"TLBundleC_${params.shortName}"
val channelName = "'C' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(TLPermissions.cWidth.W) // shrink or report perms
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // from
val address = UInt(params.addressBits.W) // to
val user = BundleMap(params.requestFields)
val echo = BundleMap(params.echoFields)
// variable fields during multibeat:
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleD(params: TLBundleParameters)
extends TLBundleBase(params) with TLDataChannel
{
override def typeName = s"TLBundleD_${params.shortName}"
val channelName = "'D' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(TLPermissions.bdWidth.W) // cap perms
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // to
val sink = UInt(params.sinkBits.W) // from
val denied = Bool() // implies corrupt iff *Data
val user = BundleMap(params.responseFields)
val echo = BundleMap(params.echoFields)
// variable fields during multibeat:
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleE(params: TLBundleParameters)
extends TLBundleBase(params) with TLChannel
{
override def typeName = s"TLBundleE_${params.shortName}"
val channelName = "'E' channel"
val sink = UInt(params.sinkBits.W) // to
}
class TLBundle(val params: TLBundleParameters) extends Record
{
// Emulate a Bundle with elements abcde or ad depending on params.hasBCE
private val optA = Some (Decoupled(new TLBundleA(params)))
private val optB = params.hasBCE.option(Flipped(Decoupled(new TLBundleB(params))))
private val optC = params.hasBCE.option(Decoupled(new TLBundleC(params)))
private val optD = Some (Flipped(Decoupled(new TLBundleD(params))))
private val optE = params.hasBCE.option(Decoupled(new TLBundleE(params)))
def a: DecoupledIO[TLBundleA] = optA.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleA(params)))))
def b: DecoupledIO[TLBundleB] = optB.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleB(params)))))
def c: DecoupledIO[TLBundleC] = optC.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleC(params)))))
def d: DecoupledIO[TLBundleD] = optD.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleD(params)))))
def e: DecoupledIO[TLBundleE] = optE.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleE(params)))))
val elements =
if (params.hasBCE) ListMap("e" -> e, "d" -> d, "c" -> c, "b" -> b, "a" -> a)
else ListMap("d" -> d, "a" -> a)
def tieoff(): Unit = {
DataMirror.specifiedDirectionOf(a.ready) match {
case SpecifiedDirection.Input =>
a.ready := false.B
c.ready := false.B
e.ready := false.B
b.valid := false.B
d.valid := false.B
case SpecifiedDirection.Output =>
a.valid := false.B
c.valid := false.B
e.valid := false.B
b.ready := false.B
d.ready := false.B
case _ =>
}
}
}
object TLBundle
{
def apply(params: TLBundleParameters) = new TLBundle(params)
}
class TLAsyncBundleBase(val params: TLAsyncBundleParameters) extends Bundle
class TLAsyncBundle(params: TLAsyncBundleParameters) extends TLAsyncBundleBase(params)
{
val a = new AsyncBundle(new TLBundleA(params.base), params.async)
val b = Flipped(new AsyncBundle(new TLBundleB(params.base), params.async))
val c = new AsyncBundle(new TLBundleC(params.base), params.async)
val d = Flipped(new AsyncBundle(new TLBundleD(params.base), params.async))
val e = new AsyncBundle(new TLBundleE(params.base), params.async)
}
class TLRationalBundle(params: TLBundleParameters) extends TLBundleBase(params)
{
val a = RationalIO(new TLBundleA(params))
val b = Flipped(RationalIO(new TLBundleB(params)))
val c = RationalIO(new TLBundleC(params))
val d = Flipped(RationalIO(new TLBundleD(params)))
val e = RationalIO(new TLBundleE(params))
}
class TLCreditedBundle(params: TLBundleParameters) extends TLBundleBase(params)
{
val a = CreditedIO(new TLBundleA(params))
val b = Flipped(CreditedIO(new TLBundleB(params)))
val c = CreditedIO(new TLBundleC(params))
val d = Flipped(CreditedIO(new TLBundleD(params)))
val e = CreditedIO(new TLBundleE(params))
}
File Parameters.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.diplomacy
import chisel3._
import chisel3.util.{DecoupledIO, Queue, ReadyValidIO, isPow2, log2Ceil, log2Floor}
import freechips.rocketchip.util.ShiftQueue
/** Options for describing the attributes of memory regions */
object RegionType {
// Define the 'more relaxed than' ordering
val cases = Seq(CACHED, TRACKED, UNCACHED, IDEMPOTENT, VOLATILE, PUT_EFFECTS, GET_EFFECTS)
sealed trait T extends Ordered[T] {
def compare(that: T): Int = cases.indexOf(that) compare cases.indexOf(this)
}
case object CACHED extends T // an intermediate agent may have cached a copy of the region for you
case object TRACKED extends T // the region may have been cached by another master, but coherence is being provided
case object UNCACHED extends T // the region has not been cached yet, but should be cached when possible
case object IDEMPOTENT extends T // gets return most recently put content, but content should not be cached
case object VOLATILE extends T // content may change without a put, but puts and gets have no side effects
case object PUT_EFFECTS extends T // puts produce side effects and so must not be combined/delayed
case object GET_EFFECTS extends T // gets produce side effects and so must not be issued speculatively
}
// A half-open range of ids [start, end); may be empty when start == end
case class IdRange(start: Int, end: Int) extends Ordered[IdRange]
{
require (start >= 0, s"Ids cannot be negative, but got: $start.")
require (start <= end, "Id ranges cannot be negative.")
def compare(x: IdRange) = {
val primary = (this.start - x.start).signum
val secondary = (x.end - this.end).signum
if (primary != 0) primary else secondary
}
def overlaps(x: IdRange) = start < x.end && x.start < end
def contains(x: IdRange) = start <= x.start && x.end <= end
def contains(x: Int) = start <= x && x < end
def contains(x: UInt) =
if (size == 0) {
false.B
} else if (size == 1) { // simple comparison
x === start.U
} else {
// find index of largest different bit
val largestDeltaBit = log2Floor(start ^ (end-1))
val smallestCommonBit = largestDeltaBit + 1 // may not exist in x
val uncommonMask = (1 << smallestCommonBit) - 1
val uncommonBits = (x | 0.U(smallestCommonBit.W))(largestDeltaBit, 0)
// the prefix must match exactly (note: may shift ALL bits away)
(x >> smallestCommonBit) === (start >> smallestCommonBit).U &&
// firrtl constant prop range analysis can eliminate these two:
(start & uncommonMask).U <= uncommonBits &&
uncommonBits <= ((end-1) & uncommonMask).U
}
def shift(x: Int) = IdRange(start+x, end+x)
def size = end - start
def isEmpty = end == start
def range = start until end
}
object IdRange
{
def overlaps(s: Seq[IdRange]) = if (s.isEmpty) None else {
val ranges = s.sorted
(ranges.tail zip ranges.init) find { case (a, b) => a overlaps b }
}
}
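// Illustrative sketch (editor addition): IdRange is half-open, so IdRange(4, 8)
// covers source ids 4..7 and excludes 8.  The UInt overload of `contains`
// above performs the same test with a single prefix comparison plus two
// constant comparisons rather than two full-width magnitude compares.
object IdRangeExample {
  val r = IdRange(4, 8)
  require( r.contains(5))   // inside the range
  require(!r.contains(8))   // end is exclusive
  require( r.overlaps(IdRange(7, 12)) && !r.overlaps(IdRange(8, 12)))
}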
// A potentially empty inclusive range of 2-powers [min, max] (in bytes)
case class TransferSizes(min: Int, max: Int)
{
def this(x: Int) = this(x, x)
require (min <= max, s"Min transfer $min > max transfer $max")
require (min >= 0 && max >= 0, s"TransferSizes must be positive, got: ($min, $max)")
require (max == 0 || isPow2(max), s"TransferSizes must be a power of 2, got: $max")
require (min == 0 || isPow2(min), s"TransferSizes must be a power of 2, got: $min")
require (max == 0 || min != 0, s"TransferSize 0 is forbidden unless (0,0), got: ($min, $max)")
def none = min == 0
def contains(x: Int) = isPow2(x) && min <= x && x <= max
def containsLg(x: Int) = contains(1 << x)
def containsLg(x: UInt) =
if (none) false.B
else if (min == max) { log2Ceil(min).U === x }
else { log2Ceil(min).U <= x && x <= log2Ceil(max).U }
def contains(x: TransferSizes) = x.none || (min <= x.min && x.max <= max)
def intersect(x: TransferSizes) =
if (x.max < min || max < x.min) TransferSizes.none
else TransferSizes(scala.math.max(min, x.min), scala.math.min(max, x.max))
// Not a union, because the result may contain sizes contained by neither term
// NOT TO BE CONFUSED WITH COVERPOINTS
def mincover(x: TransferSizes) = {
if (none) {
x
} else if (x.none) {
this
} else {
TransferSizes(scala.math.min(min, x.min), scala.math.max(max, x.max))
}
}
override def toString() = "TransferSizes[%d, %d]".format(min, max)
}
object TransferSizes {
def apply(x: Int) = new TransferSizes(x)
val none = new TransferSizes(0)
def mincover(seq: Seq[TransferSizes]) = seq.foldLeft(none)(_ mincover _)
def intersect(seq: Seq[TransferSizes]) = seq.reduce(_ intersect _)
implicit def asBool(x: TransferSizes) = !x.none
}
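// Illustrative sketch (editor addition): TransferSizes is an inclusive range of
// power-of-two byte sizes.  intersect keeps only the sizes both operands
// allow, while mincover grows the range to cover both (and possibly more).
object TransferSizesExample {
  val puts = TransferSizes(4, 64)
  val gets = TransferSizes(1, 8)
  require(puts.contains(32) && !puts.contains(3))   // 3 is not a power of two
  require(puts.intersect(gets) == TransferSizes(4, 8))
  require(puts.mincover(gets)  == TransferSizes(1, 64))
}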
// AddressSets specify the address space managed by the manager
// Base is the base address, and mask are the bits consumed by the manager
// e.g: base=0x200, mask=0xff describes a device managing 0x200-0x2ff
// e.g: base=0x1000, mask=0xf0f describes a device managing 0x1000-0x100f, 0x1100-0x110f, ...
case class AddressSet(base: BigInt, mask: BigInt) extends Ordered[AddressSet]
{
// Forbid misaligned base address (and empty sets)
require ((base & mask) == 0, s"Mis-aligned AddressSets are forbidden, got: ${this.toString}")
require (base >= 0, s"AddressSet negative base is ambiguous: $base") // TL2 address widths are not fixed => negative is ambiguous
// We do allow negative mask (=> ignore all high bits)
def contains(x: BigInt) = ((x ^ base) & ~mask) == 0
def contains(x: UInt) = ((x ^ base.U).zext & (~mask).S) === 0.S
// turn x into an address contained in this set
def legalize(x: UInt): UInt = base.U | (mask.U & x)
// overlap iff bitwise: both care (~mask0 & ~mask1) => both equal (base0=base1)
def overlaps(x: AddressSet) = (~(mask | x.mask) & (base ^ x.base)) == 0
// contains iff bitwise: x.mask => mask && contains(x.base)
def contains(x: AddressSet) = ((x.mask | (base ^ x.base)) & ~mask) == 0
// The number of bytes to which the manager must be aligned
def alignment = ((mask + 1) & ~mask)
// Is this a contiguous memory range
def contiguous = alignment == mask+1
def finite = mask >= 0
def max = { require (finite, "Max cannot be calculated on infinite mask"); base | mask }
// Widen the match function to ignore all bits in imask
def widen(imask: BigInt) = AddressSet(base & ~imask, mask | imask)
// Return an AddressSet that only contains the addresses both sets contain
def intersect(x: AddressSet): Option[AddressSet] = {
if (!overlaps(x)) {
None
} else {
val r_mask = mask & x.mask
val r_base = base | x.base
Some(AddressSet(r_base, r_mask))
}
}
def subtract(x: AddressSet): Seq[AddressSet] = {
intersect(x) match {
case None => Seq(this)
case Some(remove) => AddressSet.enumerateBits(mask & ~remove.mask).map { bit =>
val nmask = (mask & (bit-1)) | remove.mask
val nbase = (remove.base ^ bit) & ~nmask
AddressSet(nbase, nmask)
}
}
}
// AddressSets have one natural Ordering (the containment order, if contiguous)
def compare(x: AddressSet) = {
val primary = (this.base - x.base).signum // smallest address first
val secondary = (x.mask - this.mask).signum // largest mask first
if (primary != 0) primary else secondary
}
// We always want to see things in hex
override def toString() = {
if (mask >= 0) {
"AddressSet(0x%x, 0x%x)".format(base, mask)
} else {
"AddressSet(0x%x, ~0x%x)".format(base, ~mask)
}
}
def toRanges = {
require (finite, "Ranges cannot be calculated on infinite mask")
val size = alignment
val fragments = mask & ~(size-1)
val bits = bitIndexes(fragments)
(BigInt(0) until (BigInt(1) << bits.size)).map { i =>
val off = bitIndexes(i).foldLeft(base) { case (a, b) => a.setBit(bits(b)) }
AddressRange(off, size)
}
}
}
object AddressSet
{
val everything = AddressSet(0, -1)
def misaligned(base: BigInt, size: BigInt, tail: Seq[AddressSet] = Seq()): Seq[AddressSet] = {
if (size == 0) tail.reverse else {
val maxBaseAlignment = base & (-base) // 0 for infinite (LSB)
val maxSizeAlignment = BigInt(1) << log2Floor(size) // MSB of size
val step =
if (maxBaseAlignment == 0 || maxBaseAlignment > maxSizeAlignment)
maxSizeAlignment else maxBaseAlignment
misaligned(base+step, size-step, AddressSet(base, step-1) +: tail)
}
}
def unify(seq: Seq[AddressSet], bit: BigInt): Seq[AddressSet] = {
// Pair terms up by ignoring 'bit'
seq.distinct.groupBy(x => x.copy(base = x.base & ~bit)).map { case (key, seq) =>
if (seq.size == 1) {
seq.head // singleton -> unaffected
} else {
key.copy(mask = key.mask | bit) // pair - widen mask by bit
}
}.toList
}
def unify(seq: Seq[AddressSet]): Seq[AddressSet] = {
val bits = seq.map(_.base).foldLeft(BigInt(0))(_ | _)
AddressSet.enumerateBits(bits).foldLeft(seq) { case (acc, bit) => unify(acc, bit) }.sorted
}
def enumerateMask(mask: BigInt): Seq[BigInt] = {
def helper(id: BigInt, tail: Seq[BigInt]): Seq[BigInt] =
if (id == mask) (id +: tail).reverse else helper(((~mask | id) + 1) & mask, id +: tail)
helper(0, Nil)
}
def enumerateBits(mask: BigInt): Seq[BigInt] = {
def helper(x: BigInt): Seq[BigInt] = {
if (x == 0) {
Nil
} else {
val bit = x & (-x)
bit +: helper(x & ~bit)
}
}
helper(mask)
}
}
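// Illustrative sketch (editor addition): concrete instances of the base/mask
// encoding described above, plus AddressSet.misaligned covering a
// non-power-of-two region with aligned sets.  All values are made up for the
// example.
object AddressSetExample {
  val dev = AddressSet(0x200,  0xff)    // 0x200 - 0x2ff
  val far = AddressSet(0x1000, 0xf0f)   // 0x1000-0x100f, 0x1100-0x110f, ...
  require( dev.contains(0x2a0) && !dev.contains(0x300))
  require( far.contains(0x1105) && !far.contains(0x1010))
  require( dev.alignment == 0x100 && !dev.overlaps(far))
  // [0x3000, 0x3180) is not a power-of-two block, so it takes two sets:
  require(AddressSet.misaligned(0x3000, 0x180) ==
          Seq(AddressSet(0x3000, 0xff), AddressSet(0x3100, 0x7f)))
}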
case class BufferParams(depth: Int, flow: Boolean, pipe: Boolean)
{
require (depth >= 0, "Buffer depth must be >= 0")
def isDefined = depth > 0
def latency = if (isDefined && !flow) 1 else 0
def apply[T <: Data](x: DecoupledIO[T]) =
if (isDefined) Queue(x, depth, flow=flow, pipe=pipe)
else x
def irrevocable[T <: Data](x: ReadyValidIO[T]) =
if (isDefined) Queue.irrevocable(x, depth, flow=flow, pipe=pipe)
else x
def sq[T <: Data](x: DecoupledIO[T]) =
if (!isDefined) x else {
val sq = Module(new ShiftQueue(x.bits, depth, flow=flow, pipe=pipe))
sq.io.enq <> x
sq.io.deq
}
override def toString() = "BufferParams:%d%s%s".format(depth, if (flow) "F" else "", if (pipe) "P" else "")
}
object BufferParams
{
implicit def apply(depth: Int): BufferParams = BufferParams(depth, false, false)
val default = BufferParams(2)
val none = BufferParams(0)
val flow = BufferParams(1, true, false)
val pipe = BufferParams(1, false, true)
}
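// Illustrative sketch (editor addition): applying a BufferParams to a
// Decoupled stream inserts a Queue of the requested depth with the flow/pipe
// options, while BufferParams.none passes the stream through untouched.  The
// module and its ports are assumptions made up for the example.
class BufferParamsExample extends Module {
  val io = IO(new Bundle {
    val in  = Flipped(new DecoupledIO(UInt(8.W)))
    val out = new DecoupledIO(UInt(8.W))
  })
  io.out <> BufferParams.default(io.in)   // two-entry Queue, latency 1
}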
case class TriStateValue(value: Boolean, set: Boolean)
{
def update(orig: Boolean) = if (set) value else orig
}
object TriStateValue
{
implicit def apply(value: Boolean): TriStateValue = TriStateValue(value, true)
def unset = TriStateValue(false, false)
}
trait DirectedBuffers[T] {
def copyIn(x: BufferParams): T
def copyOut(x: BufferParams): T
def copyInOut(x: BufferParams): T
}
trait IdMapEntry {
def name: String
def from: IdRange
def to: IdRange
def isCache: Boolean
def requestFifo: Boolean
def maxTransactionsInFlight: Option[Int]
def pretty(fmt: String) =
if (from ne to) { // if the subclass uses the same reference for both from and to, assume its format string has an arity of 5
fmt.format(to.start, to.end, from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "")
} else {
fmt.format(from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "")
}
}
abstract class IdMap[T <: IdMapEntry] {
protected val fmt: String
val mapping: Seq[T]
def pretty: String = mapping.map(_.pretty(fmt)).mkString(",\n")
}
File Edges.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.util._
class TLEdge(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdgeParameters(client, manager, params, sourceInfo)
{
def isAligned(address: UInt, lgSize: UInt): Bool = {
if (maxLgSize == 0) true.B else {
val mask = UIntToOH1(lgSize, maxLgSize)
(address & mask) === 0.U
}
}
def mask(address: UInt, lgSize: UInt): UInt =
MaskGen(address, lgSize, manager.beatBytes)
def staticHasData(bundle: TLChannel): Option[Boolean] = {
bundle match {
case _:TLBundleA => {
// Do there exist A messages with Data?
val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial
// Do there exist A messages without Data?
val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint
// Statically optimize the case where hasData is a constant
if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None
}
case _:TLBundleB => {
// Do there exist B messages with Data?
val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial
// Do there exist B messages without Data?
val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint
// Statically optimize the case where hasData is a constant
if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None
}
case _:TLBundleC => {
// Do there exist C messages with Data?
val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe
// Do there exist C messages without Data?
val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe
if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None
}
case _:TLBundleD => {
// Do there exist D messages with Data?
val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB
// Do there exist D messages without Data?
val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT
if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None
}
case _:TLBundleE => Some(false)
}
}
def isRequest(x: TLChannel): Bool = {
x match {
case a: TLBundleA => true.B
case b: TLBundleB => true.B
case c: TLBundleC => c.opcode(2) && c.opcode(1)
// opcode === TLMessages.Release ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(2) && !d.opcode(1)
// opcode === TLMessages.Grant ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
}
def isResponse(x: TLChannel): Bool = {
x match {
case a: TLBundleA => false.B
case b: TLBundleB => false.B
case c: TLBundleC => !c.opcode(2) || !c.opcode(1)
// opcode =/= TLMessages.Release &&
// opcode =/= TLMessages.ReleaseData
case d: TLBundleD => true.B // Grant isResponse + isRequest
case e: TLBundleE => true.B
}
}
def hasData(x: TLChannel): Bool = {
val opdata = x match {
case a: TLBundleA => !a.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case b: TLBundleB => !b.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case c: TLBundleC => c.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.ProbeAckData ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
staticHasData(x).map(_.B).getOrElse(opdata)
}
def opcode(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.opcode
case b: TLBundleB => b.opcode
case c: TLBundleC => c.opcode
case d: TLBundleD => d.opcode
}
}
def param(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.param
case b: TLBundleB => b.param
case c: TLBundleC => c.param
case d: TLBundleD => d.param
}
}
def size(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.size
case b: TLBundleB => b.size
case c: TLBundleC => c.size
case d: TLBundleD => d.size
}
}
def data(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.data
case b: TLBundleB => b.data
case c: TLBundleC => c.data
case d: TLBundleD => d.data
}
}
def corrupt(x: TLDataChannel): Bool = {
x match {
case a: TLBundleA => a.corrupt
case b: TLBundleB => b.corrupt
case c: TLBundleC => c.corrupt
case d: TLBundleD => d.corrupt
}
}
def mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.mask
case b: TLBundleB => b.mask
case c: TLBundleC => mask(c.address, c.size)
}
}
def full_mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => mask(a.address, a.size)
case b: TLBundleB => mask(b.address, b.size)
case c: TLBundleC => mask(c.address, c.size)
}
}
def address(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.address
case b: TLBundleB => b.address
case c: TLBundleC => c.address
}
}
def source(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.source
case b: TLBundleB => b.source
case c: TLBundleC => c.source
case d: TLBundleD => d.source
}
}
def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes)
def addr_lo(x: UInt): UInt =
if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0)
def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x))
def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x))
def numBeats(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 1.U
case bundle: TLDataChannel => {
val hasData = this.hasData(bundle)
val size = this.size(bundle)
val cutoff = log2Ceil(manager.beatBytes)
val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U
val decode = UIntToOH(size, maxLgSize+1) >> cutoff
Mux(hasData, decode | small.asUInt, 1.U)
}
}
}
def numBeats1(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 0.U
case bundle: TLDataChannel => {
if (maxLgSize == 0) {
0.U
} else {
val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes)
Mux(hasData(bundle), decode, 0.U)
}
}
}
}
def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val beats1 = numBeats1(bits)
val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W))
val counter1 = counter - 1.U
val first = counter === 0.U
val last = counter === 1.U || beats1 === 0.U
val done = last && fire
val count = (beats1 & ~counter1)
when (fire) {
counter := Mux(first, beats1, counter1)
}
(first, last, done, count)
}
def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1
def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire)
def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid)
def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2
def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire)
def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid)
def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3
def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire)
def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid)
def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3)
}
def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire)
def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid)
def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4)
}
def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire)
def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid)
def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes))
}
def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire)
def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid)
// Does the request need T permissions to be executed?
def needT(a: TLBundleA): Bool = {
val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLPermissions.NtoB -> false.B,
TLPermissions.NtoT -> true.B,
TLPermissions.BtoT -> true.B))
MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array(
TLMessages.PutFullData -> true.B,
TLMessages.PutPartialData -> true.B,
TLMessages.ArithmeticData -> true.B,
TLMessages.LogicalData -> true.B,
TLMessages.Get -> false.B,
TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLHints.PREFETCH_READ -> false.B,
TLHints.PREFETCH_WRITE -> true.B)),
TLMessages.AcquireBlock -> acq_needT,
TLMessages.AcquirePerm -> acq_needT))
}
// This is a very expensive circuit; use only if you really mean it!
def inFlight(x: TLBundle): (UInt, UInt) = {
val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W))
val bce = manager.anySupportAcquireB && client.anySupportProbe
val (a_first, a_last, _) = firstlast(x.a)
val (b_first, b_last, _) = firstlast(x.b)
val (c_first, c_last, _) = firstlast(x.c)
val (d_first, d_last, _) = firstlast(x.d)
val (e_first, e_last, _) = firstlast(x.e)
val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits))
val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits))
val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits))
val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits))
val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits))
val a_inc = x.a.fire && a_first && a_request
val b_inc = x.b.fire && b_first && b_request
val c_inc = x.c.fire && c_first && c_request
val d_inc = x.d.fire && d_first && d_request
val e_inc = x.e.fire && e_first && e_request
val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil))
val a_dec = x.a.fire && a_last && a_response
val b_dec = x.b.fire && b_last && b_response
val c_dec = x.c.fire && c_last && c_response
val d_dec = x.d.fire && d_last && d_response
val e_dec = x.e.fire && e_last && e_response
val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil))
val next_flight = flight + PopCount(inc) - PopCount(dec)
flight := next_flight
(flight, next_flight)
}
def prettySourceMapping(context: String): String = {
s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n"
}
}
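// Illustrative sketch (editor addition): how the first/last/done helpers above
// are typically consumed.  The edge and channel are passed in explicitly here
// because a real design obtains them from diplomacy; the names are assumptions.
object TLEdgeFirstLastExample {
  def burstDone(edge: TLEdge, a: DecoupledIO[TLBundleA]): Bool = {
    val (first, last, done) = edge.firstlast(a)
    // `first` marks the beat carrying the header fields; `done` pulses on the
    // cycle the final beat of a (possibly multibeat) A message fires.
    done
  }
}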
class TLEdgeOut(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
// Transfers
def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquireBlock
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquirePerm
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.Release
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ReleaseData
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) =
Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B)
def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAck
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions, data)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAckData
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B)
def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink)
def GrantAck(toSink: UInt): TLBundleE = {
val e = Wire(new TLBundleE(bundle))
e.sink := toSink
e
}
// Accesses
def Get(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsGetFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Get
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutFullFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutFullData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, mask, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutPartialFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutPartialData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = {
require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsArithmeticFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.ArithmeticData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsLogicalFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.LogicalData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = {
require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsHintFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Hint
a.param := param
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data)
def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAckData
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size)
def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.HintAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
}
class TLEdgeIn(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = {
val todo = x.filter(!_.isEmpty)
val heads = todo.map(_.head)
val tails = todo.map(_.tail)
if (todo.isEmpty) Nil else { heads +: myTranspose(tails) }
}
// Transfers
def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = {
require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}")
val legal = client.supportsProbe(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Probe
b.param := capPermissions
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.Grant
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.GrantData
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B)
def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.ReleaseAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
// Accesses
def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = {
require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsGet(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Get
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsPutFull(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutFullData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, mask, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsPutPartial(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutPartialData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsArithmetic(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.ArithmeticData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsLogical(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.LogicalData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = {
require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsHint(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Hint
b.param := param
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size)
def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied)
def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data)
def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAckData
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B)
def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied)
def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B)
def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.HintAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
}
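// Illustrative sketch (editor addition): the message constructors above build a
// whole channel payload in one call and report whether the access is legal on
// this edge.  `edge`, `a`, `want`, and the literal address and size are
// assumptions made up for the example.
object TLEdgeUsageExample {
  def driveGet(edge: TLEdgeOut, a: DecoupledIO[TLBundleA], want: Bool): Unit = {
    // 8-byte read of address 0x200 from source 0
    val (legal, bits) = edge.Get(fromSource = 0.U, toAddress = 0x200.U, lgSize = 3.U)
    a.valid := want && legal
    a.bits  := bits
  }
}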
| module TLMonitor_26( // @[Monitor.scala:36:7]
input clock, // @[Monitor.scala:36:7]
input reset, // @[Monitor.scala:36:7]
input io_in_a_ready, // @[Monitor.scala:20:14]
input io_in_a_valid, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_opcode, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_param, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_size, // @[Monitor.scala:20:14]
input [6:0] io_in_a_bits_source, // @[Monitor.scala:20:14]
input [16:0] io_in_a_bits_address, // @[Monitor.scala:20:14]
input [7:0] io_in_a_bits_mask, // @[Monitor.scala:20:14]
input [63:0] io_in_a_bits_data, // @[Monitor.scala:20:14]
input io_in_a_bits_corrupt, // @[Monitor.scala:20:14]
input io_in_d_ready, // @[Monitor.scala:20:14]
input io_in_d_valid, // @[Monitor.scala:20:14]
input [2:0] io_in_d_bits_size, // @[Monitor.scala:20:14]
input [6:0] io_in_d_bits_source, // @[Monitor.scala:20:14]
input [63:0] io_in_d_bits_data // @[Monitor.scala:20:14]
);
wire [31:0] _plusarg_reader_1_out; // @[PlusArg.scala:80:11]
wire [31:0] _plusarg_reader_out; // @[PlusArg.scala:80:11]
wire io_in_a_ready_0 = io_in_a_ready; // @[Monitor.scala:36:7]
wire io_in_a_valid_0 = io_in_a_valid; // @[Monitor.scala:36:7]
wire [2:0] io_in_a_bits_opcode_0 = io_in_a_bits_opcode; // @[Monitor.scala:36:7]
wire [2:0] io_in_a_bits_param_0 = io_in_a_bits_param; // @[Monitor.scala:36:7]
wire [2:0] io_in_a_bits_size_0 = io_in_a_bits_size; // @[Monitor.scala:36:7]
wire [6:0] io_in_a_bits_source_0 = io_in_a_bits_source; // @[Monitor.scala:36:7]
wire [16:0] io_in_a_bits_address_0 = io_in_a_bits_address; // @[Monitor.scala:36:7]
wire [7:0] io_in_a_bits_mask_0 = io_in_a_bits_mask; // @[Monitor.scala:36:7]
wire [63:0] io_in_a_bits_data_0 = io_in_a_bits_data; // @[Monitor.scala:36:7]
wire io_in_a_bits_corrupt_0 = io_in_a_bits_corrupt; // @[Monitor.scala:36:7]
wire io_in_d_ready_0 = io_in_d_ready; // @[Monitor.scala:36:7]
wire io_in_d_valid_0 = io_in_d_valid; // @[Monitor.scala:36:7]
wire [2:0] io_in_d_bits_size_0 = io_in_d_bits_size; // @[Monitor.scala:36:7]
wire [6:0] io_in_d_bits_source_0 = io_in_d_bits_source; // @[Monitor.scala:36:7]
wire [63:0] io_in_d_bits_data_0 = io_in_d_bits_data; // @[Monitor.scala:36:7]
wire io_in_d_bits_sink = 1'h0; // @[Monitor.scala:36:7]
wire io_in_d_bits_denied = 1'h0; // @[Monitor.scala:36:7]
wire io_in_d_bits_corrupt = 1'h0; // @[Monitor.scala:36:7]
wire sink_ok = 1'h0; // @[Monitor.scala:309:31]
wire d_release_ack = 1'h0; // @[Monitor.scala:673:46]
wire _c_first_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_T = 1'h0; // @[Decoupled.scala:51:35]
wire c_first_beats1_opdata = 1'h0; // @[Edges.scala:102:36]
wire _c_first_last_T = 1'h0; // @[Edges.scala:232:25]
wire c_first_done = 1'h0; // @[Edges.scala:233:22]
wire _c_set_wo_ready_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_wo_ready_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_wo_ready_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_wo_ready_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_wo_ready_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_wo_ready_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_interm_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_interm_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_interm_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_interm_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_interm_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_interm_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_interm_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_interm_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_interm_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_interm_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_interm_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_interm_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_T = 1'h0; // @[Monitor.scala:772:47]
wire _c_probe_ack_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_T_1 = 1'h0; // @[Monitor.scala:772:95]
wire c_probe_ack = 1'h0; // @[Monitor.scala:772:71]
wire d_release_ack_1 = 1'h0; // @[Monitor.scala:783:46]
wire _same_cycle_resp_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_T_3 = 1'h0; // @[Monitor.scala:795:44]
wire _same_cycle_resp_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_T_4 = 1'h0; // @[Edges.scala:68:36]
wire _same_cycle_resp_T_5 = 1'h0; // @[Edges.scala:68:51]
wire _same_cycle_resp_T_6 = 1'h0; // @[Edges.scala:68:40]
wire _same_cycle_resp_T_7 = 1'h0; // @[Monitor.scala:795:55]
wire _same_cycle_resp_WIRE_4_ready = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_4_valid = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_4_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_5_ready = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_5_valid = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_5_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire same_cycle_resp_1 = 1'h0; // @[Monitor.scala:795:88]
wire [2:0] a_first_beats1 = 3'h0; // @[Edges.scala:221:14]
wire [2:0] a_first_count = 3'h0; // @[Edges.scala:234:25]
wire [2:0] a_first_beats1_1 = 3'h0; // @[Edges.scala:221:14]
wire [2:0] a_first_count_1 = 3'h0; // @[Edges.scala:234:25]
wire [2:0] responseMap_0 = 3'h0; // @[Monitor.scala:643:42]
wire [2:0] responseMap_1 = 3'h0; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_0 = 3'h0; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_1 = 3'h0; // @[Monitor.scala:644:42]
wire [2:0] _c_first_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_2_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_3_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] c_first_beats1_decode = 3'h0; // @[Edges.scala:220:59]
wire [2:0] c_first_beats1 = 3'h0; // @[Edges.scala:221:14]
wire [2:0] _c_first_count_T = 3'h0; // @[Edges.scala:234:27]
wire [2:0] c_first_count = 3'h0; // @[Edges.scala:234:25]
wire [2:0] _c_first_counter_T = 3'h0; // @[Edges.scala:236:21]
wire [2:0] _c_set_wo_ready_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_wo_ready_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_wo_ready_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_wo_ready_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_wo_ready_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_wo_ready_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_interm_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_interm_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_interm_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_interm_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_interm_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_interm_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_interm_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_interm_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_interm_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_2_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_3_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_2_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_3_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_4_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_4_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_4_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_5_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_5_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_5_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire _source_ok_T_3 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_5 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_9 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_11 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_15 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_17 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_21 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_23 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_28 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_30 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_44 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_46 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_50 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_52 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_56 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_58 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_62 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_64 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_69 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_71 = 1'h1; // @[Parameters.scala:57:20]
wire _a_first_last_T_1 = 1'h1; // @[Edges.scala:232:43]
wire a_first_last = 1'h1; // @[Edges.scala:232:33]
wire d_first_beats1_opdata = 1'h1; // @[Edges.scala:106:36]
wire _a_first_last_T_3 = 1'h1; // @[Edges.scala:232:43]
wire a_first_last_1 = 1'h1; // @[Edges.scala:232:33]
wire d_first_beats1_opdata_1 = 1'h1; // @[Edges.scala:106:36]
wire c_first = 1'h1; // @[Edges.scala:231:25]
wire _c_first_last_T_1 = 1'h1; // @[Edges.scala:232:43]
wire c_first_last = 1'h1; // @[Edges.scala:232:33]
wire d_first_beats1_opdata_2 = 1'h1; // @[Edges.scala:106:36]
wire [2:0] c_first_counter1 = 3'h7; // @[Edges.scala:230:28]
wire [3:0] _c_first_counter1_T = 4'hF; // @[Edges.scala:230:28]
wire [1:0] io_in_d_bits_param = 2'h0; // @[Monitor.scala:36:7]
wire [2:0] io_in_d_bits_opcode = 3'h1; // @[Monitor.scala:36:7]
wire [2:0] responseMap_2 = 3'h1; // @[Monitor.scala:643:42]
wire [2:0] responseMap_3 = 3'h1; // @[Monitor.scala:643:42]
wire [2:0] responseMap_4 = 3'h1; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_2 = 3'h1; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_3 = 3'h1; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_4 = 3'h1; // @[Monitor.scala:644:42]
wire [259:0] _inflight_opcodes_T_4 = 260'hFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF; // @[Monitor.scala:815:62]
wire [259:0] _inflight_sizes_T_4 = 260'hFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF; // @[Monitor.scala:816:58]
wire [64:0] _inflight_T_4 = 65'h1FFFFFFFFFFFFFFFF; // @[Monitor.scala:814:46]
wire [63:0] _c_first_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_first_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_first_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_first_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_set_wo_ready_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_set_wo_ready_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_opcodes_set_interm_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_opcodes_set_interm_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_sizes_set_interm_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_sizes_set_interm_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_opcodes_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_opcodes_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_sizes_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_sizes_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_probe_ack_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_probe_ack_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_probe_ack_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_probe_ack_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _same_cycle_resp_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _same_cycle_resp_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _same_cycle_resp_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _same_cycle_resp_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _same_cycle_resp_WIRE_4_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _same_cycle_resp_WIRE_5_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [16:0] _c_first_WIRE_bits_address = 17'h0; // @[Bundles.scala:265:74]
wire [16:0] _c_first_WIRE_1_bits_address = 17'h0; // @[Bundles.scala:265:61]
wire [16:0] _c_first_WIRE_2_bits_address = 17'h0; // @[Bundles.scala:265:74]
wire [16:0] _c_first_WIRE_3_bits_address = 17'h0; // @[Bundles.scala:265:61]
wire [16:0] _c_set_wo_ready_WIRE_bits_address = 17'h0; // @[Bundles.scala:265:74]
wire [16:0] _c_set_wo_ready_WIRE_1_bits_address = 17'h0; // @[Bundles.scala:265:61]
wire [16:0] _c_set_WIRE_bits_address = 17'h0; // @[Bundles.scala:265:74]
wire [16:0] _c_set_WIRE_1_bits_address = 17'h0; // @[Bundles.scala:265:61]
wire [16:0] _c_opcodes_set_interm_WIRE_bits_address = 17'h0; // @[Bundles.scala:265:74]
wire [16:0] _c_opcodes_set_interm_WIRE_1_bits_address = 17'h0; // @[Bundles.scala:265:61]
wire [16:0] _c_sizes_set_interm_WIRE_bits_address = 17'h0; // @[Bundles.scala:265:74]
wire [16:0] _c_sizes_set_interm_WIRE_1_bits_address = 17'h0; // @[Bundles.scala:265:61]
wire [16:0] _c_opcodes_set_WIRE_bits_address = 17'h0; // @[Bundles.scala:265:74]
wire [16:0] _c_opcodes_set_WIRE_1_bits_address = 17'h0; // @[Bundles.scala:265:61]
wire [16:0] _c_sizes_set_WIRE_bits_address = 17'h0; // @[Bundles.scala:265:74]
wire [16:0] _c_sizes_set_WIRE_1_bits_address = 17'h0; // @[Bundles.scala:265:61]
wire [16:0] _c_probe_ack_WIRE_bits_address = 17'h0; // @[Bundles.scala:265:74]
wire [16:0] _c_probe_ack_WIRE_1_bits_address = 17'h0; // @[Bundles.scala:265:61]
wire [16:0] _c_probe_ack_WIRE_2_bits_address = 17'h0; // @[Bundles.scala:265:74]
wire [16:0] _c_probe_ack_WIRE_3_bits_address = 17'h0; // @[Bundles.scala:265:61]
wire [16:0] _same_cycle_resp_WIRE_bits_address = 17'h0; // @[Bundles.scala:265:74]
wire [16:0] _same_cycle_resp_WIRE_1_bits_address = 17'h0; // @[Bundles.scala:265:61]
wire [16:0] _same_cycle_resp_WIRE_2_bits_address = 17'h0; // @[Bundles.scala:265:74]
wire [16:0] _same_cycle_resp_WIRE_3_bits_address = 17'h0; // @[Bundles.scala:265:61]
wire [16:0] _same_cycle_resp_WIRE_4_bits_address = 17'h0; // @[Bundles.scala:265:74]
wire [16:0] _same_cycle_resp_WIRE_5_bits_address = 17'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_first_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_first_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_first_WIRE_2_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_first_WIRE_3_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_set_wo_ready_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_set_wo_ready_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_set_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_set_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_opcodes_set_interm_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_opcodes_set_interm_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_sizes_set_interm_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_sizes_set_interm_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_opcodes_set_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_opcodes_set_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_sizes_set_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_sizes_set_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_probe_ack_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_probe_ack_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_probe_ack_WIRE_2_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_probe_ack_WIRE_3_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _same_cycle_resp_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _same_cycle_resp_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _same_cycle_resp_WIRE_2_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _same_cycle_resp_WIRE_3_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _same_cycle_resp_WIRE_4_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _same_cycle_resp_WIRE_5_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [259:0] c_opcodes_set = 260'h0; // @[Monitor.scala:740:34]
wire [259:0] c_sizes_set = 260'h0; // @[Monitor.scala:741:34]
wire [259:0] d_opcodes_clr_1 = 260'h0; // @[Monitor.scala:776:34]
wire [259:0] d_sizes_clr_1 = 260'h0; // @[Monitor.scala:777:34]
wire [15:0] _a_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _a_size_lookup_T_5 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _d_opcodes_clr_T_3 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _d_sizes_clr_T_3 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _c_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:724:57]
wire [15:0] _c_size_lookup_T_5 = 16'hF; // @[Monitor.scala:724:57]
wire [15:0] _d_opcodes_clr_T_9 = 16'hF; // @[Monitor.scala:724:57]
wire [15:0] _d_sizes_clr_T_9 = 16'hF; // @[Monitor.scala:724:57]
wire [16:0] _a_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _a_size_lookup_T_4 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _d_opcodes_clr_T_2 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _d_sizes_clr_T_2 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _c_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:724:57]
wire [16:0] _c_size_lookup_T_4 = 17'hF; // @[Monitor.scala:724:57]
wire [16:0] _d_opcodes_clr_T_8 = 17'hF; // @[Monitor.scala:724:57]
wire [16:0] _d_sizes_clr_T_8 = 17'hF; // @[Monitor.scala:724:57]
wire [15:0] _a_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _a_size_lookup_T_3 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _d_opcodes_clr_T_1 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _d_sizes_clr_T_1 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _c_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:724:51]
wire [15:0] _c_size_lookup_T_3 = 16'h10; // @[Monitor.scala:724:51]
wire [15:0] _d_opcodes_clr_T_7 = 16'h10; // @[Monitor.scala:724:51]
wire [15:0] _d_sizes_clr_T_7 = 16'h10; // @[Monitor.scala:724:51]
wire [64:0] c_set = 65'h0; // @[Monitor.scala:738:34]
wire [64:0] c_set_wo_ready = 65'h0; // @[Monitor.scala:739:34]
wire [64:0] d_clr_1 = 65'h0; // @[Monitor.scala:774:34]
wire [64:0] d_clr_wo_ready_1 = 65'h0; // @[Monitor.scala:775:34]
wire [1026:0] _c_opcodes_set_T_1 = 1027'h0; // @[Monitor.scala:767:54]
wire [1026:0] _c_sizes_set_T_1 = 1027'h0; // @[Monitor.scala:768:52]
wire [9:0] _c_opcodes_set_T = 10'h0; // @[Monitor.scala:767:79]
wire [9:0] _c_sizes_set_T = 10'h0; // @[Monitor.scala:768:77]
wire [3:0] _c_opcodes_set_interm_T_1 = 4'h1; // @[Monitor.scala:765:61]
wire [3:0] _c_sizes_set_interm_T_1 = 4'h1; // @[Monitor.scala:766:59]
wire [3:0] c_opcodes_set_interm = 4'h0; // @[Monitor.scala:754:40]
wire [3:0] c_sizes_set_interm = 4'h0; // @[Monitor.scala:755:40]
wire [3:0] _c_opcodes_set_interm_T = 4'h0; // @[Monitor.scala:765:53]
wire [3:0] _c_sizes_set_interm_T = 4'h0; // @[Monitor.scala:766:51]
wire [127:0] _c_set_wo_ready_T = 128'h1; // @[OneHot.scala:58:35]
wire [127:0] _c_set_T = 128'h1; // @[OneHot.scala:58:35]
wire [5:0] _c_first_beats1_decode_T_2 = 6'h0; // @[package.scala:243:46]
wire [5:0] _c_first_beats1_decode_T_1 = 6'h3F; // @[package.scala:243:76]
wire [12:0] _c_first_beats1_decode_T = 13'h3F; // @[package.scala:243:71]
wire [2:0] responseMap_6 = 3'h4; // @[Monitor.scala:643:42]
wire [2:0] responseMap_7 = 3'h4; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_7 = 3'h4; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_6 = 3'h5; // @[Monitor.scala:644:42]
wire [2:0] responseMap_5 = 3'h2; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_5 = 3'h2; // @[Monitor.scala:644:42]
wire [3:0] _a_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:637:123]
wire [3:0] _a_size_lookup_T_2 = 4'h4; // @[Monitor.scala:641:117]
wire [3:0] _d_opcodes_clr_T = 4'h4; // @[Monitor.scala:680:48]
wire [3:0] _d_sizes_clr_T = 4'h4; // @[Monitor.scala:681:48]
wire [3:0] _c_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:749:123]
wire [3:0] _c_size_lookup_T_2 = 4'h4; // @[Monitor.scala:750:119]
wire [3:0] _d_opcodes_clr_T_6 = 4'h4; // @[Monitor.scala:790:48]
wire [3:0] _d_sizes_clr_T_6 = 4'h4; // @[Monitor.scala:791:48]
wire [2:0] _mask_sizeOH_T = io_in_a_bits_size_0; // @[Misc.scala:202:34]
wire [6:0] _source_ok_uncommonBits_T = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _source_ok_uncommonBits_T_1 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _source_ok_uncommonBits_T_2 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _source_ok_uncommonBits_T_3 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _source_ok_uncommonBits_T_4 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_1 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_2 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_3 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_4 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_5 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_6 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_7 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_8 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_9 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_10 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_11 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_12 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_13 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_14 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_15 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_16 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_17 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_18 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_19 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_20 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_21 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_22 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_23 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_24 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_25 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_26 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_27 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_28 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_29 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_30 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_31 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_32 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_33 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_34 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_35 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_36 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_37 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_38 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_39 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_40 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_41 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_42 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_43 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_44 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_45 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_46 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_47 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_48 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_49 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_50 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_51 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_52 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_53 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_54 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _source_ok_uncommonBits_T_5 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _source_ok_uncommonBits_T_6 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _source_ok_uncommonBits_T_7 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _source_ok_uncommonBits_T_8 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _source_ok_uncommonBits_T_9 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire _source_ok_T = io_in_a_bits_source_0 == 7'h10; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_0 = _source_ok_T; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits = _source_ok_uncommonBits_T[1:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] _source_ok_T_1 = io_in_a_bits_source_0[6:2]; // @[Monitor.scala:36:7]
wire [4:0] _source_ok_T_7 = io_in_a_bits_source_0[6:2]; // @[Monitor.scala:36:7]
wire [4:0] _source_ok_T_13 = io_in_a_bits_source_0[6:2]; // @[Monitor.scala:36:7]
wire [4:0] _source_ok_T_19 = io_in_a_bits_source_0[6:2]; // @[Monitor.scala:36:7]
wire [4:0] _source_ok_T_26 = io_in_a_bits_source_0[6:2]; // @[Monitor.scala:36:7]
wire _source_ok_T_2 = _source_ok_T_1 == 5'h0; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_4 = _source_ok_T_2; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_6 = _source_ok_T_4; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1 = _source_ok_T_6; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_1 = _source_ok_uncommonBits_T_1[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_8 = _source_ok_T_7 == 5'h1; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_10 = _source_ok_T_8; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_12 = _source_ok_T_10; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_2 = _source_ok_T_12; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_2 = _source_ok_uncommonBits_T_2[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_14 = _source_ok_T_13 == 5'h2; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_16 = _source_ok_T_14; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_18 = _source_ok_T_16; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_3 = _source_ok_T_18; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_3 = _source_ok_uncommonBits_T_3[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_20 = _source_ok_T_19 == 5'h3; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_22 = _source_ok_T_20; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_24 = _source_ok_T_22; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_4 = _source_ok_T_24; // @[Parameters.scala:1138:31]
wire _source_ok_T_25 = io_in_a_bits_source_0 == 7'h28; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_5 = _source_ok_T_25; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_4 = _source_ok_uncommonBits_T_4[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_27 = _source_ok_T_26 == 5'h8; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_29 = _source_ok_T_27; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_31 = _source_ok_T_29; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_6 = _source_ok_T_31; // @[Parameters.scala:1138:31]
wire _source_ok_T_32 = io_in_a_bits_source_0 == 7'h24; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_7 = _source_ok_T_32; // @[Parameters.scala:1138:31]
wire _source_ok_T_33 = io_in_a_bits_source_0 == 7'h40; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_8 = _source_ok_T_33; // @[Parameters.scala:1138:31]
wire _source_ok_T_34 = _source_ok_WIRE_0 | _source_ok_WIRE_1; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_35 = _source_ok_T_34 | _source_ok_WIRE_2; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_36 = _source_ok_T_35 | _source_ok_WIRE_3; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_37 = _source_ok_T_36 | _source_ok_WIRE_4; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_38 = _source_ok_T_37 | _source_ok_WIRE_5; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_39 = _source_ok_T_38 | _source_ok_WIRE_6; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_40 = _source_ok_T_39 | _source_ok_WIRE_7; // @[Parameters.scala:1138:31, :1139:46]
wire source_ok = _source_ok_T_40 | _source_ok_WIRE_8; // @[Parameters.scala:1138:31, :1139:46]
wire [12:0] _GEN = 13'h3F << io_in_a_bits_size_0; // @[package.scala:243:71]
wire [12:0] _is_aligned_mask_T; // @[package.scala:243:71]
assign _is_aligned_mask_T = _GEN; // @[package.scala:243:71]
wire [12:0] _a_first_beats1_decode_T; // @[package.scala:243:71]
assign _a_first_beats1_decode_T = _GEN; // @[package.scala:243:71]
wire [12:0] _a_first_beats1_decode_T_3; // @[package.scala:243:71]
assign _a_first_beats1_decode_T_3 = _GEN; // @[package.scala:243:71]
wire [5:0] _is_aligned_mask_T_1 = _is_aligned_mask_T[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] is_aligned_mask = ~_is_aligned_mask_T_1; // @[package.scala:243:{46,76}]
wire [16:0] _is_aligned_T = {11'h0, io_in_a_bits_address_0[5:0] & is_aligned_mask}; // @[package.scala:243:46]
wire is_aligned = _is_aligned_T == 17'h0; // @[Edges.scala:21:{16,24}]
wire [1:0] mask_sizeOH_shiftAmount = _mask_sizeOH_T[1:0]; // @[OneHot.scala:64:49]
wire [3:0] _mask_sizeOH_T_1 = 4'h1 << mask_sizeOH_shiftAmount; // @[OneHot.scala:64:49, :65:12]
wire [2:0] _mask_sizeOH_T_2 = _mask_sizeOH_T_1[2:0]; // @[OneHot.scala:65:{12,27}]
wire [2:0] mask_sizeOH = {_mask_sizeOH_T_2[2:1], 1'h1}; // @[OneHot.scala:65:27]
wire mask_sub_sub_sub_0_1 = io_in_a_bits_size_0 > 3'h2; // @[Misc.scala:206:21]
wire mask_sub_sub_size = mask_sizeOH[2]; // @[Misc.scala:202:81, :209:26]
wire mask_sub_sub_bit = io_in_a_bits_address_0[2]; // @[Misc.scala:210:26]
wire mask_sub_sub_1_2 = mask_sub_sub_bit; // @[Misc.scala:210:26, :214:27]
wire mask_sub_sub_nbit = ~mask_sub_sub_bit; // @[Misc.scala:210:26, :211:20]
wire mask_sub_sub_0_2 = mask_sub_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_sub_sub_acc_T = mask_sub_sub_size & mask_sub_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_sub_0_1 = mask_sub_sub_sub_0_1 | _mask_sub_sub_acc_T; // @[Misc.scala:206:21, :215:{29,38}]
wire _mask_sub_sub_acc_T_1 = mask_sub_sub_size & mask_sub_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_sub_1_1 = mask_sub_sub_sub_0_1 | _mask_sub_sub_acc_T_1; // @[Misc.scala:206:21, :215:{29,38}]
wire mask_sub_size = mask_sizeOH[1]; // @[Misc.scala:202:81, :209:26]
wire mask_sub_bit = io_in_a_bits_address_0[1]; // @[Misc.scala:210:26]
wire mask_sub_nbit = ~mask_sub_bit; // @[Misc.scala:210:26, :211:20]
wire mask_sub_0_2 = mask_sub_sub_0_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_sub_acc_T = mask_sub_size & mask_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_0_1 = mask_sub_sub_0_1 | _mask_sub_acc_T; // @[Misc.scala:215:{29,38}]
wire mask_sub_1_2 = mask_sub_sub_0_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_sub_acc_T_1 = mask_sub_size & mask_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_1_1 = mask_sub_sub_0_1 | _mask_sub_acc_T_1; // @[Misc.scala:215:{29,38}]
wire mask_sub_2_2 = mask_sub_sub_1_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_sub_acc_T_2 = mask_sub_size & mask_sub_2_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_2_1 = mask_sub_sub_1_1 | _mask_sub_acc_T_2; // @[Misc.scala:215:{29,38}]
wire mask_sub_3_2 = mask_sub_sub_1_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_sub_acc_T_3 = mask_sub_size & mask_sub_3_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_3_1 = mask_sub_sub_1_1 | _mask_sub_acc_T_3; // @[Misc.scala:215:{29,38}]
wire mask_size = mask_sizeOH[0]; // @[Misc.scala:202:81, :209:26]
wire mask_bit = io_in_a_bits_address_0[0]; // @[Misc.scala:210:26]
wire mask_nbit = ~mask_bit; // @[Misc.scala:210:26, :211:20]
wire mask_eq = mask_sub_0_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T = mask_size & mask_eq; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc = mask_sub_0_1 | _mask_acc_T; // @[Misc.scala:215:{29,38}]
wire mask_eq_1 = mask_sub_0_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_1 = mask_size & mask_eq_1; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_1 = mask_sub_0_1 | _mask_acc_T_1; // @[Misc.scala:215:{29,38}]
wire mask_eq_2 = mask_sub_1_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T_2 = mask_size & mask_eq_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_2 = mask_sub_1_1 | _mask_acc_T_2; // @[Misc.scala:215:{29,38}]
wire mask_eq_3 = mask_sub_1_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_3 = mask_size & mask_eq_3; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_3 = mask_sub_1_1 | _mask_acc_T_3; // @[Misc.scala:215:{29,38}]
wire mask_eq_4 = mask_sub_2_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T_4 = mask_size & mask_eq_4; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_4 = mask_sub_2_1 | _mask_acc_T_4; // @[Misc.scala:215:{29,38}]
wire mask_eq_5 = mask_sub_2_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_5 = mask_size & mask_eq_5; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_5 = mask_sub_2_1 | _mask_acc_T_5; // @[Misc.scala:215:{29,38}]
wire mask_eq_6 = mask_sub_3_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T_6 = mask_size & mask_eq_6; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_6 = mask_sub_3_1 | _mask_acc_T_6; // @[Misc.scala:215:{29,38}]
wire mask_eq_7 = mask_sub_3_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_7 = mask_size & mask_eq_7; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_7 = mask_sub_3_1 | _mask_acc_T_7; // @[Misc.scala:215:{29,38}]
wire [1:0] mask_lo_lo = {mask_acc_1, mask_acc}; // @[Misc.scala:215:29, :222:10]
wire [1:0] mask_lo_hi = {mask_acc_3, mask_acc_2}; // @[Misc.scala:215:29, :222:10]
wire [3:0] mask_lo = {mask_lo_hi, mask_lo_lo}; // @[Misc.scala:222:10]
wire [1:0] mask_hi_lo = {mask_acc_5, mask_acc_4}; // @[Misc.scala:215:29, :222:10]
wire [1:0] mask_hi_hi = {mask_acc_7, mask_acc_6}; // @[Misc.scala:215:29, :222:10]
wire [3:0] mask_hi = {mask_hi_hi, mask_hi_lo}; // @[Misc.scala:222:10]
wire [7:0] mask = {mask_hi, mask_lo}; // @[Misc.scala:222:10]
wire [1:0] uncommonBits = _uncommonBits_T[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_1 = _uncommonBits_T_1[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_2 = _uncommonBits_T_2[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_3 = _uncommonBits_T_3[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_4 = _uncommonBits_T_4[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_5 = _uncommonBits_T_5[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_6 = _uncommonBits_T_6[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_7 = _uncommonBits_T_7[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_8 = _uncommonBits_T_8[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_9 = _uncommonBits_T_9[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_10 = _uncommonBits_T_10[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_11 = _uncommonBits_T_11[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_12 = _uncommonBits_T_12[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_13 = _uncommonBits_T_13[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_14 = _uncommonBits_T_14[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_15 = _uncommonBits_T_15[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_16 = _uncommonBits_T_16[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_17 = _uncommonBits_T_17[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_18 = _uncommonBits_T_18[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_19 = _uncommonBits_T_19[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_20 = _uncommonBits_T_20[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_21 = _uncommonBits_T_21[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_22 = _uncommonBits_T_22[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_23 = _uncommonBits_T_23[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_24 = _uncommonBits_T_24[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_25 = _uncommonBits_T_25[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_26 = _uncommonBits_T_26[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_27 = _uncommonBits_T_27[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_28 = _uncommonBits_T_28[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_29 = _uncommonBits_T_29[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_30 = _uncommonBits_T_30[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_31 = _uncommonBits_T_31[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_32 = _uncommonBits_T_32[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_33 = _uncommonBits_T_33[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_34 = _uncommonBits_T_34[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_35 = _uncommonBits_T_35[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_36 = _uncommonBits_T_36[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_37 = _uncommonBits_T_37[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_38 = _uncommonBits_T_38[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_39 = _uncommonBits_T_39[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_40 = _uncommonBits_T_40[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_41 = _uncommonBits_T_41[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_42 = _uncommonBits_T_42[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_43 = _uncommonBits_T_43[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_44 = _uncommonBits_T_44[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_45 = _uncommonBits_T_45[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_46 = _uncommonBits_T_46[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_47 = _uncommonBits_T_47[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_48 = _uncommonBits_T_48[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_49 = _uncommonBits_T_49[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_50 = _uncommonBits_T_50[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_51 = _uncommonBits_T_51[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_52 = _uncommonBits_T_52[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_53 = _uncommonBits_T_53[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_54 = _uncommonBits_T_54[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_41 = io_in_d_bits_source_0 == 7'h10; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_1_0 = _source_ok_T_41; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_5 = _source_ok_uncommonBits_T_5[1:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] _source_ok_T_42 = io_in_d_bits_source_0[6:2]; // @[Monitor.scala:36:7]
wire [4:0] _source_ok_T_48 = io_in_d_bits_source_0[6:2]; // @[Monitor.scala:36:7]
wire [4:0] _source_ok_T_54 = io_in_d_bits_source_0[6:2]; // @[Monitor.scala:36:7]
wire [4:0] _source_ok_T_60 = io_in_d_bits_source_0[6:2]; // @[Monitor.scala:36:7]
wire [4:0] _source_ok_T_67 = io_in_d_bits_source_0[6:2]; // @[Monitor.scala:36:7]
wire _source_ok_T_43 = _source_ok_T_42 == 5'h0; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_45 = _source_ok_T_43; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_47 = _source_ok_T_45; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1_1 = _source_ok_T_47; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_6 = _source_ok_uncommonBits_T_6[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_49 = _source_ok_T_48 == 5'h1; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_51 = _source_ok_T_49; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_53 = _source_ok_T_51; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1_2 = _source_ok_T_53; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_7 = _source_ok_uncommonBits_T_7[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_55 = _source_ok_T_54 == 5'h2; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_57 = _source_ok_T_55; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_59 = _source_ok_T_57; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1_3 = _source_ok_T_59; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_8 = _source_ok_uncommonBits_T_8[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_61 = _source_ok_T_60 == 5'h3; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_63 = _source_ok_T_61; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_65 = _source_ok_T_63; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1_4 = _source_ok_T_65; // @[Parameters.scala:1138:31]
wire _source_ok_T_66 = io_in_d_bits_source_0 == 7'h28; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_1_5 = _source_ok_T_66; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_9 = _source_ok_uncommonBits_T_9[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_68 = _source_ok_T_67 == 5'h8; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_70 = _source_ok_T_68; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_72 = _source_ok_T_70; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1_6 = _source_ok_T_72; // @[Parameters.scala:1138:31]
wire _source_ok_T_73 = io_in_d_bits_source_0 == 7'h24; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_1_7 = _source_ok_T_73; // @[Parameters.scala:1138:31]
wire _source_ok_T_74 = io_in_d_bits_source_0 == 7'h40; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_1_8 = _source_ok_T_74; // @[Parameters.scala:1138:31]
wire _source_ok_T_75 = _source_ok_WIRE_1_0 | _source_ok_WIRE_1_1; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_76 = _source_ok_T_75 | _source_ok_WIRE_1_2; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_77 = _source_ok_T_76 | _source_ok_WIRE_1_3; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_78 = _source_ok_T_77 | _source_ok_WIRE_1_4; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_79 = _source_ok_T_78 | _source_ok_WIRE_1_5; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_80 = _source_ok_T_79 | _source_ok_WIRE_1_6; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_81 = _source_ok_T_80 | _source_ok_WIRE_1_7; // @[Parameters.scala:1138:31, :1139:46]
wire source_ok_1 = _source_ok_T_81 | _source_ok_WIRE_1_8; // @[Parameters.scala:1138:31, :1139:46]
wire _T_1143 = io_in_a_ready_0 & io_in_a_valid_0; // @[Decoupled.scala:51:35]
wire _a_first_T; // @[Decoupled.scala:51:35]
assign _a_first_T = _T_1143; // @[Decoupled.scala:51:35]
wire _a_first_T_1; // @[Decoupled.scala:51:35]
assign _a_first_T_1 = _T_1143; // @[Decoupled.scala:51:35]
wire a_first_done = _a_first_T; // @[Decoupled.scala:51:35]
wire [5:0] _a_first_beats1_decode_T_1 = _a_first_beats1_decode_T[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] _a_first_beats1_decode_T_2 = ~_a_first_beats1_decode_T_1; // @[package.scala:243:{46,76}]
wire [2:0] a_first_beats1_decode = _a_first_beats1_decode_T_2[5:3]; // @[package.scala:243:46]
wire _a_first_beats1_opdata_T = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7]
wire _a_first_beats1_opdata_T_1 = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7]
wire a_first_beats1_opdata = ~_a_first_beats1_opdata_T; // @[Edges.scala:92:{28,37}]
reg [2:0] a_first_counter; // @[Edges.scala:229:27]
wire [3:0] _a_first_counter1_T = {1'h0, a_first_counter} - 4'h1; // @[Edges.scala:229:27, :230:28]
wire [2:0] a_first_counter1 = _a_first_counter1_T[2:0]; // @[Edges.scala:230:28]
wire a_first = a_first_counter == 3'h0; // @[Edges.scala:229:27, :231:25]
wire _a_first_last_T = a_first_counter == 3'h1; // @[Edges.scala:229:27, :232:25]
wire [2:0] _a_first_count_T = ~a_first_counter1; // @[Edges.scala:230:28, :234:27]
wire [2:0] _a_first_counter_T = a_first ? 3'h0 : a_first_counter1; // @[Edges.scala:230:28, :231:25, :236:21]
reg [2:0] opcode; // @[Monitor.scala:387:22]
reg [2:0] param; // @[Monitor.scala:388:22]
reg [2:0] size; // @[Monitor.scala:389:22]
reg [6:0] source; // @[Monitor.scala:390:22]
reg [16:0] address; // @[Monitor.scala:391:22]
wire _T_1211 = io_in_d_ready_0 & io_in_d_valid_0; // @[Decoupled.scala:51:35]
wire _d_first_T; // @[Decoupled.scala:51:35]
assign _d_first_T = _T_1211; // @[Decoupled.scala:51:35]
wire _d_first_T_1; // @[Decoupled.scala:51:35]
assign _d_first_T_1 = _T_1211; // @[Decoupled.scala:51:35]
wire _d_first_T_2; // @[Decoupled.scala:51:35]
assign _d_first_T_2 = _T_1211; // @[Decoupled.scala:51:35]
wire [12:0] _GEN_0 = 13'h3F << io_in_d_bits_size_0; // @[package.scala:243:71]
wire [12:0] _d_first_beats1_decode_T; // @[package.scala:243:71]
assign _d_first_beats1_decode_T = _GEN_0; // @[package.scala:243:71]
wire [12:0] _d_first_beats1_decode_T_3; // @[package.scala:243:71]
assign _d_first_beats1_decode_T_3 = _GEN_0; // @[package.scala:243:71]
wire [12:0] _d_first_beats1_decode_T_6; // @[package.scala:243:71]
assign _d_first_beats1_decode_T_6 = _GEN_0; // @[package.scala:243:71]
wire [5:0] _d_first_beats1_decode_T_1 = _d_first_beats1_decode_T[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] _d_first_beats1_decode_T_2 = ~_d_first_beats1_decode_T_1; // @[package.scala:243:{46,76}]
wire [2:0] d_first_beats1_decode = _d_first_beats1_decode_T_2[5:3]; // @[package.scala:243:46]
wire [2:0] d_first_beats1 = d_first_beats1_decode; // @[Edges.scala:220:59, :221:14]
reg [2:0] d_first_counter; // @[Edges.scala:229:27]
wire [3:0] _d_first_counter1_T = {1'h0, d_first_counter} - 4'h1; // @[Edges.scala:229:27, :230:28]
wire [2:0] d_first_counter1 = _d_first_counter1_T[2:0]; // @[Edges.scala:230:28]
wire d_first = d_first_counter == 3'h0; // @[Edges.scala:229:27, :231:25]
wire _d_first_last_T = d_first_counter == 3'h1; // @[Edges.scala:229:27, :232:25]
wire _d_first_last_T_1 = d_first_beats1 == 3'h0; // @[Edges.scala:221:14, :232:43]
wire d_first_last = _d_first_last_T | _d_first_last_T_1; // @[Edges.scala:232:{25,33,43}]
wire d_first_done = d_first_last & _d_first_T; // @[Decoupled.scala:51:35]
wire [2:0] _d_first_count_T = ~d_first_counter1; // @[Edges.scala:230:28, :234:27]
wire [2:0] d_first_count = d_first_beats1 & _d_first_count_T; // @[Edges.scala:221:14, :234:{25,27}]
wire [2:0] _d_first_counter_T = d_first ? d_first_beats1 : d_first_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
reg [2:0] size_1; // @[Monitor.scala:540:22]
reg [6:0] source_1; // @[Monitor.scala:541:22]
reg [64:0] inflight; // @[Monitor.scala:614:27]
reg [259:0] inflight_opcodes; // @[Monitor.scala:616:35]
reg [259:0] inflight_sizes; // @[Monitor.scala:618:33]
wire a_first_done_1 = _a_first_T_1; // @[Decoupled.scala:51:35]
wire [5:0] _a_first_beats1_decode_T_4 = _a_first_beats1_decode_T_3[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] _a_first_beats1_decode_T_5 = ~_a_first_beats1_decode_T_4; // @[package.scala:243:{46,76}]
wire [2:0] a_first_beats1_decode_1 = _a_first_beats1_decode_T_5[5:3]; // @[package.scala:243:46]
wire a_first_beats1_opdata_1 = ~_a_first_beats1_opdata_T_1; // @[Edges.scala:92:{28,37}]
reg [2:0] a_first_counter_1; // @[Edges.scala:229:27]
wire [3:0] _a_first_counter1_T_1 = {1'h0, a_first_counter_1} - 4'h1; // @[Edges.scala:229:27, :230:28]
wire [2:0] a_first_counter1_1 = _a_first_counter1_T_1[2:0]; // @[Edges.scala:230:28]
wire a_first_1 = a_first_counter_1 == 3'h0; // @[Edges.scala:229:27, :231:25]
wire _a_first_last_T_2 = a_first_counter_1 == 3'h1; // @[Edges.scala:229:27, :232:25]
wire [2:0] _a_first_count_T_1 = ~a_first_counter1_1; // @[Edges.scala:230:28, :234:27]
wire [2:0] _a_first_counter_T_1 = a_first_1 ? 3'h0 : a_first_counter1_1; // @[Edges.scala:230:28, :231:25, :236:21]
wire [5:0] _d_first_beats1_decode_T_4 = _d_first_beats1_decode_T_3[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] _d_first_beats1_decode_T_5 = ~_d_first_beats1_decode_T_4; // @[package.scala:243:{46,76}]
wire [2:0] d_first_beats1_decode_1 = _d_first_beats1_decode_T_5[5:3]; // @[package.scala:243:46]
wire [2:0] d_first_beats1_1 = d_first_beats1_decode_1; // @[Edges.scala:220:59, :221:14]
reg [2:0] d_first_counter_1; // @[Edges.scala:229:27]
wire [3:0] _d_first_counter1_T_1 = {1'h0, d_first_counter_1} - 4'h1; // @[Edges.scala:229:27, :230:28]
wire [2:0] d_first_counter1_1 = _d_first_counter1_T_1[2:0]; // @[Edges.scala:230:28]
wire d_first_1 = d_first_counter_1 == 3'h0; // @[Edges.scala:229:27, :231:25]
wire _d_first_last_T_2 = d_first_counter_1 == 3'h1; // @[Edges.scala:229:27, :232:25]
wire _d_first_last_T_3 = d_first_beats1_1 == 3'h0; // @[Edges.scala:221:14, :232:43]
wire d_first_last_1 = _d_first_last_T_2 | _d_first_last_T_3; // @[Edges.scala:232:{25,33,43}]
wire d_first_done_1 = d_first_last_1 & _d_first_T_1; // @[Decoupled.scala:51:35]
wire [2:0] _d_first_count_T_1 = ~d_first_counter1_1; // @[Edges.scala:230:28, :234:27]
wire [2:0] d_first_count_1 = d_first_beats1_1 & _d_first_count_T_1; // @[Edges.scala:221:14, :234:{25,27}]
wire [2:0] _d_first_counter_T_1 = d_first_1 ? d_first_beats1_1 : d_first_counter1_1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
wire [64:0] a_set; // @[Monitor.scala:626:34]
wire [64:0] a_set_wo_ready; // @[Monitor.scala:627:34]
wire [259:0] a_opcodes_set; // @[Monitor.scala:630:33]
wire [259:0] a_sizes_set; // @[Monitor.scala:632:31]
wire [2:0] a_opcode_lookup; // @[Monitor.scala:635:35]
wire [9:0] _GEN_1 = {1'h0, io_in_d_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :637:69]
wire [9:0] _a_opcode_lookup_T; // @[Monitor.scala:637:69]
assign _a_opcode_lookup_T = _GEN_1; // @[Monitor.scala:637:69]
wire [9:0] _a_size_lookup_T; // @[Monitor.scala:641:65]
assign _a_size_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :641:65]
wire [9:0] _d_opcodes_clr_T_4; // @[Monitor.scala:680:101]
assign _d_opcodes_clr_T_4 = _GEN_1; // @[Monitor.scala:637:69, :680:101]
wire [9:0] _d_sizes_clr_T_4; // @[Monitor.scala:681:99]
assign _d_sizes_clr_T_4 = _GEN_1; // @[Monitor.scala:637:69, :681:99]
wire [9:0] _c_opcode_lookup_T; // @[Monitor.scala:749:69]
assign _c_opcode_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :749:69]
wire [9:0] _c_size_lookup_T; // @[Monitor.scala:750:67]
assign _c_size_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :750:67]
wire [9:0] _d_opcodes_clr_T_10; // @[Monitor.scala:790:101]
assign _d_opcodes_clr_T_10 = _GEN_1; // @[Monitor.scala:637:69, :790:101]
wire [9:0] _d_sizes_clr_T_10; // @[Monitor.scala:791:99]
assign _d_sizes_clr_T_10 = _GEN_1; // @[Monitor.scala:637:69, :791:99]
wire [259:0] _a_opcode_lookup_T_1 = inflight_opcodes >> _a_opcode_lookup_T; // @[Monitor.scala:616:35, :637:{44,69}]
wire [259:0] _a_opcode_lookup_T_6 = {256'h0, _a_opcode_lookup_T_1[3:0]}; // @[Monitor.scala:637:{44,97}]
wire [259:0] _a_opcode_lookup_T_7 = {1'h0, _a_opcode_lookup_T_6[259:1]}; // @[Monitor.scala:637:{97,152}]
assign a_opcode_lookup = _a_opcode_lookup_T_7[2:0]; // @[Monitor.scala:635:35, :637:{21,152}]
wire [3:0] a_size_lookup; // @[Monitor.scala:639:33]
wire [259:0] _a_size_lookup_T_1 = inflight_sizes >> _a_size_lookup_T; // @[Monitor.scala:618:33, :641:{40,65}]
wire [259:0] _a_size_lookup_T_6 = {256'h0, _a_size_lookup_T_1[3:0]}; // @[Monitor.scala:641:{40,91}]
wire [259:0] _a_size_lookup_T_7 = {1'h0, _a_size_lookup_T_6[259:1]}; // @[Monitor.scala:641:{91,144}]
assign a_size_lookup = _a_size_lookup_T_7[3:0]; // @[Monitor.scala:639:33, :641:{19,144}]
wire [3:0] a_opcodes_set_interm; // @[Monitor.scala:646:40]
wire [3:0] a_sizes_set_interm; // @[Monitor.scala:648:38]
wire _same_cycle_resp_T = io_in_a_valid_0 & a_first_1; // @[Monitor.scala:36:7, :651:26, :684:44]
wire [127:0] _GEN_2 = 128'h1 << io_in_a_bits_source_0; // @[OneHot.scala:58:35]
wire [127:0] _a_set_wo_ready_T; // @[OneHot.scala:58:35]
assign _a_set_wo_ready_T = _GEN_2; // @[OneHot.scala:58:35]
wire [127:0] _a_set_T; // @[OneHot.scala:58:35]
assign _a_set_T = _GEN_2; // @[OneHot.scala:58:35]
assign a_set_wo_ready = _same_cycle_resp_T ? _a_set_wo_ready_T[64:0] : 65'h0; // @[OneHot.scala:58:35]
wire _T_1076 = _T_1143 & a_first_1; // @[Decoupled.scala:51:35]
assign a_set = _T_1076 ? _a_set_T[64:0] : 65'h0; // @[OneHot.scala:58:35]
wire [3:0] _a_opcodes_set_interm_T = {io_in_a_bits_opcode_0, 1'h0}; // @[Monitor.scala:36:7, :657:53]
wire [3:0] _a_opcodes_set_interm_T_1 = {_a_opcodes_set_interm_T[3:1], 1'h1}; // @[Monitor.scala:657:{53,61}]
assign a_opcodes_set_interm = _T_1076 ? _a_opcodes_set_interm_T_1 : 4'h0; // @[Monitor.scala:646:40, :655:{25,70}, :657:{28,61}]
wire [3:0] _a_sizes_set_interm_T = {io_in_a_bits_size_0, 1'h0}; // @[Monitor.scala:36:7, :658:51]
wire [3:0] _a_sizes_set_interm_T_1 = {_a_sizes_set_interm_T[3:1], 1'h1}; // @[Monitor.scala:658:{51,59}]
assign a_sizes_set_interm = _T_1076 ? _a_sizes_set_interm_T_1 : 4'h0; // @[Monitor.scala:648:38, :655:{25,70}, :658:{28,59}]
wire [9:0] _GEN_3 = {1'h0, io_in_a_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :659:79]
wire [9:0] _a_opcodes_set_T; // @[Monitor.scala:659:79]
assign _a_opcodes_set_T = _GEN_3; // @[Monitor.scala:659:79]
wire [9:0] _a_sizes_set_T; // @[Monitor.scala:660:77]
assign _a_sizes_set_T = _GEN_3; // @[Monitor.scala:659:79, :660:77]
wire [1026:0] _a_opcodes_set_T_1 = {1023'h0, a_opcodes_set_interm} << _a_opcodes_set_T; // @[Monitor.scala:646:40, :659:{54,79}]
assign a_opcodes_set = _T_1076 ? _a_opcodes_set_T_1[259:0] : 260'h0; // @[Monitor.scala:630:33, :655:{25,70}, :659:{28,54}]
wire [1026:0] _a_sizes_set_T_1 = {1023'h0, a_sizes_set_interm} << _a_sizes_set_T; // @[Monitor.scala:648:38, :659:54, :660:{52,77}]
assign a_sizes_set = _T_1076 ? _a_sizes_set_T_1[259:0] : 260'h0; // @[Monitor.scala:632:31, :655:{25,70}, :660:{28,52}]
wire [64:0] d_clr; // @[Monitor.scala:664:34]
wire [64:0] d_clr_wo_ready; // @[Monitor.scala:665:34]
wire [259:0] d_opcodes_clr; // @[Monitor.scala:668:33]
wire [259:0] d_sizes_clr; // @[Monitor.scala:670:31]
wire _T_1122 = io_in_d_valid_0 & d_first_1; // @[Monitor.scala:36:7, :674:26]
wire [127:0] _GEN_4 = 128'h1 << io_in_d_bits_source_0; // @[OneHot.scala:58:35]
wire [127:0] _d_clr_wo_ready_T; // @[OneHot.scala:58:35]
assign _d_clr_wo_ready_T = _GEN_4; // @[OneHot.scala:58:35]
wire [127:0] _d_clr_T; // @[OneHot.scala:58:35]
assign _d_clr_T = _GEN_4; // @[OneHot.scala:58:35]
wire [127:0] _d_clr_wo_ready_T_1; // @[OneHot.scala:58:35]
assign _d_clr_wo_ready_T_1 = _GEN_4; // @[OneHot.scala:58:35]
wire [127:0] _d_clr_T_1; // @[OneHot.scala:58:35]
assign _d_clr_T_1 = _GEN_4; // @[OneHot.scala:58:35]
assign d_clr_wo_ready = _T_1122 ? _d_clr_wo_ready_T[64:0] : 65'h0; // @[OneHot.scala:58:35]
wire _T_1089 = _T_1211 & d_first_1; // @[Decoupled.scala:51:35]
assign d_clr = _T_1089 ? _d_clr_T[64:0] : 65'h0; // @[OneHot.scala:58:35]
wire [1038:0] _d_opcodes_clr_T_5 = 1039'hF << _d_opcodes_clr_T_4; // @[Monitor.scala:680:{76,101}]
assign d_opcodes_clr = _T_1089 ? _d_opcodes_clr_T_5[259:0] : 260'h0; // @[Monitor.scala:668:33, :678:{25,89}, :680:{21,76}]
wire [1038:0] _d_sizes_clr_T_5 = 1039'hF << _d_sizes_clr_T_4; // @[Monitor.scala:681:{74,99}]
assign d_sizes_clr = _T_1089 ? _d_sizes_clr_T_5[259:0] : 260'h0; // @[Monitor.scala:670:31, :678:{25,89}, :681:{21,74}]
wire _same_cycle_resp_T_1 = _same_cycle_resp_T; // @[Monitor.scala:684:{44,55}]
wire _same_cycle_resp_T_2 = io_in_a_bits_source_0 == io_in_d_bits_source_0; // @[Monitor.scala:36:7, :684:113]
wire same_cycle_resp = _same_cycle_resp_T_1 & _same_cycle_resp_T_2; // @[Monitor.scala:684:{55,88,113}]
wire [64:0] _inflight_T = inflight | a_set; // @[Monitor.scala:614:27, :626:34, :705:27]
wire [64:0] _inflight_T_1 = ~d_clr; // @[Monitor.scala:664:34, :705:38]
wire [64:0] _inflight_T_2 = _inflight_T & _inflight_T_1; // @[Monitor.scala:705:{27,36,38}]
wire [259:0] _inflight_opcodes_T = inflight_opcodes | a_opcodes_set; // @[Monitor.scala:616:35, :630:33, :706:43]
wire [259:0] _inflight_opcodes_T_1 = ~d_opcodes_clr; // @[Monitor.scala:668:33, :706:62]
wire [259:0] _inflight_opcodes_T_2 = _inflight_opcodes_T & _inflight_opcodes_T_1; // @[Monitor.scala:706:{43,60,62}]
wire [259:0] _inflight_sizes_T = inflight_sizes | a_sizes_set; // @[Monitor.scala:618:33, :632:31, :707:39]
wire [259:0] _inflight_sizes_T_1 = ~d_sizes_clr; // @[Monitor.scala:670:31, :707:56]
wire [259:0] _inflight_sizes_T_2 = _inflight_sizes_T & _inflight_sizes_T_1; // @[Monitor.scala:707:{39,54,56}]
reg [31:0] watchdog; // @[Monitor.scala:709:27]
wire [32:0] _watchdog_T = {1'h0, watchdog} + 33'h1; // @[Monitor.scala:709:27, :714:26]
wire [31:0] _watchdog_T_1 = _watchdog_T[31:0]; // @[Monitor.scala:714:26]
reg [64:0] inflight_1; // @[Monitor.scala:726:35]
wire [64:0] _inflight_T_3 = inflight_1; // @[Monitor.scala:726:35, :814:35]
reg [259:0] inflight_opcodes_1; // @[Monitor.scala:727:35]
wire [259:0] _inflight_opcodes_T_3 = inflight_opcodes_1; // @[Monitor.scala:727:35, :815:43]
reg [259:0] inflight_sizes_1; // @[Monitor.scala:728:35]
wire [259:0] _inflight_sizes_T_3 = inflight_sizes_1; // @[Monitor.scala:728:35, :816:41]
wire [5:0] _d_first_beats1_decode_T_7 = _d_first_beats1_decode_T_6[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] _d_first_beats1_decode_T_8 = ~_d_first_beats1_decode_T_7; // @[package.scala:243:{46,76}]
wire [2:0] d_first_beats1_decode_2 = _d_first_beats1_decode_T_8[5:3]; // @[package.scala:243:46]
wire [2:0] d_first_beats1_2 = d_first_beats1_decode_2; // @[Edges.scala:220:59, :221:14]
reg [2:0] d_first_counter_2; // @[Edges.scala:229:27]
wire [3:0] _d_first_counter1_T_2 = {1'h0, d_first_counter_2} - 4'h1; // @[Edges.scala:229:27, :230:28]
wire [2:0] d_first_counter1_2 = _d_first_counter1_T_2[2:0]; // @[Edges.scala:230:28]
wire d_first_2 = d_first_counter_2 == 3'h0; // @[Edges.scala:229:27, :231:25]
wire _d_first_last_T_4 = d_first_counter_2 == 3'h1; // @[Edges.scala:229:27, :232:25]
wire _d_first_last_T_5 = d_first_beats1_2 == 3'h0; // @[Edges.scala:221:14, :232:43]
wire d_first_last_2 = _d_first_last_T_4 | _d_first_last_T_5; // @[Edges.scala:232:{25,33,43}]
wire d_first_done_2 = d_first_last_2 & _d_first_T_2; // @[Decoupled.scala:51:35]
wire [2:0] _d_first_count_T_2 = ~d_first_counter1_2; // @[Edges.scala:230:28, :234:27]
wire [2:0] d_first_count_2 = d_first_beats1_2 & _d_first_count_T_2; // @[Edges.scala:221:14, :234:{25,27}]
wire [2:0] _d_first_counter_T_2 = d_first_2 ? d_first_beats1_2 : d_first_counter1_2; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
wire [3:0] c_opcode_lookup; // @[Monitor.scala:747:35]
wire [3:0] c_size_lookup; // @[Monitor.scala:748:35]
wire [259:0] _c_opcode_lookup_T_1 = inflight_opcodes_1 >> _c_opcode_lookup_T; // @[Monitor.scala:727:35, :749:{44,69}]
wire [259:0] _c_opcode_lookup_T_6 = {256'h0, _c_opcode_lookup_T_1[3:0]}; // @[Monitor.scala:749:{44,97}]
wire [259:0] _c_opcode_lookup_T_7 = {1'h0, _c_opcode_lookup_T_6[259:1]}; // @[Monitor.scala:749:{97,152}]
assign c_opcode_lookup = _c_opcode_lookup_T_7[3:0]; // @[Monitor.scala:747:35, :749:{21,152}]
wire [259:0] _c_size_lookup_T_1 = inflight_sizes_1 >> _c_size_lookup_T; // @[Monitor.scala:728:35, :750:{42,67}]
wire [259:0] _c_size_lookup_T_6 = {256'h0, _c_size_lookup_T_1[3:0]}; // @[Monitor.scala:750:{42,93}]
wire [259:0] _c_size_lookup_T_7 = {1'h0, _c_size_lookup_T_6[259:1]}; // @[Monitor.scala:750:{93,146}]
assign c_size_lookup = _c_size_lookup_T_7[3:0]; // @[Monitor.scala:748:35, :750:{21,146}]
wire [1038:0] _d_opcodes_clr_T_11 = 1039'hF << _d_opcodes_clr_T_10; // @[Monitor.scala:790:{76,101}]
wire [1038:0] _d_sizes_clr_T_11 = 1039'hF << _d_sizes_clr_T_10; // @[Monitor.scala:791:{74,99}]
wire _same_cycle_resp_T_8 = io_in_d_bits_source_0 == 7'h0; // @[Monitor.scala:36:7, :795:113]
wire [64:0] _inflight_T_5 = _inflight_T_3; // @[Monitor.scala:814:{35,44}]
wire [259:0] _inflight_opcodes_T_5 = _inflight_opcodes_T_3; // @[Monitor.scala:815:{43,60}]
wire [259:0] _inflight_sizes_T_5 = _inflight_sizes_T_3; // @[Monitor.scala:816:{41,56}]
reg [31:0] watchdog_1; // @[Monitor.scala:818:27] |
Generate the Verilog code corresponding to the following Chisel files.
File Repeater.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util.{Decoupled, DecoupledIO}
// A Repeater passes its input to its output, unless repeat is asserted.
// When repeat is asserted, the Repeater copies the input and repeats it next cycle.
class Repeater[T <: Data](gen: T) extends Module
{
override def desiredName = s"Repeater_${gen.typeName}"
val io = IO( new Bundle {
val repeat = Input(Bool())
val full = Output(Bool())
val enq = Flipped(Decoupled(gen.cloneType))
val deq = Decoupled(gen.cloneType)
} )
val full = RegInit(false.B)
val saved = Reg(gen.cloneType)
// When !full, a repeater is pass-through
io.deq.valid := io.enq.valid || full
io.enq.ready := io.deq.ready && !full
io.deq.bits := Mux(full, saved, io.enq.bits)
io.full := full
when (io.enq.fire && io.repeat) { full := true.B; saved := io.enq.bits }
when (io.deq.fire && !io.repeat) { full := false.B }
}
object Repeater
{
def apply[T <: Data](enq: DecoupledIO[T], repeat: Bool): DecoupledIO[T] = {
val repeater = Module(new Repeater(chiselTypeOf(enq.bits)))
repeater.io.repeat := repeat
repeater.io.enq <> enq
repeater.io.deq
}
}
| module Repeater_TLBundleA_a28d64s4k1z3u( // @[Repeater.scala:10:7]
input clock, // @[Repeater.scala:10:7]
input reset, // @[Repeater.scala:10:7]
input io_repeat, // @[Repeater.scala:13:14]
output io_full, // @[Repeater.scala:13:14]
output io_enq_ready, // @[Repeater.scala:13:14]
input io_enq_valid, // @[Repeater.scala:13:14]
input [2:0] io_enq_bits_opcode, // @[Repeater.scala:13:14]
input [2:0] io_enq_bits_param, // @[Repeater.scala:13:14]
input [2:0] io_enq_bits_size, // @[Repeater.scala:13:14]
input [3:0] io_enq_bits_source, // @[Repeater.scala:13:14]
input [27:0] io_enq_bits_address, // @[Repeater.scala:13:14]
input [7:0] io_enq_bits_mask, // @[Repeater.scala:13:14]
input [63:0] io_enq_bits_data, // @[Repeater.scala:13:14]
input io_enq_bits_corrupt, // @[Repeater.scala:13:14]
input io_deq_ready, // @[Repeater.scala:13:14]
output io_deq_valid, // @[Repeater.scala:13:14]
output [2:0] io_deq_bits_opcode, // @[Repeater.scala:13:14]
output [2:0] io_deq_bits_param, // @[Repeater.scala:13:14]
output [2:0] io_deq_bits_size, // @[Repeater.scala:13:14]
output [3:0] io_deq_bits_source, // @[Repeater.scala:13:14]
output [27:0] io_deq_bits_address, // @[Repeater.scala:13:14]
output [7:0] io_deq_bits_mask, // @[Repeater.scala:13:14]
output io_deq_bits_corrupt // @[Repeater.scala:13:14]
);
wire io_repeat_0 = io_repeat; // @[Repeater.scala:10:7]
wire io_enq_valid_0 = io_enq_valid; // @[Repeater.scala:10:7]
wire [2:0] io_enq_bits_opcode_0 = io_enq_bits_opcode; // @[Repeater.scala:10:7]
wire [2:0] io_enq_bits_param_0 = io_enq_bits_param; // @[Repeater.scala:10:7]
wire [2:0] io_enq_bits_size_0 = io_enq_bits_size; // @[Repeater.scala:10:7]
wire [3:0] io_enq_bits_source_0 = io_enq_bits_source; // @[Repeater.scala:10:7]
wire [27:0] io_enq_bits_address_0 = io_enq_bits_address; // @[Repeater.scala:10:7]
wire [7:0] io_enq_bits_mask_0 = io_enq_bits_mask; // @[Repeater.scala:10:7]
wire [63:0] io_enq_bits_data_0 = io_enq_bits_data; // @[Repeater.scala:10:7]
wire io_enq_bits_corrupt_0 = io_enq_bits_corrupt; // @[Repeater.scala:10:7]
wire io_deq_ready_0 = io_deq_ready; // @[Repeater.scala:10:7]
wire _io_enq_ready_T_1; // @[Repeater.scala:25:32]
wire _io_deq_valid_T; // @[Repeater.scala:24:32]
wire [2:0] _io_deq_bits_T_opcode; // @[Repeater.scala:26:21]
wire [2:0] _io_deq_bits_T_param; // @[Repeater.scala:26:21]
wire [2:0] _io_deq_bits_T_size; // @[Repeater.scala:26:21]
wire [3:0] _io_deq_bits_T_source; // @[Repeater.scala:26:21]
wire [27:0] _io_deq_bits_T_address; // @[Repeater.scala:26:21]
wire [7:0] _io_deq_bits_T_mask; // @[Repeater.scala:26:21]
wire [63:0] _io_deq_bits_T_data; // @[Repeater.scala:26:21]
wire _io_deq_bits_T_corrupt; // @[Repeater.scala:26:21]
wire io_enq_ready_0; // @[Repeater.scala:10:7]
wire [2:0] io_deq_bits_opcode_0; // @[Repeater.scala:10:7]
wire [2:0] io_deq_bits_param_0; // @[Repeater.scala:10:7]
wire [2:0] io_deq_bits_size_0; // @[Repeater.scala:10:7]
wire [3:0] io_deq_bits_source_0; // @[Repeater.scala:10:7]
wire [27:0] io_deq_bits_address_0; // @[Repeater.scala:10:7]
wire [7:0] io_deq_bits_mask_0; // @[Repeater.scala:10:7]
wire [63:0] io_deq_bits_data; // @[Repeater.scala:10:7]
wire io_deq_bits_corrupt_0; // @[Repeater.scala:10:7]
wire io_deq_valid_0; // @[Repeater.scala:10:7]
wire io_full_0; // @[Repeater.scala:10:7]
reg full; // @[Repeater.scala:20:21]
assign io_full_0 = full; // @[Repeater.scala:10:7, :20:21]
reg [2:0] saved_opcode; // @[Repeater.scala:21:18]
reg [2:0] saved_param; // @[Repeater.scala:21:18]
reg [2:0] saved_size; // @[Repeater.scala:21:18]
reg [3:0] saved_source; // @[Repeater.scala:21:18]
reg [27:0] saved_address; // @[Repeater.scala:21:18]
reg [7:0] saved_mask; // @[Repeater.scala:21:18]
reg [63:0] saved_data; // @[Repeater.scala:21:18]
reg saved_corrupt; // @[Repeater.scala:21:18]
assign _io_deq_valid_T = io_enq_valid_0 | full; // @[Repeater.scala:10:7, :20:21, :24:32]
assign io_deq_valid_0 = _io_deq_valid_T; // @[Repeater.scala:10:7, :24:32]
wire _io_enq_ready_T = ~full; // @[Repeater.scala:20:21, :25:35]
assign _io_enq_ready_T_1 = io_deq_ready_0 & _io_enq_ready_T; // @[Repeater.scala:10:7, :25:{32,35}]
assign io_enq_ready_0 = _io_enq_ready_T_1; // @[Repeater.scala:10:7, :25:32]
assign _io_deq_bits_T_opcode = full ? saved_opcode : io_enq_bits_opcode_0; // @[Repeater.scala:10:7, :20:21, :21:18, :26:21]
assign _io_deq_bits_T_param = full ? saved_param : io_enq_bits_param_0; // @[Repeater.scala:10:7, :20:21, :21:18, :26:21]
assign _io_deq_bits_T_size = full ? saved_size : io_enq_bits_size_0; // @[Repeater.scala:10:7, :20:21, :21:18, :26:21]
assign _io_deq_bits_T_source = full ? saved_source : io_enq_bits_source_0; // @[Repeater.scala:10:7, :20:21, :21:18, :26:21]
assign _io_deq_bits_T_address = full ? saved_address : io_enq_bits_address_0; // @[Repeater.scala:10:7, :20:21, :21:18, :26:21]
assign _io_deq_bits_T_mask = full ? saved_mask : io_enq_bits_mask_0; // @[Repeater.scala:10:7, :20:21, :21:18, :26:21]
assign _io_deq_bits_T_data = full ? saved_data : io_enq_bits_data_0; // @[Repeater.scala:10:7, :20:21, :21:18, :26:21]
assign _io_deq_bits_T_corrupt = full ? saved_corrupt : io_enq_bits_corrupt_0; // @[Repeater.scala:10:7, :20:21, :21:18, :26:21]
assign io_deq_bits_opcode_0 = _io_deq_bits_T_opcode; // @[Repeater.scala:10:7, :26:21]
assign io_deq_bits_param_0 = _io_deq_bits_T_param; // @[Repeater.scala:10:7, :26:21]
assign io_deq_bits_size_0 = _io_deq_bits_T_size; // @[Repeater.scala:10:7, :26:21]
assign io_deq_bits_source_0 = _io_deq_bits_T_source; // @[Repeater.scala:10:7, :26:21]
assign io_deq_bits_address_0 = _io_deq_bits_T_address; // @[Repeater.scala:10:7, :26:21]
assign io_deq_bits_mask_0 = _io_deq_bits_T_mask; // @[Repeater.scala:10:7, :26:21]
assign io_deq_bits_data = _io_deq_bits_T_data; // @[Repeater.scala:10:7, :26:21]
assign io_deq_bits_corrupt_0 = _io_deq_bits_T_corrupt; // @[Repeater.scala:10:7, :26:21]
wire _T_1 = io_enq_ready_0 & io_enq_valid_0 & io_repeat_0; // @[Decoupled.scala:51:35]
always @(posedge clock) begin // @[Repeater.scala:10:7]
if (reset) // @[Repeater.scala:10:7]
full <= 1'h0; // @[Repeater.scala:20:21]
else // @[Repeater.scala:10:7]
full <= ~(io_deq_ready_0 & io_deq_valid_0 & ~io_repeat_0) & (_T_1 | full); // @[Decoupled.scala:51:35]
if (_T_1) begin // @[Decoupled.scala:51:35]
saved_opcode <= io_enq_bits_opcode_0; // @[Repeater.scala:10:7, :21:18]
saved_param <= io_enq_bits_param_0; // @[Repeater.scala:10:7, :21:18]
saved_size <= io_enq_bits_size_0; // @[Repeater.scala:10:7, :21:18]
saved_source <= io_enq_bits_source_0; // @[Repeater.scala:10:7, :21:18]
saved_address <= io_enq_bits_address_0; // @[Repeater.scala:10:7, :21:18]
saved_mask <= io_enq_bits_mask_0; // @[Repeater.scala:10:7, :21:18]
saved_data <= io_enq_bits_data_0; // @[Repeater.scala:10:7, :21:18]
saved_corrupt <= io_enq_bits_corrupt_0; // @[Repeater.scala:10:7, :21:18]
end
  end // always @(posedge)
assign io_full = io_full_0; // @[Repeater.scala:10:7]
assign io_enq_ready = io_enq_ready_0; // @[Repeater.scala:10:7]
assign io_deq_valid = io_deq_valid_0; // @[Repeater.scala:10:7]
assign io_deq_bits_opcode = io_deq_bits_opcode_0; // @[Repeater.scala:10:7]
assign io_deq_bits_param = io_deq_bits_param_0; // @[Repeater.scala:10:7]
assign io_deq_bits_size = io_deq_bits_size_0; // @[Repeater.scala:10:7]
assign io_deq_bits_source = io_deq_bits_source_0; // @[Repeater.scala:10:7]
assign io_deq_bits_address = io_deq_bits_address_0; // @[Repeater.scala:10:7]
assign io_deq_bits_mask = io_deq_bits_mask_0; // @[Repeater.scala:10:7]
assign io_deq_bits_corrupt = io_deq_bits_corrupt_0; // @[Repeater.scala:10:7]
endmodule |
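As a side note, here is a minimal usage sketch of the Repeater helper shown above; the module and signal names are hypothetical and not taken from the generated design. Repeater.apply wires up the enqueue side and hands back the dequeue side, replaying the held beat for as long as repeat stays asserted.
import chisel3._
import chisel3.util.Decoupled
import freechips.rocketchip.util.Repeater
class RepeatExample extends Module {
  val io = IO(new Bundle {
    val in     = Flipped(Decoupled(UInt(64.W)))
    val out    = Decoupled(UInt(64.W))
    val repeat = Input(Bool())
  })
  // While io.repeat is high, the beat captured from io.in is re-emitted on io.out each
  // cycle; once io.repeat is low and io.out fires, the repeater returns to pass-through.
  io.out <> Repeater(io.in, io.repeat)
}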
Generate the Verilog code corresponding to the following Chisel files.
File ShiftReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
// Similar to the Chisel ShiftRegister but allows the user to suggest a
// name to the registers that get instantiated, and
// to provide a reset value.
object ShiftRegInit {
def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T =
(0 until n).foldRight(in) {
case (i, next) => {
val r = RegNext(next, init)
name.foreach { na => r.suggestName(s"${na}_${i}") }
r
}
}
}
/** These wrap behavioral
* shift registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
* The different types vary in their reset behavior:
* AsyncResetShiftReg -- Asynchronously reset register array
* A W(width) x D(depth) sized array is constructed from D instantiations of a
 * W-wide register vector. Functionally identical to AsyncResetSynchronizerShiftReg,
* but only used for timing applications
*/
abstract class AbstractPipelineReg(w: Int = 1) extends Module {
val io = IO(new Bundle {
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
}
)
}
object AbstractPipelineReg {
def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = {
val chain = Module(gen)
name.foreach{ chain.suggestName(_) }
chain.io.d := in.asUInt
chain.io.q.asTypeOf(in)
}
}
class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) {
require(depth > 0, "Depth must be greater than 0.")
override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}"
val chain = List.tabulate(depth) { i =>
Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}")
}
chain.last.io.d := io.d
chain.last.io.en := true.B
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink.io.d := source.io.q
sink.io.en := true.B
}
io.q := chain.head.io.q
}
object AsyncResetShiftReg {
def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name)
def apply [T <: Data](in: T, depth: Int, name: Option[String]): T =
apply(in, depth, 0, name)
def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T =
apply(in, depth, init.litValue.toInt, name)
def apply [T <: Data](in: T, depth: Int, init: T): T =
apply (in, depth, init.litValue.toInt, None)
}
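A brief usage sketch of ShiftRegInit from the file above (the module and signal names are made up for illustration): delay a flag by two cycles through named, reset-initialised stages.
import chisel3._
import freechips.rocketchip.util.ShiftRegInit
class DelayTwo extends Module {
  val io = IO(new Bundle {
    val in  = Input(Bool())
    val out = Output(Bool())
  })
  // Two RegNext stages, each reset to false.B, with suggested names valid_pipe_0 / valid_pipe_1.
  io.out := ShiftRegInit(io.in, n = 2, init = false.B, name = Some("valid_pipe"))
}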
File SynchronizerReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util.{RegEnable, Cat}
/** These wrap behavioral
* shift and next registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
*
* These are built up of *ResetSynchronizerPrimitiveShiftReg,
* intended to be replaced by the integrator's metastable flops chains or replaced
* at this level if they have a multi-bit wide synchronizer primitive.
* The different types vary in their reset behavior:
* NonSyncResetSynchronizerShiftReg -- Register array which does not have a reset pin
* AsyncResetSynchronizerShiftReg -- Asynchronously reset register array, constructed from W instantiations of D deep
* 1-bit-wide shift registers.
* SyncResetSynchronizerShiftReg -- Synchronously reset register array, constructed similarly to AsyncResetSynchronizerShiftReg
*
* [Inferred]ResetSynchronizerShiftReg -- TBD reset type by chisel3 reset inference.
*
* ClockCrossingReg -- Not made up of SynchronizerPrimitiveShiftReg. This is for single-deep flops which cross
* Clock Domains.
*/
object SynchronizerResetType extends Enumeration {
val NonSync, Inferred, Sync, Async = Value
}
// Note: this should not be used directly.
// Use the companion object to generate this with the correct reset type mixin.
private class SynchronizerPrimitiveShiftReg(
sync: Int,
init: Boolean,
resetType: SynchronizerResetType.Value)
extends AbstractPipelineReg(1) {
val initInt = if (init) 1 else 0
val initPostfix = resetType match {
case SynchronizerResetType.NonSync => ""
case _ => s"_i${initInt}"
}
override def desiredName = s"${resetType.toString}ResetSynchronizerPrimitiveShiftReg_d${sync}${initPostfix}"
val chain = List.tabulate(sync) { i =>
val reg = if (resetType == SynchronizerResetType.NonSync) Reg(Bool()) else RegInit(init.B)
reg.suggestName(s"sync_$i")
}
chain.last := io.d.asBool
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink := source
}
io.q := chain.head.asUInt
}
private object SynchronizerPrimitiveShiftReg {
def apply (in: Bool, sync: Int, init: Boolean, resetType: SynchronizerResetType.Value): Bool = {
val gen: () => SynchronizerPrimitiveShiftReg = resetType match {
case SynchronizerResetType.NonSync =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType)
case SynchronizerResetType.Async =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireAsyncReset
case SynchronizerResetType.Sync =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireSyncReset
case SynchronizerResetType.Inferred =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType)
}
AbstractPipelineReg(gen(), in)
}
}
// Note: This module may end up with a non-AsyncReset type reset.
// But the Primitives within will always have AsyncReset type.
class AsyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int)
extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"AsyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
withReset(reset.asAsyncReset){
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Async)
}
}
io.q := Cat(output.reverse)
}
object AsyncResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
// Note: This module may end up with a non-Bool type reset.
// But the Primitives within will always have Bool reset type.
@deprecated("SyncResetSynchronizerShiftReg is unecessary with Chisel3 inferred resets. Use ResetSynchronizerShiftReg which will use the inferred reset type.", "rocket-chip 1.2")
class SyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"SyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
withReset(reset.asBool){
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Sync)
}
}
io.q := Cat(output.reverse)
}
object SyncResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
if (sync == 0) in else AbstractPipelineReg(new SyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
class ResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"ResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Inferred)
}
io.q := Cat(output.reverse)
}
object ResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
AbstractPipelineReg(new ResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
class SynchronizerShiftReg(w: Int = 1, sync: Int = 3) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"SynchronizerShiftReg_w${w}_d${sync}"
val output = Seq.tabulate(w) { i =>
SynchronizerPrimitiveShiftReg(io.d(i), sync, false, SynchronizerResetType.NonSync)
}
io.q := Cat(output.reverse)
}
object SynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, name: Option[String] = None): T =
if (sync == 0) in else AbstractPipelineReg(new SynchronizerShiftReg(in.getWidth, sync), in, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, None)
def apply [T <: Data](in: T): T =
apply (in, 3, None)
}
class ClockCrossingReg(w: Int = 1, doInit: Boolean) extends Module {
override def desiredName = s"ClockCrossingReg_w${w}"
val io = IO(new Bundle{
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
val en = Input(Bool())
})
val cdc_reg = if (doInit) RegEnable(io.d, 0.U(w.W), io.en) else RegEnable(io.d, io.en)
io.q := cdc_reg
}
object ClockCrossingReg {
def apply [T <: Data](in: T, en: Bool, doInit: Boolean, name: Option[String] = None): T = {
val cdc_reg = Module(new ClockCrossingReg(in.getWidth, doInit))
name.foreach{ cdc_reg.suggestName(_) }
cdc_reg.io.d := in.asUInt
cdc_reg.io.en := en
cdc_reg.io.q.asTypeOf(in)
}
}
| module AsyncResetSynchronizerShiftReg_w4_d3_i0_7( // @[SynchronizerReg.scala:80:7]
input clock, // @[SynchronizerReg.scala:80:7]
input reset, // @[SynchronizerReg.scala:80:7]
input [3:0] io_d, // @[ShiftReg.scala:36:14]
output [3:0] io_q // @[ShiftReg.scala:36:14]
);
wire [3:0] io_d_0 = io_d; // @[SynchronizerReg.scala:80:7]
wire _output_T = reset; // @[SynchronizerReg.scala:86:21]
wire _output_T_2 = reset; // @[SynchronizerReg.scala:86:21]
wire _output_T_4 = reset; // @[SynchronizerReg.scala:86:21]
wire _output_T_6 = reset; // @[SynchronizerReg.scala:86:21]
wire [3:0] _io_q_T; // @[SynchronizerReg.scala:90:14]
wire [3:0] io_q_0; // @[SynchronizerReg.scala:80:7]
wire _output_T_1 = io_d_0[0]; // @[SynchronizerReg.scala:80:7, :87:41]
wire output_0; // @[ShiftReg.scala:48:24]
wire _output_T_3 = io_d_0[1]; // @[SynchronizerReg.scala:80:7, :87:41]
wire output_1; // @[ShiftReg.scala:48:24]
wire _output_T_5 = io_d_0[2]; // @[SynchronizerReg.scala:80:7, :87:41]
wire output_2; // @[ShiftReg.scala:48:24]
wire _output_T_7 = io_d_0[3]; // @[SynchronizerReg.scala:80:7, :87:41]
wire output_3; // @[ShiftReg.scala:48:24]
wire [1:0] io_q_lo = {output_1, output_0}; // @[SynchronizerReg.scala:90:14]
wire [1:0] io_q_hi = {output_3, output_2}; // @[SynchronizerReg.scala:90:14]
assign _io_q_T = {io_q_hi, io_q_lo}; // @[SynchronizerReg.scala:90:14]
assign io_q_0 = _io_q_T; // @[SynchronizerReg.scala:80:7, :90:14]
AsyncResetSynchronizerPrimitiveShiftReg_d3_i0_56 output_chain ( // @[ShiftReg.scala:45:23]
.clock (clock),
.reset (_output_T), // @[SynchronizerReg.scala:86:21]
.io_d (_output_T_1), // @[SynchronizerReg.scala:87:41]
.io_q (output_0)
); // @[ShiftReg.scala:45:23]
AsyncResetSynchronizerPrimitiveShiftReg_d3_i0_57 output_chain_1 ( // @[ShiftReg.scala:45:23]
.clock (clock),
.reset (_output_T_2), // @[SynchronizerReg.scala:86:21]
.io_d (_output_T_3), // @[SynchronizerReg.scala:87:41]
.io_q (output_1)
); // @[ShiftReg.scala:45:23]
AsyncResetSynchronizerPrimitiveShiftReg_d3_i0_58 output_chain_2 ( // @[ShiftReg.scala:45:23]
.clock (clock),
.reset (_output_T_4), // @[SynchronizerReg.scala:86:21]
.io_d (_output_T_5), // @[SynchronizerReg.scala:87:41]
.io_q (output_2)
); // @[ShiftReg.scala:45:23]
AsyncResetSynchronizerPrimitiveShiftReg_d3_i0_59 output_chain_3 ( // @[ShiftReg.scala:45:23]
.clock (clock),
.reset (_output_T_6), // @[SynchronizerReg.scala:86:21]
.io_d (_output_T_7), // @[SynchronizerReg.scala:87:41]
.io_q (output_3)
); // @[ShiftReg.scala:45:23]
assign io_q = io_q_0; // @[SynchronizerReg.scala:80:7]
endmodule |
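For reference, a hypothetical instantiation of the wrapper that produces a module like the one above: a 4-bit asynchronous input brought into the local clock domain through three flops per bit, reset to zero (the names below are illustrative only).
import chisel3._
import freechips.rocketchip.util.AsyncResetSynchronizerShiftReg
class StatusSync extends Module {
  val io = IO(new Bundle {
    val async_status = Input(UInt(4.W))
    val sync_status  = Output(UInt(4.W))
  })
  // Width 4, sync = 3, init = 0 corresponds to the _w4_d3_i0 module emitted above.
  io.sync_status := AsyncResetSynchronizerShiftReg(io.async_status, sync = 3, init = 0)
}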
Generate the Verilog code corresponding to the following Chisel files.
File MulAddRecFN.scala:
/*============================================================================
This Chisel source file is part of a pre-release version of the HardFloat IEEE
Floating-Point Arithmetic Package, by John R. Hauser (with some contributions
from Yunsup Lee and Andrew Waterman, mainly concerning testing).
Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the
University of California. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions, and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions, and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the University nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
=============================================================================*/
package hardfloat
import chisel3._
import chisel3.util._
import consts._
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
class MulAddRecFN_interIo(expWidth: Int, sigWidth: Int) extends Bundle
{
//*** ENCODE SOME OF THESE CASES IN FEWER BITS?:
val isSigNaNAny = Bool()
val isNaNAOrB = Bool()
val isInfA = Bool()
val isZeroA = Bool()
val isInfB = Bool()
val isZeroB = Bool()
val signProd = Bool()
val isNaNC = Bool()
val isInfC = Bool()
val isZeroC = Bool()
val sExpSum = SInt((expWidth + 2).W)
val doSubMags = Bool()
val CIsDominant = Bool()
val CDom_CAlignDist = UInt(log2Ceil(sigWidth + 1).W)
val highAlignedSigC = UInt((sigWidth + 2).W)
val bit0AlignedSigC = UInt(1.W)
}
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
class MulAddRecFNToRaw_preMul(expWidth: Int, sigWidth: Int) extends RawModule
{
override def desiredName = s"MulAddRecFNToRaw_preMul_e${expWidth}_s${sigWidth}"
val io = IO(new Bundle {
val op = Input(Bits(2.W))
val a = Input(Bits((expWidth + sigWidth + 1).W))
val b = Input(Bits((expWidth + sigWidth + 1).W))
val c = Input(Bits((expWidth + sigWidth + 1).W))
val mulAddA = Output(UInt(sigWidth.W))
val mulAddB = Output(UInt(sigWidth.W))
val mulAddC = Output(UInt((sigWidth * 2).W))
val toPostMul = Output(new MulAddRecFN_interIo(expWidth, sigWidth))
})
//------------------------------------------------------------------------
//------------------------------------------------------------------------
//*** POSSIBLE TO REDUCE THIS BY 1 OR 2 BITS? (CURRENTLY 2 BITS BETWEEN
//*** UNSHIFTED C AND PRODUCT):
val sigSumWidth = sigWidth * 3 + 3
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val rawA = rawFloatFromRecFN(expWidth, sigWidth, io.a)
val rawB = rawFloatFromRecFN(expWidth, sigWidth, io.b)
val rawC = rawFloatFromRecFN(expWidth, sigWidth, io.c)
val signProd = rawA.sign ^ rawB.sign ^ io.op(1)
//*** REVIEW THE BIAS FOR 'sExpAlignedProd':
val sExpAlignedProd =
rawA.sExp +& rawB.sExp + (-(BigInt(1)<<expWidth) + sigWidth + 3).S
val doSubMags = signProd ^ rawC.sign ^ io.op(0)
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val sNatCAlignDist = sExpAlignedProd - rawC.sExp
val posNatCAlignDist = sNatCAlignDist(expWidth + 1, 0)
val isMinCAlign = rawA.isZero || rawB.isZero || (sNatCAlignDist < 0.S)
val CIsDominant =
! rawC.isZero && (isMinCAlign || (posNatCAlignDist <= sigWidth.U))
val CAlignDist =
Mux(isMinCAlign,
0.U,
Mux(posNatCAlignDist < (sigSumWidth - 1).U,
posNatCAlignDist(log2Ceil(sigSumWidth) - 1, 0),
(sigSumWidth - 1).U
)
)
val mainAlignedSigC =
(Mux(doSubMags, ~rawC.sig, rawC.sig) ## Fill(sigSumWidth - sigWidth + 2, doSubMags)).asSInt>>CAlignDist
val reduced4CExtra =
(orReduceBy4(rawC.sig<<((sigSumWidth - sigWidth - 1) & 3)) &
lowMask(
CAlignDist>>2,
//*** NOT NEEDED?:
// (sigSumWidth + 2)>>2,
(sigSumWidth - 1)>>2,
(sigSumWidth - sigWidth - 1)>>2
)
).orR
val alignedSigC =
Cat(mainAlignedSigC>>3,
Mux(doSubMags,
mainAlignedSigC(2, 0).andR && ! reduced4CExtra,
mainAlignedSigC(2, 0).orR || reduced4CExtra
)
)
//------------------------------------------------------------------------
//------------------------------------------------------------------------
io.mulAddA := rawA.sig
io.mulAddB := rawB.sig
io.mulAddC := alignedSigC(sigWidth * 2, 1)
io.toPostMul.isSigNaNAny :=
isSigNaNRawFloat(rawA) || isSigNaNRawFloat(rawB) ||
isSigNaNRawFloat(rawC)
io.toPostMul.isNaNAOrB := rawA.isNaN || rawB.isNaN
io.toPostMul.isInfA := rawA.isInf
io.toPostMul.isZeroA := rawA.isZero
io.toPostMul.isInfB := rawB.isInf
io.toPostMul.isZeroB := rawB.isZero
io.toPostMul.signProd := signProd
io.toPostMul.isNaNC := rawC.isNaN
io.toPostMul.isInfC := rawC.isInf
io.toPostMul.isZeroC := rawC.isZero
io.toPostMul.sExpSum :=
Mux(CIsDominant, rawC.sExp, sExpAlignedProd - sigWidth.S)
io.toPostMul.doSubMags := doSubMags
io.toPostMul.CIsDominant := CIsDominant
io.toPostMul.CDom_CAlignDist := CAlignDist(log2Ceil(sigWidth + 1) - 1, 0)
io.toPostMul.highAlignedSigC :=
alignedSigC(sigSumWidth - 1, sigWidth * 2 + 1)
io.toPostMul.bit0AlignedSigC := alignedSigC(0)
}
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
class MulAddRecFNToRaw_postMul(expWidth: Int, sigWidth: Int) extends RawModule
{
override def desiredName = s"MulAddRecFNToRaw_postMul_e${expWidth}_s${sigWidth}"
val io = IO(new Bundle {
val fromPreMul = Input(new MulAddRecFN_interIo(expWidth, sigWidth))
val mulAddResult = Input(UInt((sigWidth * 2 + 1).W))
val roundingMode = Input(UInt(3.W))
val invalidExc = Output(Bool())
val rawOut = Output(new RawFloat(expWidth, sigWidth + 2))
})
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val sigSumWidth = sigWidth * 3 + 3
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val roundingMode_min = (io.roundingMode === round_min)
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val opSignC = io.fromPreMul.signProd ^ io.fromPreMul.doSubMags
val sigSum =
Cat(Mux(io.mulAddResult(sigWidth * 2),
io.fromPreMul.highAlignedSigC + 1.U,
io.fromPreMul.highAlignedSigC
),
io.mulAddResult(sigWidth * 2 - 1, 0),
io.fromPreMul.bit0AlignedSigC
)
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val CDom_sign = opSignC
val CDom_sExp = io.fromPreMul.sExpSum - io.fromPreMul.doSubMags.zext
val CDom_absSigSum =
Mux(io.fromPreMul.doSubMags,
~sigSum(sigSumWidth - 1, sigWidth + 1),
0.U(1.W) ##
//*** IF GAP IS REDUCED TO 1 BIT, MUST REDUCE THIS COMPONENT TO 1 BIT TOO:
io.fromPreMul.highAlignedSigC(sigWidth + 1, sigWidth) ##
sigSum(sigSumWidth - 3, sigWidth + 2)
)
val CDom_absSigSumExtra =
Mux(io.fromPreMul.doSubMags,
(~sigSum(sigWidth, 1)).orR,
sigSum(sigWidth + 1, 1).orR
)
val CDom_mainSig =
(CDom_absSigSum<<io.fromPreMul.CDom_CAlignDist)(
sigWidth * 2 + 1, sigWidth - 3)
val CDom_reduced4SigExtra =
(orReduceBy4(CDom_absSigSum(sigWidth - 1, 0)<<(~sigWidth & 3)) &
lowMask(io.fromPreMul.CDom_CAlignDist>>2, 0, sigWidth>>2)).orR
val CDom_sig =
Cat(CDom_mainSig>>3,
CDom_mainSig(2, 0).orR || CDom_reduced4SigExtra ||
CDom_absSigSumExtra
)
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val notCDom_signSigSum = sigSum(sigWidth * 2 + 3)
val notCDom_absSigSum =
Mux(notCDom_signSigSum,
~sigSum(sigWidth * 2 + 2, 0),
sigSum(sigWidth * 2 + 2, 0) + io.fromPreMul.doSubMags
)
val notCDom_reduced2AbsSigSum = orReduceBy2(notCDom_absSigSum)
val notCDom_normDistReduced2 = countLeadingZeros(notCDom_reduced2AbsSigSum)
val notCDom_nearNormDist = notCDom_normDistReduced2<<1
val notCDom_sExp = io.fromPreMul.sExpSum - notCDom_nearNormDist.asUInt.zext
val notCDom_mainSig =
(notCDom_absSigSum<<notCDom_nearNormDist)(
sigWidth * 2 + 3, sigWidth - 1)
val notCDom_reduced4SigExtra =
(orReduceBy2(
notCDom_reduced2AbsSigSum(sigWidth>>1, 0)<<((sigWidth>>1) & 1)) &
lowMask(notCDom_normDistReduced2>>1, 0, (sigWidth + 2)>>2)
).orR
val notCDom_sig =
Cat(notCDom_mainSig>>3,
notCDom_mainSig(2, 0).orR || notCDom_reduced4SigExtra
)
val notCDom_completeCancellation =
(notCDom_sig(sigWidth + 2, sigWidth + 1) === 0.U)
val notCDom_sign =
Mux(notCDom_completeCancellation,
roundingMode_min,
io.fromPreMul.signProd ^ notCDom_signSigSum
)
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val notNaN_isInfProd = io.fromPreMul.isInfA || io.fromPreMul.isInfB
val notNaN_isInfOut = notNaN_isInfProd || io.fromPreMul.isInfC
val notNaN_addZeros =
(io.fromPreMul.isZeroA || io.fromPreMul.isZeroB) &&
io.fromPreMul.isZeroC
io.invalidExc :=
io.fromPreMul.isSigNaNAny ||
(io.fromPreMul.isInfA && io.fromPreMul.isZeroB) ||
(io.fromPreMul.isZeroA && io.fromPreMul.isInfB) ||
(! io.fromPreMul.isNaNAOrB &&
(io.fromPreMul.isInfA || io.fromPreMul.isInfB) &&
io.fromPreMul.isInfC &&
io.fromPreMul.doSubMags)
io.rawOut.isNaN := io.fromPreMul.isNaNAOrB || io.fromPreMul.isNaNC
io.rawOut.isInf := notNaN_isInfOut
//*** IMPROVE?:
io.rawOut.isZero :=
notNaN_addZeros ||
(! io.fromPreMul.CIsDominant && notCDom_completeCancellation)
io.rawOut.sign :=
(notNaN_isInfProd && io.fromPreMul.signProd) ||
(io.fromPreMul.isInfC && opSignC) ||
(notNaN_addZeros && ! roundingMode_min &&
io.fromPreMul.signProd && opSignC) ||
(notNaN_addZeros && roundingMode_min &&
(io.fromPreMul.signProd || opSignC)) ||
(! notNaN_isInfOut && ! notNaN_addZeros &&
Mux(io.fromPreMul.CIsDominant, CDom_sign, notCDom_sign))
io.rawOut.sExp := Mux(io.fromPreMul.CIsDominant, CDom_sExp, notCDom_sExp)
io.rawOut.sig := Mux(io.fromPreMul.CIsDominant, CDom_sig, notCDom_sig)
}
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
class MulAddRecFN(expWidth: Int, sigWidth: Int) extends RawModule
{
override def desiredName = s"MulAddRecFN_e${expWidth}_s${sigWidth}"
val io = IO(new Bundle {
val op = Input(Bits(2.W))
val a = Input(Bits((expWidth + sigWidth + 1).W))
val b = Input(Bits((expWidth + sigWidth + 1).W))
val c = Input(Bits((expWidth + sigWidth + 1).W))
val roundingMode = Input(UInt(3.W))
val detectTininess = Input(UInt(1.W))
val out = Output(Bits((expWidth + sigWidth + 1).W))
val exceptionFlags = Output(Bits(5.W))
})
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val mulAddRecFNToRaw_preMul =
Module(new MulAddRecFNToRaw_preMul(expWidth, sigWidth))
val mulAddRecFNToRaw_postMul =
Module(new MulAddRecFNToRaw_postMul(expWidth, sigWidth))
mulAddRecFNToRaw_preMul.io.op := io.op
mulAddRecFNToRaw_preMul.io.a := io.a
mulAddRecFNToRaw_preMul.io.b := io.b
mulAddRecFNToRaw_preMul.io.c := io.c
val mulAddResult =
(mulAddRecFNToRaw_preMul.io.mulAddA *
mulAddRecFNToRaw_preMul.io.mulAddB) +&
mulAddRecFNToRaw_preMul.io.mulAddC
mulAddRecFNToRaw_postMul.io.fromPreMul :=
mulAddRecFNToRaw_preMul.io.toPostMul
mulAddRecFNToRaw_postMul.io.mulAddResult := mulAddResult
mulAddRecFNToRaw_postMul.io.roundingMode := io.roundingMode
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val roundRawFNToRecFN =
Module(new RoundRawFNToRecFN(expWidth, sigWidth, 0))
roundRawFNToRecFN.io.invalidExc := mulAddRecFNToRaw_postMul.io.invalidExc
roundRawFNToRecFN.io.infiniteExc := false.B
roundRawFNToRecFN.io.in := mulAddRecFNToRaw_postMul.io.rawOut
roundRawFNToRecFN.io.roundingMode := io.roundingMode
roundRawFNToRecFN.io.detectTininess := io.detectTininess
io.out := roundRawFNToRecFN.io.out
io.exceptionFlags := roundRawFNToRecFN.io.exceptionFlags
}
| module MulAddRecFN_e8_s24_64( // @[MulAddRecFN.scala:300:7]
input [32:0] io_a, // @[MulAddRecFN.scala:303:16]
input [32:0] io_b, // @[MulAddRecFN.scala:303:16]
input [32:0] io_c, // @[MulAddRecFN.scala:303:16]
output [32:0] io_out // @[MulAddRecFN.scala:303:16]
);
wire _mulAddRecFNToRaw_postMul_io_invalidExc; // @[MulAddRecFN.scala:319:15]
wire _mulAddRecFNToRaw_postMul_io_rawOut_isNaN; // @[MulAddRecFN.scala:319:15]
wire _mulAddRecFNToRaw_postMul_io_rawOut_isInf; // @[MulAddRecFN.scala:319:15]
wire _mulAddRecFNToRaw_postMul_io_rawOut_isZero; // @[MulAddRecFN.scala:319:15]
wire _mulAddRecFNToRaw_postMul_io_rawOut_sign; // @[MulAddRecFN.scala:319:15]
wire [9:0] _mulAddRecFNToRaw_postMul_io_rawOut_sExp; // @[MulAddRecFN.scala:319:15]
wire [26:0] _mulAddRecFNToRaw_postMul_io_rawOut_sig; // @[MulAddRecFN.scala:319:15]
wire [23:0] _mulAddRecFNToRaw_preMul_io_mulAddA; // @[MulAddRecFN.scala:317:15]
wire [23:0] _mulAddRecFNToRaw_preMul_io_mulAddB; // @[MulAddRecFN.scala:317:15]
wire [47:0] _mulAddRecFNToRaw_preMul_io_mulAddC; // @[MulAddRecFN.scala:317:15]
wire _mulAddRecFNToRaw_preMul_io_toPostMul_isSigNaNAny; // @[MulAddRecFN.scala:317:15]
wire _mulAddRecFNToRaw_preMul_io_toPostMul_isNaNAOrB; // @[MulAddRecFN.scala:317:15]
wire _mulAddRecFNToRaw_preMul_io_toPostMul_isInfA; // @[MulAddRecFN.scala:317:15]
wire _mulAddRecFNToRaw_preMul_io_toPostMul_isZeroA; // @[MulAddRecFN.scala:317:15]
wire _mulAddRecFNToRaw_preMul_io_toPostMul_isInfB; // @[MulAddRecFN.scala:317:15]
wire _mulAddRecFNToRaw_preMul_io_toPostMul_isZeroB; // @[MulAddRecFN.scala:317:15]
wire _mulAddRecFNToRaw_preMul_io_toPostMul_signProd; // @[MulAddRecFN.scala:317:15]
wire _mulAddRecFNToRaw_preMul_io_toPostMul_isNaNC; // @[MulAddRecFN.scala:317:15]
wire _mulAddRecFNToRaw_preMul_io_toPostMul_isInfC; // @[MulAddRecFN.scala:317:15]
wire _mulAddRecFNToRaw_preMul_io_toPostMul_isZeroC; // @[MulAddRecFN.scala:317:15]
wire [9:0] _mulAddRecFNToRaw_preMul_io_toPostMul_sExpSum; // @[MulAddRecFN.scala:317:15]
wire _mulAddRecFNToRaw_preMul_io_toPostMul_doSubMags; // @[MulAddRecFN.scala:317:15]
wire _mulAddRecFNToRaw_preMul_io_toPostMul_CIsDominant; // @[MulAddRecFN.scala:317:15]
wire [4:0] _mulAddRecFNToRaw_preMul_io_toPostMul_CDom_CAlignDist; // @[MulAddRecFN.scala:317:15]
wire [25:0] _mulAddRecFNToRaw_preMul_io_toPostMul_highAlignedSigC; // @[MulAddRecFN.scala:317:15]
wire _mulAddRecFNToRaw_preMul_io_toPostMul_bit0AlignedSigC; // @[MulAddRecFN.scala:317:15]
wire [32:0] io_a_0 = io_a; // @[MulAddRecFN.scala:300:7]
wire [32:0] io_b_0 = io_b; // @[MulAddRecFN.scala:300:7]
wire [32:0] io_c_0 = io_c; // @[MulAddRecFN.scala:300:7]
wire io_detectTininess = 1'h1; // @[MulAddRecFN.scala:300:7, :303:16, :339:15]
wire [2:0] io_roundingMode = 3'h0; // @[MulAddRecFN.scala:300:7, :303:16, :319:15, :339:15]
wire [1:0] io_op = 2'h0; // @[MulAddRecFN.scala:300:7, :303:16, :317:15]
wire [32:0] io_out_0; // @[MulAddRecFN.scala:300:7]
wire [4:0] io_exceptionFlags; // @[MulAddRecFN.scala:300:7]
wire [47:0] _mulAddResult_T = {24'h0, _mulAddRecFNToRaw_preMul_io_mulAddA} * {24'h0, _mulAddRecFNToRaw_preMul_io_mulAddB}; // @[MulAddRecFN.scala:317:15, :327:45]
wire [48:0] mulAddResult = {1'h0, _mulAddResult_T} + {1'h0, _mulAddRecFNToRaw_preMul_io_mulAddC}; // @[MulAddRecFN.scala:317:15, :327:45, :328:50]
MulAddRecFNToRaw_preMul_e8_s24_64 mulAddRecFNToRaw_preMul ( // @[MulAddRecFN.scala:317:15]
.io_a (io_a_0), // @[MulAddRecFN.scala:300:7]
.io_b (io_b_0), // @[MulAddRecFN.scala:300:7]
.io_c (io_c_0), // @[MulAddRecFN.scala:300:7]
.io_mulAddA (_mulAddRecFNToRaw_preMul_io_mulAddA),
.io_mulAddB (_mulAddRecFNToRaw_preMul_io_mulAddB),
.io_mulAddC (_mulAddRecFNToRaw_preMul_io_mulAddC),
.io_toPostMul_isSigNaNAny (_mulAddRecFNToRaw_preMul_io_toPostMul_isSigNaNAny),
.io_toPostMul_isNaNAOrB (_mulAddRecFNToRaw_preMul_io_toPostMul_isNaNAOrB),
.io_toPostMul_isInfA (_mulAddRecFNToRaw_preMul_io_toPostMul_isInfA),
.io_toPostMul_isZeroA (_mulAddRecFNToRaw_preMul_io_toPostMul_isZeroA),
.io_toPostMul_isInfB (_mulAddRecFNToRaw_preMul_io_toPostMul_isInfB),
.io_toPostMul_isZeroB (_mulAddRecFNToRaw_preMul_io_toPostMul_isZeroB),
.io_toPostMul_signProd (_mulAddRecFNToRaw_preMul_io_toPostMul_signProd),
.io_toPostMul_isNaNC (_mulAddRecFNToRaw_preMul_io_toPostMul_isNaNC),
.io_toPostMul_isInfC (_mulAddRecFNToRaw_preMul_io_toPostMul_isInfC),
.io_toPostMul_isZeroC (_mulAddRecFNToRaw_preMul_io_toPostMul_isZeroC),
.io_toPostMul_sExpSum (_mulAddRecFNToRaw_preMul_io_toPostMul_sExpSum),
.io_toPostMul_doSubMags (_mulAddRecFNToRaw_preMul_io_toPostMul_doSubMags),
.io_toPostMul_CIsDominant (_mulAddRecFNToRaw_preMul_io_toPostMul_CIsDominant),
.io_toPostMul_CDom_CAlignDist (_mulAddRecFNToRaw_preMul_io_toPostMul_CDom_CAlignDist),
.io_toPostMul_highAlignedSigC (_mulAddRecFNToRaw_preMul_io_toPostMul_highAlignedSigC),
.io_toPostMul_bit0AlignedSigC (_mulAddRecFNToRaw_preMul_io_toPostMul_bit0AlignedSigC)
); // @[MulAddRecFN.scala:317:15]
MulAddRecFNToRaw_postMul_e8_s24_64 mulAddRecFNToRaw_postMul ( // @[MulAddRecFN.scala:319:15]
.io_fromPreMul_isSigNaNAny (_mulAddRecFNToRaw_preMul_io_toPostMul_isSigNaNAny), // @[MulAddRecFN.scala:317:15]
.io_fromPreMul_isNaNAOrB (_mulAddRecFNToRaw_preMul_io_toPostMul_isNaNAOrB), // @[MulAddRecFN.scala:317:15]
.io_fromPreMul_isInfA (_mulAddRecFNToRaw_preMul_io_toPostMul_isInfA), // @[MulAddRecFN.scala:317:15]
.io_fromPreMul_isZeroA (_mulAddRecFNToRaw_preMul_io_toPostMul_isZeroA), // @[MulAddRecFN.scala:317:15]
.io_fromPreMul_isInfB (_mulAddRecFNToRaw_preMul_io_toPostMul_isInfB), // @[MulAddRecFN.scala:317:15]
.io_fromPreMul_isZeroB (_mulAddRecFNToRaw_preMul_io_toPostMul_isZeroB), // @[MulAddRecFN.scala:317:15]
.io_fromPreMul_signProd (_mulAddRecFNToRaw_preMul_io_toPostMul_signProd), // @[MulAddRecFN.scala:317:15]
.io_fromPreMul_isNaNC (_mulAddRecFNToRaw_preMul_io_toPostMul_isNaNC), // @[MulAddRecFN.scala:317:15]
.io_fromPreMul_isInfC (_mulAddRecFNToRaw_preMul_io_toPostMul_isInfC), // @[MulAddRecFN.scala:317:15]
.io_fromPreMul_isZeroC (_mulAddRecFNToRaw_preMul_io_toPostMul_isZeroC), // @[MulAddRecFN.scala:317:15]
.io_fromPreMul_sExpSum (_mulAddRecFNToRaw_preMul_io_toPostMul_sExpSum), // @[MulAddRecFN.scala:317:15]
.io_fromPreMul_doSubMags (_mulAddRecFNToRaw_preMul_io_toPostMul_doSubMags), // @[MulAddRecFN.scala:317:15]
.io_fromPreMul_CIsDominant (_mulAddRecFNToRaw_preMul_io_toPostMul_CIsDominant), // @[MulAddRecFN.scala:317:15]
.io_fromPreMul_CDom_CAlignDist (_mulAddRecFNToRaw_preMul_io_toPostMul_CDom_CAlignDist), // @[MulAddRecFN.scala:317:15]
.io_fromPreMul_highAlignedSigC (_mulAddRecFNToRaw_preMul_io_toPostMul_highAlignedSigC), // @[MulAddRecFN.scala:317:15]
.io_fromPreMul_bit0AlignedSigC (_mulAddRecFNToRaw_preMul_io_toPostMul_bit0AlignedSigC), // @[MulAddRecFN.scala:317:15]
.io_mulAddResult (mulAddResult), // @[MulAddRecFN.scala:328:50]
.io_invalidExc (_mulAddRecFNToRaw_postMul_io_invalidExc),
.io_rawOut_isNaN (_mulAddRecFNToRaw_postMul_io_rawOut_isNaN),
.io_rawOut_isInf (_mulAddRecFNToRaw_postMul_io_rawOut_isInf),
.io_rawOut_isZero (_mulAddRecFNToRaw_postMul_io_rawOut_isZero),
.io_rawOut_sign (_mulAddRecFNToRaw_postMul_io_rawOut_sign),
.io_rawOut_sExp (_mulAddRecFNToRaw_postMul_io_rawOut_sExp),
.io_rawOut_sig (_mulAddRecFNToRaw_postMul_io_rawOut_sig)
); // @[MulAddRecFN.scala:319:15]
RoundRawFNToRecFN_e8_s24_88 roundRawFNToRecFN ( // @[MulAddRecFN.scala:339:15]
.io_invalidExc (_mulAddRecFNToRaw_postMul_io_invalidExc), // @[MulAddRecFN.scala:319:15]
.io_in_isNaN (_mulAddRecFNToRaw_postMul_io_rawOut_isNaN), // @[MulAddRecFN.scala:319:15]
.io_in_isInf (_mulAddRecFNToRaw_postMul_io_rawOut_isInf), // @[MulAddRecFN.scala:319:15]
.io_in_isZero (_mulAddRecFNToRaw_postMul_io_rawOut_isZero), // @[MulAddRecFN.scala:319:15]
.io_in_sign (_mulAddRecFNToRaw_postMul_io_rawOut_sign), // @[MulAddRecFN.scala:319:15]
.io_in_sExp (_mulAddRecFNToRaw_postMul_io_rawOut_sExp), // @[MulAddRecFN.scala:319:15]
.io_in_sig (_mulAddRecFNToRaw_postMul_io_rawOut_sig), // @[MulAddRecFN.scala:319:15]
.io_out (io_out_0),
.io_exceptionFlags (io_exceptionFlags)
); // @[MulAddRecFN.scala:339:15]
assign io_out = io_out_0; // @[MulAddRecFN.scala:300:7]
endmodule |
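As a rough usage sketch (the module and port names are assumptions, not part of the generated design), the MulAddRecFN block above computes a fused a * b + c on recoded operands; for expWidth = 8 and sigWidth = 24 the recoded format is 33 bits wide.
import chisel3._
import hardfloat.{MulAddRecFN, consts}
class FmaExample extends Module {
  val io = IO(new Bundle {
    val a, b, c = Input(UInt(33.W))   // recFN-encoded single-precision operands
    val out     = Output(UInt(33.W))
  })
  val fma = Module(new MulAddRecFN(8, 24))
  fma.io.op             := 0.U                         // op = 0: plain (a * b) + c
  fma.io.a              := io.a
  fma.io.b              := io.b
  fma.io.c              := io.c
  fma.io.roundingMode   := consts.round_near_even
  fma.io.detectTininess := consts.tininess_afterRounding
  io.out := fma.io.out                                  // exceptionFlags left unused here
}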
Generate the Verilog code corresponding to the following Chisel files.
File Monitor.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceLine
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import freechips.rocketchip.diplomacy.EnableMonitors
import freechips.rocketchip.formal.{MonitorDirection, IfThen, Property, PropertyClass, TestplanTestType, TLMonitorStrictMode}
import freechips.rocketchip.util.PlusArg
case class TLMonitorArgs(edge: TLEdge)
abstract class TLMonitorBase(args: TLMonitorArgs) extends Module
{
val io = IO(new Bundle {
val in = Input(new TLBundle(args.edge.bundle))
})
def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit
legalize(io.in, args.edge, reset)
}
object TLMonitor {
def apply(enable: Boolean, node: TLNode)(implicit p: Parameters): TLNode = {
if (enable) {
EnableMonitors { implicit p => node := TLEphemeralNode()(ValName("monitor")) }
} else { node }
}
}
class TLMonitor(args: TLMonitorArgs, monitorDir: MonitorDirection = MonitorDirection.Monitor) extends TLMonitorBase(args)
{
require (args.edge.params(TLMonitorStrictMode) || (! args.edge.params(TestplanTestType).formal))
val cover_prop_class = PropertyClass.Default
//Like assert but can flip to being an assumption for formal verification
def monAssert(cond: Bool, message: String): Unit =
if (monitorDir == MonitorDirection.Monitor) {
assert(cond, message)
} else {
Property(monitorDir, cond, message, PropertyClass.Default)
}
def assume(cond: Bool, message: String): Unit =
if (monitorDir == MonitorDirection.Monitor) {
assert(cond, message)
} else {
Property(monitorDir.flip, cond, message, PropertyClass.Default)
}
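  // Illustrative note (not in the upstream source): in plain Monitor mode both helpers
  // above reduce to simulation asserts. In a directed formal flow, monAssert emits a
  // property in the monitor's own direction while assume emits it flipped, so a check
  // such as monAssert(TLMessages.isA(bundle.opcode), "...") on traffic driven by the
  // environment can be treated as an assumption rather than a proof obligation.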
def extra = {
args.edge.sourceInfo match {
case SourceLine(filename, line, col) => s" (connected at $filename:$line:$col)"
case _ => ""
}
}
def visible(address: UInt, source: UInt, edge: TLEdge) =
edge.client.clients.map { c =>
!c.sourceId.contains(source) ||
c.visibility.map(_.contains(address)).reduce(_ || _)
}.reduce(_ && _)
def legalizeFormatA(bundle: TLBundleA, edge: TLEdge): Unit = {
//switch this flag to turn on diplomacy in error messages
def diplomacyInfo = if (true) "" else "\nThe diplomacy information for the edge is as follows:\n" + edge.formatEdge + "\n"
monAssert (TLMessages.isA(bundle.opcode), "'A' channel has invalid opcode" + extra)
// Reuse these subexpressions to save some firrtl lines
val source_ok = edge.client.contains(bundle.source)
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val mask = edge.full_mask(bundle)
monAssert (visible(edge.address(bundle), bundle.source, edge), "'A' channel carries an address illegal for the specified bank visibility")
//The monitor doesn’t check for acquire T vs acquire B, it assumes that acquire B implies acquire T and only checks for acquire B
//TODO: check for acquireT?
when (bundle.opcode === TLMessages.AcquireBlock) {
monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock from a client which does not support Probe" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel AcquireBlock carries invalid source ID" + diplomacyInfo + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquireBlock smaller than a beat" + extra)
monAssert (is_aligned, "'A' channel AcquireBlock address not aligned to size" + extra)
monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquireBlock carries invalid grow param" + extra)
monAssert (~bundle.mask === 0.U, "'A' channel AcquireBlock contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel AcquireBlock is corrupt" + extra)
}
when (bundle.opcode === TLMessages.AcquirePerm) {
monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm from a client which does not support Probe" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel AcquirePerm carries invalid source ID" + diplomacyInfo + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquirePerm smaller than a beat" + extra)
monAssert (is_aligned, "'A' channel AcquirePerm address not aligned to size" + extra)
monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquirePerm carries invalid grow param" + extra)
monAssert (bundle.param =/= TLPermissions.NtoB, "'A' channel AcquirePerm requests NtoB" + extra)
monAssert (~bundle.mask === 0.U, "'A' channel AcquirePerm contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel AcquirePerm is corrupt" + extra)
}
when (bundle.opcode === TLMessages.Get) {
monAssert (edge.master.emitsGet(bundle.source, bundle.size), "'A' channel carries Get type which master claims it can't emit" + diplomacyInfo + extra)
monAssert (edge.slave.supportsGetSafe(edge.address(bundle), bundle.size, None), "'A' channel carries Get type which slave claims it can't support" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel Get carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Get address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel Get carries invalid param" + extra)
monAssert (bundle.mask === mask, "'A' channel Get contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel Get is corrupt" + extra)
}
when (bundle.opcode === TLMessages.PutFullData) {
monAssert (edge.master.emitsPutFull(bundle.source, bundle.size) && edge.slave.supportsPutFullSafe(edge.address(bundle), bundle.size), "'A' channel carries PutFull type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel PutFull carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel PutFull address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel PutFull carries invalid param" + extra)
monAssert (bundle.mask === mask, "'A' channel PutFull contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.PutPartialData) {
monAssert (edge.master.emitsPutPartial(bundle.source, bundle.size) && edge.slave.supportsPutPartialSafe(edge.address(bundle), bundle.size), "'A' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel PutPartial carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel PutPartial address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel PutPartial carries invalid param" + extra)
monAssert ((bundle.mask & ~mask) === 0.U, "'A' channel PutPartial contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.ArithmeticData) {
monAssert (edge.master.emitsArithmetic(bundle.source, bundle.size) && edge.slave.supportsArithmeticSafe(edge.address(bundle), bundle.size), "'A' channel carries Arithmetic type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Arithmetic carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Arithmetic address not aligned to size" + extra)
monAssert (TLAtomics.isArithmetic(bundle.param), "'A' channel Arithmetic carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Arithmetic contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.LogicalData) {
monAssert (edge.master.emitsLogical(bundle.source, bundle.size) && edge.slave.supportsLogicalSafe(edge.address(bundle), bundle.size), "'A' channel carries Logical type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Logical carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Logical address not aligned to size" + extra)
monAssert (TLAtomics.isLogical(bundle.param), "'A' channel Logical carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Logical contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.Hint) {
monAssert (edge.master.emitsHint(bundle.source, bundle.size) && edge.slave.supportsHintSafe(edge.address(bundle), bundle.size), "'A' channel carries Hint type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Hint carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Hint address not aligned to size" + extra)
monAssert (TLHints.isHints(bundle.param), "'A' channel Hint carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Hint contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel Hint is corrupt" + extra)
}
}
def legalizeFormatB(bundle: TLBundleB, edge: TLEdge): Unit = {
monAssert (TLMessages.isB(bundle.opcode), "'B' channel has invalid opcode" + extra)
monAssert (visible(edge.address(bundle), bundle.source, edge), "'B' channel carries an address illegal for the specified bank visibility")
// Reuse these subexpressions to save some firrtl lines
val address_ok = edge.manager.containsSafe(edge.address(bundle))
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val mask = edge.full_mask(bundle)
val legal_source = Mux1H(edge.client.find(bundle.source), edge.client.clients.map(c => c.sourceId.start.U)) === bundle.source
when (bundle.opcode === TLMessages.Probe) {
assume (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'B' channel carries Probe type which is unexpected using diplomatic parameters" + extra)
assume (address_ok, "'B' channel Probe carries unmanaged address" + extra)
assume (legal_source, "'B' channel Probe carries source that is not first source" + extra)
assume (is_aligned, "'B' channel Probe address not aligned to size" + extra)
assume (TLPermissions.isCap(bundle.param), "'B' channel Probe carries invalid cap param" + extra)
assume (bundle.mask === mask, "'B' channel Probe contains invalid mask" + extra)
assume (!bundle.corrupt, "'B' channel Probe is corrupt" + extra)
}
when (bundle.opcode === TLMessages.Get) {
monAssert (edge.master.supportsGet(edge.source(bundle), bundle.size) && edge.slave.emitsGetSafe(edge.address(bundle), bundle.size), "'B' channel carries Get type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel Get carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Get carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Get address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel Get carries invalid param" + extra)
monAssert (bundle.mask === mask, "'B' channel Get contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'B' channel Get is corrupt" + extra)
}
when (bundle.opcode === TLMessages.PutFullData) {
monAssert (edge.master.supportsPutFull(edge.source(bundle), bundle.size) && edge.slave.emitsPutFullSafe(edge.address(bundle), bundle.size), "'B' channel carries PutFull type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel PutFull carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel PutFull carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel PutFull address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel PutFull carries invalid param" + extra)
monAssert (bundle.mask === mask, "'B' channel PutFull contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.PutPartialData) {
monAssert (edge.master.supportsPutPartial(edge.source(bundle), bundle.size) && edge.slave.emitsPutPartialSafe(edge.address(bundle), bundle.size), "'B' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel PutPartial carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel PutPartial carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel PutPartial address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel PutPartial carries invalid param" + extra)
monAssert ((bundle.mask & ~mask) === 0.U, "'B' channel PutPartial contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.ArithmeticData) {
monAssert (edge.master.supportsArithmetic(edge.source(bundle), bundle.size) && edge.slave.emitsArithmeticSafe(edge.address(bundle), bundle.size), "'B' channel carries Arithmetic type unsupported by client" + extra)
monAssert (address_ok, "'B' channel Arithmetic carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Arithmetic carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Arithmetic address not aligned to size" + extra)
monAssert (TLAtomics.isArithmetic(bundle.param), "'B' channel Arithmetic carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'B' channel Arithmetic contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.LogicalData) {
monAssert (edge.master.supportsLogical(edge.source(bundle), bundle.size) && edge.slave.emitsLogicalSafe(edge.address(bundle), bundle.size), "'B' channel carries Logical type unsupported by client" + extra)
monAssert (address_ok, "'B' channel Logical carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Logical carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Logical address not aligned to size" + extra)
monAssert (TLAtomics.isLogical(bundle.param), "'B' channel Logical carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'B' channel Logical contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.Hint) {
monAssert (edge.master.supportsHint(edge.source(bundle), bundle.size) && edge.slave.emitsHintSafe(edge.address(bundle), bundle.size), "'B' channel carries Hint type unsupported by client" + extra)
monAssert (address_ok, "'B' channel Hint carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Hint carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Hint address not aligned to size" + extra)
monAssert (bundle.mask === mask, "'B' channel Hint contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'B' channel Hint is corrupt" + extra)
}
}
def legalizeFormatC(bundle: TLBundleC, edge: TLEdge): Unit = {
monAssert (TLMessages.isC(bundle.opcode), "'C' channel has invalid opcode" + extra)
val source_ok = edge.client.contains(bundle.source)
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val address_ok = edge.manager.containsSafe(edge.address(bundle))
monAssert (visible(edge.address(bundle), bundle.source, edge), "'C' channel carries an address illegal for the specified bank visibility")
when (bundle.opcode === TLMessages.ProbeAck) {
monAssert (address_ok, "'C' channel ProbeAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel ProbeAck carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAck smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ProbeAck address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAck carries invalid report param" + extra)
monAssert (!bundle.corrupt, "'C' channel ProbeAck is corrupt" + extra)
}
when (bundle.opcode === TLMessages.ProbeAckData) {
monAssert (address_ok, "'C' channel ProbeAckData carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel ProbeAckData carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAckData smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ProbeAckData address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAckData carries invalid report param" + extra)
}
when (bundle.opcode === TLMessages.Release) {
monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries Release type unsupported by manager" + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra)
monAssert (source_ok, "'C' channel Release carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel Release smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel Release address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel Release carries invalid report param" + extra)
monAssert (!bundle.corrupt, "'C' channel Release is corrupt" + extra)
}
when (bundle.opcode === TLMessages.ReleaseData) {
monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData type unsupported by manager" + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData from a client which does not support Probe" + extra)
monAssert (source_ok, "'C' channel ReleaseData carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ReleaseData smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ReleaseData address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ReleaseData carries invalid report param" + extra)
}
when (bundle.opcode === TLMessages.AccessAck) {
monAssert (address_ok, "'C' channel AccessAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel AccessAck carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel AccessAck address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel AccessAck carries invalid param" + extra)
monAssert (!bundle.corrupt, "'C' channel AccessAck is corrupt" + extra)
}
when (bundle.opcode === TLMessages.AccessAckData) {
monAssert (address_ok, "'C' channel AccessAckData carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel AccessAckData carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel AccessAckData address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel AccessAckData carries invalid param" + extra)
}
when (bundle.opcode === TLMessages.HintAck) {
monAssert (address_ok, "'C' channel HintAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel HintAck carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel HintAck address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel HintAck carries invalid param" + extra)
monAssert (!bundle.corrupt, "'C' channel HintAck is corrupt" + extra)
}
}
def legalizeFormatD(bundle: TLBundleD, edge: TLEdge): Unit = {
assume (TLMessages.isD(bundle.opcode), "'D' channel has invalid opcode" + extra)
val source_ok = edge.client.contains(bundle.source)
val sink_ok = bundle.sink < edge.manager.endSinkId.U
val deny_put_ok = edge.manager.mayDenyPut.B
val deny_get_ok = edge.manager.mayDenyGet.B
when (bundle.opcode === TLMessages.ReleaseAck) {
assume (source_ok, "'D' channel ReleaseAck carries invalid source ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel ReleaseAck smaller than a beat" + extra)
assume (bundle.param === 0.U, "'D' channel ReleaseAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel ReleaseAck is corrupt" + extra)
assume (!bundle.denied, "'D' channel ReleaseAck is denied" + extra)
}
when (bundle.opcode === TLMessages.Grant) {
assume (source_ok, "'D' channel Grant carries invalid source ID" + extra)
assume (sink_ok, "'D' channel Grant carries invalid sink ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel Grant smaller than a beat" + extra)
assume (TLPermissions.isCap(bundle.param), "'D' channel Grant carries invalid cap param" + extra)
assume (bundle.param =/= TLPermissions.toN, "'D' channel Grant carries toN param" + extra)
assume (!bundle.corrupt, "'D' channel Grant is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel Grant is denied" + extra)
}
when (bundle.opcode === TLMessages.GrantData) {
assume (source_ok, "'D' channel GrantData carries invalid source ID" + extra)
assume (sink_ok, "'D' channel GrantData carries invalid sink ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel GrantData smaller than a beat" + extra)
assume (TLPermissions.isCap(bundle.param), "'D' channel GrantData carries invalid cap param" + extra)
assume (bundle.param =/= TLPermissions.toN, "'D' channel GrantData carries toN param" + extra)
assume (!bundle.denied || bundle.corrupt, "'D' channel GrantData is denied but not corrupt" + extra)
assume (deny_get_ok || !bundle.denied, "'D' channel GrantData is denied" + extra)
}
when (bundle.opcode === TLMessages.AccessAck) {
assume (source_ok, "'D' channel AccessAck carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel AccessAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel AccessAck is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel AccessAck is denied" + extra)
}
when (bundle.opcode === TLMessages.AccessAckData) {
assume (source_ok, "'D' channel AccessAckData carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel AccessAckData carries invalid param" + extra)
assume (!bundle.denied || bundle.corrupt, "'D' channel AccessAckData is denied but not corrupt" + extra)
assume (deny_get_ok || !bundle.denied, "'D' channel AccessAckData is denied" + extra)
}
when (bundle.opcode === TLMessages.HintAck) {
assume (source_ok, "'D' channel HintAck carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel HintAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel HintAck is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel HintAck is denied" + extra)
}
}
def legalizeFormatE(bundle: TLBundleE, edge: TLEdge): Unit = {
val sink_ok = bundle.sink < edge.manager.endSinkId.U
monAssert (sink_ok, "'E' channel carries invalid sink ID" + extra)
}
def legalizeFormat(bundle: TLBundle, edge: TLEdge) = {
when (bundle.a.valid) { legalizeFormatA(bundle.a.bits, edge) }
when (bundle.d.valid) { legalizeFormatD(bundle.d.bits, edge) }
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
when (bundle.b.valid) { legalizeFormatB(bundle.b.bits, edge) }
when (bundle.c.valid) { legalizeFormatC(bundle.c.bits, edge) }
when (bundle.e.valid) { legalizeFormatE(bundle.e.bits, edge) }
} else {
monAssert (!bundle.b.valid, "'B' channel valid and not TL-C" + extra)
monAssert (!bundle.c.valid, "'C' channel valid and not TL-C" + extra)
monAssert (!bundle.e.valid, "'E' channel valid and not TL-C" + extra)
}
}
def legalizeMultibeatA(a: DecoupledIO[TLBundleA], edge: TLEdge): Unit = {
val a_first = edge.first(a.bits, a.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (a.valid && !a_first) {
monAssert (a.bits.opcode === opcode, "'A' channel opcode changed within multibeat operation" + extra)
monAssert (a.bits.param === param, "'A' channel param changed within multibeat operation" + extra)
monAssert (a.bits.size === size, "'A' channel size changed within multibeat operation" + extra)
monAssert (a.bits.source === source, "'A' channel source changed within multibeat operation" + extra)
monAssert (a.bits.address === address, "'A' channel address changed within multibeat operation" + extra)
}
when (a.fire && a_first) {
opcode := a.bits.opcode
param := a.bits.param
size := a.bits.size
source := a.bits.source
address := a.bits.address
}
}
def legalizeMultibeatB(b: DecoupledIO[TLBundleB], edge: TLEdge): Unit = {
val b_first = edge.first(b.bits, b.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (b.valid && !b_first) {
monAssert (b.bits.opcode === opcode, "'B' channel opcode changed within multibeat operation" + extra)
monAssert (b.bits.param === param, "'B' channel param changed within multibeat operation" + extra)
monAssert (b.bits.size === size, "'B' channel size changed within multibeat operation" + extra)
monAssert (b.bits.source === source, "'B' channel source changed within multibeat operation" + extra)
monAssert (b.bits.address === address, "'B' channel address changed within multibeat operation" + extra)
}
when (b.fire && b_first) {
opcode := b.bits.opcode
param := b.bits.param
size := b.bits.size
source := b.bits.source
address := b.bits.address
}
}
def legalizeADSourceFormal(bundle: TLBundle, edge: TLEdge): Unit = {
// Symbolic variable
val sym_source = Wire(UInt(edge.client.endSourceId.W))
// TODO: Connect sym_source to a fixed value for simulation and to a
// free wire in formal
sym_source := 0.U
// Type casting Int to UInt
val maxSourceId = Wire(UInt(edge.client.endSourceId.W))
maxSourceId := edge.client.endSourceId.U
// Delayed version of sym_source
val sym_source_d = Reg(UInt(edge.client.endSourceId.W))
sym_source_d := sym_source
// These will be constraints for FV setup
Property(
MonitorDirection.Monitor,
(sym_source === sym_source_d),
"sym_source should remain stable",
PropertyClass.Default)
Property(
MonitorDirection.Monitor,
(sym_source <= maxSourceId),
"sym_source should take legal value",
PropertyClass.Default)
val my_resp_pend = RegInit(false.B)
val my_opcode = Reg(UInt())
val my_size = Reg(UInt())
val a_first = bundle.a.valid && edge.first(bundle.a.bits, bundle.a.fire)
val d_first = bundle.d.valid && edge.first(bundle.d.bits, bundle.d.fire)
val my_a_first_beat = a_first && (bundle.a.bits.source === sym_source)
val my_d_first_beat = d_first && (bundle.d.bits.source === sym_source)
val my_clr_resp_pend = (bundle.d.fire && my_d_first_beat)
val my_set_resp_pend = (bundle.a.fire && my_a_first_beat && !my_clr_resp_pend)
when (my_set_resp_pend) {
my_resp_pend := true.B
} .elsewhen (my_clr_resp_pend) {
my_resp_pend := false.B
}
when (my_a_first_beat) {
my_opcode := bundle.a.bits.opcode
my_size := bundle.a.bits.size
}
val my_resp_size = Mux(my_a_first_beat, bundle.a.bits.size, my_size)
val my_resp_opcode = Mux(my_a_first_beat, bundle.a.bits.opcode, my_opcode)
val my_resp_opcode_legal = Wire(Bool())
when ((my_resp_opcode === TLMessages.Get) || (my_resp_opcode === TLMessages.ArithmeticData) ||
(my_resp_opcode === TLMessages.LogicalData)) {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAckData)
} .elsewhen ((my_resp_opcode === TLMessages.PutFullData) || (my_resp_opcode === TLMessages.PutPartialData)) {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAck)
} .otherwise {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.HintAck)
}
monAssert (IfThen(my_resp_pend, !my_a_first_beat),
"Request message should not be sent with a source ID, for which a response message" +
"is already pending (not received until current cycle) for a prior request message" +
"with the same source ID" + extra)
assume (IfThen(my_clr_resp_pend, (my_set_resp_pend || my_resp_pend)),
"Response message should be accepted with a source ID only if a request message with the" +
"same source ID has been accepted or is being accepted in the current cycle" + extra)
assume (IfThen(my_d_first_beat, (my_a_first_beat || my_resp_pend)),
"Response message should be sent with a source ID only if a request message with the" +
"same source ID has been accepted or is being sent in the current cycle" + extra)
assume (IfThen(my_d_first_beat, (bundle.d.bits.size === my_resp_size)),
"If d_valid is 1, then d_size should be same as a_size of the corresponding request" +
"message" + extra)
assume (IfThen(my_d_first_beat, my_resp_opcode_legal),
"If d_valid is 1, then d_opcode should correspond with a_opcode of the corresponding" +
"request message" + extra)
}
def legalizeMultibeatC(c: DecoupledIO[TLBundleC], edge: TLEdge): Unit = {
val c_first = edge.first(c.bits, c.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (c.valid && !c_first) {
monAssert (c.bits.opcode === opcode, "'C' channel opcode changed within multibeat operation" + extra)
monAssert (c.bits.param === param, "'C' channel param changed within multibeat operation" + extra)
monAssert (c.bits.size === size, "'C' channel size changed within multibeat operation" + extra)
monAssert (c.bits.source === source, "'C' channel source changed within multibeat operation" + extra)
monAssert (c.bits.address === address, "'C' channel address changed within multibeat operation" + extra)
}
when (c.fire && c_first) {
opcode := c.bits.opcode
param := c.bits.param
size := c.bits.size
source := c.bits.source
address := c.bits.address
}
}
def legalizeMultibeatD(d: DecoupledIO[TLBundleD], edge: TLEdge): Unit = {
val d_first = edge.first(d.bits, d.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val sink = Reg(UInt())
val denied = Reg(Bool())
when (d.valid && !d_first) {
assume (d.bits.opcode === opcode, "'D' channel opcode changed within multibeat operation" + extra)
assume (d.bits.param === param, "'D' channel param changed within multibeat operation" + extra)
assume (d.bits.size === size, "'D' channel size changed within multibeat operation" + extra)
assume (d.bits.source === source, "'D' channel source changed within multibeat operation" + extra)
assume (d.bits.sink === sink, "'D' channel sink changed within multibeat operation" + extra)
assume (d.bits.denied === denied, "'D' channel denied changed within multibeat operation" + extra)
}
when (d.fire && d_first) {
opcode := d.bits.opcode
param := d.bits.param
size := d.bits.size
source := d.bits.source
sink := d.bits.sink
denied := d.bits.denied
}
}
def legalizeMultibeat(bundle: TLBundle, edge: TLEdge): Unit = {
legalizeMultibeatA(bundle.a, edge)
legalizeMultibeatD(bundle.d, edge)
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
legalizeMultibeatB(bundle.b, edge)
legalizeMultibeatC(bundle.c, edge)
}
}
// This is left in for almond, which doesn't adhere to the TileLink protocol
@deprecated("Use legalizeADSource instead if possible","")
def legalizeADSourceOld(bundle: TLBundle, edge: TLEdge): Unit = {
val inflight = RegInit(0.U(edge.client.endSourceId.W))
val a_first = edge.first(bundle.a.bits, bundle.a.fire)
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
val a_set = WireInit(0.U(edge.client.endSourceId.W))
when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) {
a_set := UIntToOH(bundle.a.bits.source)
assert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra)
}
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
assume((a_set | inflight)(bundle.d.bits.source), "'D' channel acknowledged for nothing inflight" + extra)
}
if (edge.manager.minLatency > 0) {
assume(a_set =/= d_clr || !a_set.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra)
}
inflight := (inflight | a_set) & ~d_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
assert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeADSource(bundle: TLBundle, edge: TLEdge): Unit = {
val a_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset)
val a_opcode_bus_size = 3 + 1 //opcode size is 3; add one so that 0 is not mapped to anything
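// Illustration of the +1 bias (not used by the checks themselves, a sketch of the
// encoding assumed below): an in-flight request of size 3 is recorded in its
// source's slot of inflight_sizes as (3 << 1) | 1 = 7, so a slot value of 0
// unambiguously means "nothing outstanding"; the a_size_lookup / a_opcode_lookup
// expressions below strip the bias again with their trailing ">> 1.U".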
val log_a_opcode_bus_size = log2Ceil(a_opcode_bus_size)
val log_a_size_bus_size = log2Ceil(a_size_bus_size)
def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits
val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) // size up to avoid width error
inflight.suggestName("inflight")
val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
inflight_opcodes.suggestName("inflight_opcodes")
val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
inflight_sizes.suggestName("inflight_sizes")
val a_first = edge.first(bundle.a.bits, bundle.a.fire)
a_first.suggestName("a_first")
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
d_first.suggestName("d_first")
val a_set = WireInit(0.U(edge.client.endSourceId.W))
val a_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
a_set.suggestName("a_set")
a_set_wo_ready.suggestName("a_set_wo_ready")
val a_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
a_opcodes_set.suggestName("a_opcodes_set")
val a_sizes_set = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
a_sizes_set.suggestName("a_sizes_set")
val a_opcode_lookup = WireInit(0.U((a_opcode_bus_size - 1).W))
a_opcode_lookup.suggestName("a_opcode_lookup")
a_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_a_opcode_bus_size.U) & size_to_numfullbits(1.U << log_a_opcode_bus_size.U)) >> 1.U
val a_size_lookup = WireInit(0.U((1 << log_a_size_bus_size).W))
a_size_lookup.suggestName("a_size_lookup")
a_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_a_size_bus_size.U) & size_to_numfullbits(1.U << log_a_size_bus_size.U)) >> 1.U
val responseMap = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.Grant, TLMessages.Grant))
val responseMapSecondOption = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.GrantData, TLMessages.Grant))
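// Reading these vectors (illustrative): they are indexed by the A-channel opcode,
// so a Get (opcode 4) must be answered with AccessAckData, a PutFullData (opcode 0)
// with AccessAck, and an AcquireBlock (opcode 6) with Grant or, via
// responseMapSecondOption, GrantData.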
val a_opcodes_set_interm = WireInit(0.U(a_opcode_bus_size.W))
a_opcodes_set_interm.suggestName("a_opcodes_set_interm")
val a_sizes_set_interm = WireInit(0.U(a_size_bus_size.W))
a_sizes_set_interm.suggestName("a_sizes_set_interm")
when (bundle.a.valid && a_first && edge.isRequest(bundle.a.bits)) {
a_set_wo_ready := UIntToOH(bundle.a.bits.source)
}
when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) {
a_set := UIntToOH(bundle.a.bits.source)
a_opcodes_set_interm := (bundle.a.bits.opcode << 1.U) | 1.U
a_sizes_set_interm := (bundle.a.bits.size << 1.U) | 1.U
a_opcodes_set := (a_opcodes_set_interm) << (bundle.a.bits.source << log_a_opcode_bus_size.U)
a_sizes_set := (a_sizes_set_interm) << (bundle.a.bits.source << log_a_size_bus_size.U)
monAssert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra)
}
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
d_clr.suggestName("d_clr")
d_clr_wo_ready.suggestName("d_clr_wo_ready")
val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
d_opcodes_clr.suggestName("d_opcodes_clr")
val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
d_sizes_clr.suggestName("d_sizes_clr")
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr_wo_ready := UIntToOH(bundle.d.bits.source)
}
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
d_opcodes_clr := size_to_numfullbits(1.U << log_a_opcode_bus_size.U) << (bundle.d.bits.source << log_a_opcode_bus_size.U)
d_sizes_clr := size_to_numfullbits(1.U << log_a_size_bus_size.U) << (bundle.d.bits.source << log_a_size_bus_size.U)
}
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
val same_cycle_resp = bundle.a.valid && a_first && edge.isRequest(bundle.a.bits) && (bundle.a.bits.source === bundle.d.bits.source)
assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra)
when (same_cycle_resp) {
assume((bundle.d.bits.opcode === responseMap(bundle.a.bits.opcode)) ||
(bundle.d.bits.opcode === responseMapSecondOption(bundle.a.bits.opcode)), "'D' channel contains improper opcode response" + extra)
assume((bundle.a.bits.size === bundle.d.bits.size), "'D' channel contains improper response size" + extra)
} .otherwise {
assume((bundle.d.bits.opcode === responseMap(a_opcode_lookup)) ||
(bundle.d.bits.opcode === responseMapSecondOption(a_opcode_lookup)), "'D' channel contains improper opcode response" + extra)
assume((bundle.d.bits.size === a_size_lookup), "'D' channel contains improper response size" + extra)
}
}
when(bundle.d.valid && d_first && a_first && bundle.a.valid && (bundle.a.bits.source === bundle.d.bits.source) && !d_release_ack) {
assume((!bundle.d.ready) || bundle.a.ready, "ready check")
}
if (edge.manager.minLatency > 0) {
assume(a_set_wo_ready =/= d_clr_wo_ready || !a_set_wo_ready.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra)
}
inflight := (inflight | a_set) & ~d_clr
inflight_opcodes := (inflight_opcodes | a_opcodes_set) & ~d_opcodes_clr
inflight_sizes := (inflight_sizes | a_sizes_set) & ~d_sizes_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeCDSource(bundle: TLBundle, edge: TLEdge): Unit = {
val c_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset)
val c_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything
val log_c_opcode_bus_size = log2Ceil(c_opcode_bus_size)
val log_c_size_bus_size = log2Ceil(c_size_bus_size)
def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits
val inflight = RegInit(0.U((2 max edge.client.endSourceId).W))
val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
inflight.suggestName("inflight")
inflight_opcodes.suggestName("inflight_opcodes")
inflight_sizes.suggestName("inflight_sizes")
val c_first = edge.first(bundle.c.bits, bundle.c.fire)
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
c_first.suggestName("c_first")
d_first.suggestName("d_first")
val c_set = WireInit(0.U(edge.client.endSourceId.W))
val c_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
val c_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val c_sizes_set = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
c_set.suggestName("c_set")
c_set_wo_ready.suggestName("c_set_wo_ready")
c_opcodes_set.suggestName("c_opcodes_set")
c_sizes_set.suggestName("c_sizes_set")
val c_opcode_lookup = WireInit(0.U((1 << log_c_opcode_bus_size).W))
val c_size_lookup = WireInit(0.U((1 << log_c_size_bus_size).W))
c_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_c_opcode_bus_size.U) & size_to_numfullbits(1.U << log_c_opcode_bus_size.U)) >> 1.U
c_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_c_size_bus_size.U) & size_to_numfullbits(1.U << log_c_size_bus_size.U)) >> 1.U
c_opcode_lookup.suggestName("c_opcode_lookup")
c_size_lookup.suggestName("c_size_lookup")
val c_opcodes_set_interm = WireInit(0.U(c_opcode_bus_size.W))
val c_sizes_set_interm = WireInit(0.U(c_size_bus_size.W))
c_opcodes_set_interm.suggestName("c_opcodes_set_interm")
c_sizes_set_interm.suggestName("c_sizes_set_interm")
when (bundle.c.valid && c_first && edge.isRequest(bundle.c.bits)) {
c_set_wo_ready := UIntToOH(bundle.c.bits.source)
}
when (bundle.c.fire && c_first && edge.isRequest(bundle.c.bits)) {
c_set := UIntToOH(bundle.c.bits.source)
c_opcodes_set_interm := (bundle.c.bits.opcode << 1.U) | 1.U
c_sizes_set_interm := (bundle.c.bits.size << 1.U) | 1.U
c_opcodes_set := (c_opcodes_set_interm) << (bundle.c.bits.source << log_c_opcode_bus_size.U)
c_sizes_set := (c_sizes_set_interm) << (bundle.c.bits.source << log_c_size_bus_size.U)
monAssert(!inflight(bundle.c.bits.source), "'C' channel re-used a source ID" + extra)
}
val c_probe_ack = bundle.c.bits.opcode === TLMessages.ProbeAck || bundle.c.bits.opcode === TLMessages.ProbeAckData
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
d_clr.suggestName("d_clr")
d_clr_wo_ready.suggestName("d_clr_wo_ready")
d_opcodes_clr.suggestName("d_opcodes_clr")
d_sizes_clr.suggestName("d_sizes_clr")
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
d_clr_wo_ready := UIntToOH(bundle.d.bits.source)
}
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
d_opcodes_clr := size_to_numfullbits(1.U << log_c_opcode_bus_size.U) << (bundle.d.bits.source << log_c_opcode_bus_size.U)
d_sizes_clr := size_to_numfullbits(1.U << log_c_size_bus_size.U) << (bundle.d.bits.source << log_c_size_bus_size.U)
}
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
val same_cycle_resp = bundle.c.valid && c_first && edge.isRequest(bundle.c.bits) && (bundle.c.bits.source === bundle.d.bits.source)
assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra)
when (same_cycle_resp) {
assume((bundle.d.bits.size === bundle.c.bits.size), "'D' channel contains improper response size" + extra)
} .otherwise {
assume((bundle.d.bits.size === c_size_lookup), "'D' channel contains improper response size" + extra)
}
}
when(bundle.d.valid && d_first && c_first && bundle.c.valid && (bundle.c.bits.source === bundle.d.bits.source) && d_release_ack && !c_probe_ack) {
assume((!bundle.d.ready) || bundle.c.ready, "ready check")
}
if (edge.manager.minLatency > 0) {
when (c_set_wo_ready.orR) {
assume(c_set_wo_ready =/= d_clr_wo_ready, s"'C' and 'D' concurrent, despite minlatency > 0" + extra)
}
}
inflight := (inflight | c_set) & ~d_clr
inflight_opcodes := (inflight_opcodes | c_opcodes_set) & ~d_opcodes_clr
inflight_sizes := (inflight_sizes | c_sizes_set) & ~d_sizes_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.c.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeDESink(bundle: TLBundle, edge: TLEdge): Unit = {
val inflight = RegInit(0.U(edge.manager.endSinkId.W))
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
val e_first = true.B
val d_set = WireInit(0.U(edge.manager.endSinkId.W))
when (bundle.d.fire && d_first && edge.isRequest(bundle.d.bits)) {
d_set := UIntToOH(bundle.d.bits.sink)
assume(!inflight(bundle.d.bits.sink), "'D' channel re-used a sink ID" + extra)
}
val e_clr = WireInit(0.U(edge.manager.endSinkId.W))
when (bundle.e.fire && e_first && edge.isResponse(bundle.e.bits)) {
e_clr := UIntToOH(bundle.e.bits.sink)
monAssert((d_set | inflight)(bundle.e.bits.sink), "'E' channel acknowledged for nothing inflight" + extra)
}
// edge.client.minLatency applies to BC, not DE
inflight := (inflight | d_set) & ~e_clr
}
def legalizeUnique(bundle: TLBundle, edge: TLEdge): Unit = {
val sourceBits = log2Ceil(edge.client.endSourceId)
val tooBig = 14 // >16kB worth of flight information gets to be too much
if (sourceBits > tooBig) {
println(s"WARNING: TLMonitor instantiated on a bus with source bits (${sourceBits}) > ${tooBig}; A=>D transaction flight will not be checked")
} else {
if (args.edge.params(TestplanTestType).simulation) {
if (args.edge.params(TLMonitorStrictMode)) {
legalizeADSource(bundle, edge)
legalizeCDSource(bundle, edge)
} else {
legalizeADSourceOld(bundle, edge)
}
}
if (args.edge.params(TestplanTestType).formal) {
legalizeADSourceFormal(bundle, edge)
}
}
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
// legalizeBCSourceAddress(bundle, edge) // too much state needed to synthesize...
val sinkBits = log2Ceil(edge.manager.endSinkId)
if (sinkBits > tooBig) {
println(s"WARNING: TLMonitor instantiated on a bus with sink bits (${sinkBits}) > ${tooBig}; D=>E transaction flight will not be checked")
} else {
legalizeDESink(bundle, edge)
}
}
}
def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit = {
legalizeFormat (bundle, edge)
legalizeMultibeat (bundle, edge)
legalizeUnique (bundle, edge)
}
}
File Misc.scala:
// See LICENSE.Berkeley for license details.
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util._
import chisel3.util.random.LFSR
import org.chipsalliance.cde.config.Parameters
import scala.math._
class ParameterizedBundle(implicit p: Parameters) extends Bundle
trait Clocked extends Bundle {
val clock = Clock()
val reset = Bool()
}
object DecoupledHelper {
def apply(rvs: Bool*) = new DecoupledHelper(rvs)
}
class DecoupledHelper(val rvs: Seq[Bool]) {
def fire(exclude: Bool, includes: Bool*) = {
require(rvs.contains(exclude), "Excluded Bool not present in DecoupledHelper! Note that DecoupledHelper uses referential equality for exclusion! If you don't want to exclude anything, use fire()!")
(rvs.filter(_ ne exclude) ++ includes).reduce(_ && _)
}
def fire() = {
rvs.reduce(_ && _)
}
}
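// A minimal usage sketch (hypothetical signal names in, outA, outB): collect the
// ready/valid terms once and derive each fire condition by excluding that
// endpoint's own term; note the exclusion relies on passing the very same Bool
// reference that was given to the constructor.
//
//   val helper = DecoupledHelper(in.valid, outA.ready, outB.ready)
//   outA.valid := helper.fire(outA.ready) // all terms except outA.ready
//   outB.valid := helper.fire(outB.ready) // all terms except outB.ready
//   in.ready   := helper.fire(in.valid)   // all terms except in.valid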
object MuxT {
def apply[T <: Data, U <: Data](cond: Bool, con: (T, U), alt: (T, U)): (T, U) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2))
def apply[T <: Data, U <: Data, W <: Data](cond: Bool, con: (T, U, W), alt: (T, U, W)): (T, U, W) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3))
def apply[T <: Data, U <: Data, W <: Data, X <: Data](cond: Bool, con: (T, U, W, X), alt: (T, U, W, X)): (T, U, W, X) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3), Mux(cond, con._4, alt._4))
}
/** Creates a cascade of n MuxTs to search for a key value. */
object MuxTLookup {
def apply[S <: UInt, T <: Data, U <: Data](key: S, default: (T, U), mapping: Seq[(S, (T, U))]): (T, U) = {
var res = default
for ((k, v) <- mapping.reverse)
res = MuxT(k === key, v, res)
res
}
def apply[S <: UInt, T <: Data, U <: Data, W <: Data](key: S, default: (T, U, W), mapping: Seq[(S, (T, U, W))]): (T, U, W) = {
var res = default
for ((k, v) <- mapping.reverse)
res = MuxT(k === key, v, res)
res
}
}
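// Usage sketch (hypothetical key and values): select a (latency, writeable) pair
// for an opcode, falling back to the default when no key in the mapping matches.
//
//   val (latency, writeable) = MuxTLookup(opcode, (1.U, false.B),
//     Seq(0.U -> (2.U, true.B),
//         3.U -> (4.U, false.B)))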
object ValidMux {
def apply[T <: Data](v1: ValidIO[T], v2: ValidIO[T]*): ValidIO[T] = {
apply(v1 +: v2.toSeq)
}
def apply[T <: Data](valids: Seq[ValidIO[T]]): ValidIO[T] = {
val out = Wire(Valid(valids.head.bits.cloneType))
out.valid := valids.map(_.valid).reduce(_ || _)
out.bits := MuxCase(valids.head.bits,
valids.map(v => (v.valid -> v.bits)))
out
}
}
object Str
{
def apply(s: String): UInt = {
var i = BigInt(0)
require(s.forall(validChar _))
for (c <- s)
i = (i << 8) | c
i.U((s.length*8).W)
}
def apply(x: Char): UInt = {
require(validChar(x))
x.U(8.W)
}
def apply(x: UInt): UInt = apply(x, 10)
def apply(x: UInt, radix: Int): UInt = {
val rad = radix.U
val w = x.getWidth
require(w > 0)
var q = x
var s = digit(q % rad)
for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
q = q / rad
s = Cat(Mux((radix == 10).B && q === 0.U, Str(' '), digit(q % rad)), s)
}
s
}
def apply(x: SInt): UInt = apply(x, 10)
def apply(x: SInt, radix: Int): UInt = {
val neg = x < 0.S
val abs = x.abs.asUInt
if (radix != 10) {
Cat(Mux(neg, Str('-'), Str(' ')), Str(abs, radix))
} else {
val rad = radix.U
val w = abs.getWidth
require(w > 0)
var q = abs
var s = digit(q % rad)
var needSign = neg
for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
q = q / rad
val placeSpace = q === 0.U
val space = Mux(needSign, Str('-'), Str(' '))
needSign = needSign && !placeSpace
s = Cat(Mux(placeSpace, space, digit(q % rad)), s)
}
Cat(Mux(needSign, Str('-'), Str(' ')), s)
}
}
private def digit(d: UInt): UInt = Mux(d < 10.U, Str('0')+d, Str(('a'-10).toChar)+d)(7,0)
private def validChar(x: Char) = x == (x & 0xFF)
}
object Split
{
def apply(x: UInt, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n0), x.extract(n0-1,0))
}
def apply(x: UInt, n1: Int, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0))
}
def apply(x: UInt, n2: Int, n1: Int, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n2), x.extract(n2-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0))
}
}
object Random
{
def apply(mod: Int, random: UInt): UInt = {
if (isPow2(mod)) random.extract(log2Ceil(mod)-1,0)
else PriorityEncoder(partition(apply(1 << log2Up(mod*8), random), mod))
}
def apply(mod: Int): UInt = apply(mod, randomizer)
def oneHot(mod: Int, random: UInt): UInt = {
if (isPow2(mod)) UIntToOH(random(log2Up(mod)-1,0))
else PriorityEncoderOH(partition(apply(1 << log2Up(mod*8), random), mod)).asUInt
}
def oneHot(mod: Int): UInt = oneHot(mod, randomizer)
private def randomizer = LFSR(16)
private def partition(value: UInt, slices: Int) =
Seq.tabulate(slices)(i => value < (((i + 1) << value.getWidth) / slices).U)
}
object Majority {
def apply(in: Set[Bool]): Bool = {
val n = (in.size >> 1) + 1
val clauses = in.subsets(n).map(_.reduce(_ && _))
clauses.reduce(_ || _)
}
def apply(in: Seq[Bool]): Bool = apply(in.toSet)
def apply(in: UInt): Bool = apply(in.asBools.toSet)
}
object PopCountAtLeast {
private def two(x: UInt): (Bool, Bool) = x.getWidth match {
case 1 => (x.asBool, false.B)
case n =>
val half = x.getWidth / 2
val (leftOne, leftTwo) = two(x(half - 1, 0))
val (rightOne, rightTwo) = two(x(x.getWidth - 1, half))
(leftOne || rightOne, leftTwo || rightTwo || (leftOne && rightOne))
}
def apply(x: UInt, n: Int): Bool = n match {
case 0 => true.B
case 1 => x.orR
case 2 => two(x)._2
case 3 => PopCount(x) >= n.U
}
}
// This gets used everywhere, so make the smallest circuit possible ...
// Given an address and size, create a mask of beatBytes size
// eg: (0x3, 0, 4) => 0001, (0x3, 1, 4) => 0011, (0x3, 2, 4) => 1111
// groupBy applies an interleaved OR reduction; groupBy=2 takes 0010 => 01
object MaskGen {
def apply(addr_lo: UInt, lgSize: UInt, beatBytes: Int, groupBy: Int = 1): UInt = {
require (groupBy >= 1 && beatBytes >= groupBy)
require (isPow2(beatBytes) && isPow2(groupBy))
val lgBytes = log2Ceil(beatBytes)
val sizeOH = UIntToOH(lgSize | 0.U(log2Up(beatBytes).W), log2Up(beatBytes)) | (groupBy*2 - 1).U
def helper(i: Int): Seq[(Bool, Bool)] = {
if (i == 0) {
Seq((lgSize >= lgBytes.asUInt, true.B))
} else {
val sub = helper(i-1)
val size = sizeOH(lgBytes - i)
val bit = addr_lo(lgBytes - i)
val nbit = !bit
Seq.tabulate (1 << i) { j =>
val (sub_acc, sub_eq) = sub(j/2)
val eq = sub_eq && (if (j % 2 == 1) bit else nbit)
val acc = sub_acc || (size && eq)
(acc, eq)
}
}
}
if (groupBy == beatBytes) 1.U else
Cat(helper(lgBytes-log2Ceil(groupBy)).map(_._1).reverse)
}
}
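// Usage sketch (hypothetical operands): for an 8-byte beat, a 4-byte access at
// byte offset 4 enables byte lanes 7 down to 4, i.e. a mask of 0xF0.
//
//   val mask = MaskGen(addr_lo = 4.U, lgSize = 2.U, beatBytes = 8)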
File PlusArg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.experimental._
import chisel3.util.HasBlackBoxResource
@deprecated("This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05")
case class PlusArgInfo(default: BigInt, docstring: String)
/** Case class for PlusArg information
*
* @tparam A scala type of the PlusArg value
* @param default optional default value
* @param docstring text to include in the help
* @param doctype description of the Verilog type of the PlusArg value (e.g. STRING, INT)
*/
private case class PlusArgContainer[A](default: Option[A], docstring: String, doctype: String)
/** Typeclass for converting a type to a doctype string
* @tparam A some type
*/
trait Doctypeable[A] {
/** Return the doctype string for some option */
def toDoctype(a: Option[A]): String
}
/** Object containing implementations of the Doctypeable typeclass */
object Doctypes {
/** Converts an Int => "INT" */
implicit val intToDoctype = new Doctypeable[Int] { def toDoctype(a: Option[Int]) = "INT" }
/** Converts a BigInt => "INT" */
implicit val bigIntToDoctype = new Doctypeable[BigInt] { def toDoctype(a: Option[BigInt]) = "INT" }
/** Converts a String => "STRING" */
implicit val stringToDoctype = new Doctypeable[String] { def toDoctype(a: Option[String]) = "STRING" }
}
class plusarg_reader(val format: String, val default: BigInt, val docstring: String, val width: Int) extends BlackBox(Map(
"FORMAT" -> StringParam(format),
"DEFAULT" -> IntParam(default),
"WIDTH" -> IntParam(width)
)) with HasBlackBoxResource {
val io = IO(new Bundle {
val out = Output(UInt(width.W))
})
addResource("/vsrc/plusarg_reader.v")
}
/* This wrapper class has no outputs, making it clear it is a simulation-only construct */
class PlusArgTimeout(val format: String, val default: BigInt, val docstring: String, val width: Int) extends Module {
val io = IO(new Bundle {
val count = Input(UInt(width.W))
})
val max = Module(new plusarg_reader(format, default, docstring, width)).io.out
when (max > 0.U) {
assert (io.count < max, s"Timeout exceeded: $docstring")
}
}
import Doctypes._
object PlusArg
{
/** PlusArg("foo") will return 42.U if the simulation is run with +foo=42
* Do not use this as an initial register value. The value is set in an
* initial block and thus accessing it from another initial block is racy.
* Add a docstring to document the arg, which can be dumped in an elaboration
* pass.
*/
def apply(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32): UInt = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new plusarg_reader(name + "=%d", default, docstring, width)).io.out
}
/** PlusArg.timeout(name, default, docstring)(count) will use chisel.assert
* to kill the simulation when count exceeds the specified integer argument.
* Default 0 will never assert.
*/
def timeout(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32)(count: UInt): Unit = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new PlusArgTimeout(name + "=%d", default, docstring, width)).io.count := count
}
}
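// Usage sketch (hypothetical plusarg names and counter): read a runtime-tunable
// value and arm a timeout counter, both overridable from the simulator command
// line, e.g. +verbose=1 +max_cycles=100000.
//
//   val verbose = PlusArg("verbose", default = 0, docstring = "Enable extra printing")
//   PlusArg.timeout("max_cycles", docstring = "Abort after this many cycles")(cycleCount)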
object PlusArgArtefacts {
private var artefacts: Map[String, PlusArgContainer[_]] = Map.empty
/* Add a new PlusArg */
@deprecated(
"Use `Some(BigInt)` to specify a `default` value. This will be removed in Rocket Chip 2020.08",
"Rocket Chip 2020.05"
)
def append(name: String, default: BigInt, docstring: String): Unit = append(name, Some(default), docstring)
/** Add a new PlusArg
*
* @tparam A scala type of the PlusArg value
* @param name name for the PlusArg
* @param default optional default value
* @param docstring text to include in the help
*/
def append[A : Doctypeable](name: String, default: Option[A], docstring: String): Unit =
artefacts = artefacts ++
Map(name -> PlusArgContainer(default, docstring, implicitly[Doctypeable[A]].toDoctype(default)))
/* From plus args, generate help text */
private def serializeHelp_cHeader(tab: String = ""): String = artefacts
.map{ case(arg, info) =>
s"""|$tab+$arg=${info.doctype}\\n\\
|$tab${" "*20}${info.docstring}\\n\\
|""".stripMargin ++ info.default.map{ case default =>
s"$tab${" "*22}(default=${default})\\n\\\n"}.getOrElse("")
}.toSeq.mkString("\\n\\\n") ++ "\""
/* From plus args, generate a char array of their names */
private def serializeArray_cHeader(tab: String = ""): String = {
val prettyTab = tab + " " * 44 // Length of 'static const ...'
s"${tab}static const char * verilog_plusargs [] = {\\\n" ++
artefacts
.map{ case(arg, _) => s"""$prettyTab"$arg",\\\n""" }
.mkString("")++
s"${prettyTab}0};"
}
/* Generate C code to be included in emulator.cc that helps with
* argument parsing based on available Verilog PlusArgs */
def serialize_cHeader(): String =
s"""|#define PLUSARG_USAGE_OPTIONS \"EMULATOR VERILOG PLUSARGS\\n\\
|${serializeHelp_cHeader(" "*7)}
|${serializeArray_cHeader()}
|""".stripMargin
}
File package.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip
import chisel3._
import chisel3.util._
import scala.math.min
import scala.collection.{immutable, mutable}
package object util {
implicit class UnzippableOption[S, T](val x: Option[(S, T)]) {
def unzip = (x.map(_._1), x.map(_._2))
}
implicit class UIntIsOneOf(private val x: UInt) extends AnyVal {
def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR
def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq)
}
implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal {
/** Like Vec.apply(idx), but tolerates indices of mismatched width */
def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0))
}
implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal {
def apply(idx: UInt): T = {
if (x.size <= 1) {
x.head
} else if (!isPow2(x.size)) {
// For non-power-of-2 seqs, reflect elements to simplify decoder
(x ++ x.takeRight(x.size & -x.size)).toSeq(idx)
} else {
// Ignore MSBs of idx
val truncIdx =
if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx
else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0)
x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) }
}
}
def extract(idx: UInt): T = VecInit(x).extract(idx)
def asUInt: UInt = Cat(x.map(_.asUInt).reverse)
def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n)
def rotate(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n)
def rotateRight(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
}
// allow bitwise ops on Seq[Bool] just like UInt
implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal {
def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b }
def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b }
def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b }
def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x
def >> (n: Int): Seq[Bool] = x drop n
def unary_~ : Seq[Bool] = x.map(!_)
def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_)
def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_)
def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_)
private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B)
}
implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal {
def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable))
def getElements: Seq[Element] = x match {
case e: Element => Seq(e)
case a: Aggregate => a.getElements.flatMap(_.getElements)
}
}
/** Any Data subtype that has a Bool member named valid. */
type DataCanBeValid = Data { val valid: Bool }
implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal {
def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable)
}
implicit class StringToAugmentedString(private val x: String) extends AnyVal {
/** converts from camel case to underscores, also removing all spaces */
def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") {
case (acc, c) if c.isUpper => acc + "_" + c.toLower
case (acc, c) if c == ' ' => acc
case (acc, c) => acc + c
}
/** converts spaces or underscores to hyphens, also lowering case */
def kebab: String = x.toLowerCase map {
case ' ' => '-'
case '_' => '-'
case c => c
}
def named(name: Option[String]): String = {
x + name.map("_named_" + _ ).getOrElse("_with_no_name")
}
def named(name: String): String = named(Some(name))
}
implicit def uintToBitPat(x: UInt): BitPat = BitPat(x)
implicit def wcToUInt(c: WideCounter): UInt = c.value
implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal {
def sextTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x)
}
def padTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(0.U((n - x.getWidth).W), x)
}
// shifts left by n if n >= 0, or right by -n if n < 0
def << (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << n(w-1, 0)
Mux(n(w), shifted >> (1 << w), shifted)
}
// shifts right by n if n >= 0, or left by -n if n < 0
def >> (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << (1 << w) >> n(w-1, 0)
Mux(n(w), shifted, shifted >> (1 << w))
}
// Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts
def extract(hi: Int, lo: Int): UInt = {
require(hi >= lo-1)
if (hi == lo-1) 0.U
else x(hi, lo)
}
// Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts
def extractOption(hi: Int, lo: Int): Option[UInt] = {
require(hi >= lo-1)
if (hi == lo-1) None
else Some(x(hi, lo))
}
// like x & ~y, but first truncate or zero-extend y to x's width
def andNot(y: UInt): UInt = x & ~(y | (x & 0.U))
def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n)
def rotateRight(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r))
}
}
def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n))
def rotateLeft(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r))
}
}
// compute (this + y) % n, given (this < n) and (y < n)
def addWrap(y: UInt, n: Int): UInt = {
val z = x +& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0)
}
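// e.g. (illustrative): with n = 6, 4.U.addWrap(3.U, 6) evaluates to 1.U, and the
// matching 1.U.subWrap(3.U, 6) below evaluates back to 4.U.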
// compute (this - y) % n, given (this < n) and (y < n)
def subWrap(y: UInt, n: Int): UInt = {
val z = x -& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0)
}
def grouped(width: Int): Seq[UInt] =
(0 until x.getWidth by width).map(base => x(base + width - 1, base))
def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds
def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x)
// Like >=, but prevents x-prop for ('x >= 0)
def >== (y: UInt): Bool = x >= y || y === 0.U
}
implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal {
def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y)
def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y)
}
implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal {
def toInt: Int = if (x) 1 else 0
// this one's snagged from scalaz
def option[T](z: => T): Option[T] = if (x) Some(z) else None
}
implicit class IntToAugmentedInt(private val x: Int) extends AnyVal {
// exact log2
def log2: Int = {
require(isPow2(x))
log2Ceil(x)
}
}
def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x)
def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x))
def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0)
def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1)
def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None
// Fill 1s from low bits to high bits
def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth)
def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0))
helper(1, x)(width-1, 0)
}
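// e.g. (illustrative): leftOR("b00100".U(5.W)) yields "b11100".U, while rightOR
// below applied to the same value yields "b00111".U.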
// Fill 1s from high bits to low bits
def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth)
def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x >> s))
helper(1, x)(width-1, 0)
}
def OptimizationBarrier[T <: Data](in: T): T = {
val barrier = Module(new Module {
val io = IO(new Bundle {
val x = Input(chiselTypeOf(in))
val y = Output(chiselTypeOf(in))
})
io.y := io.x
override def desiredName = s"OptimizationBarrier_${in.typeName}"
})
barrier.io.x := in
barrier.io.y
}
/** Similar to Seq.groupBy except this returns a Seq instead of a Map
* Useful for deterministic code generation
*/
def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = {
val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]]
for (x <- xs) {
val key = f(x)
val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A])
l += x
}
map.view.map({ case (k, vs) => k -> vs.toList }).toList
}
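// e.g. (illustrative): groupByIntoSeq(Seq(1, 2, 3, 4))(_ % 2) returns
// Seq(1 -> Seq(1, 3), 0 -> Seq(2, 4)), with key order fixed by first appearance
// rather than by hash order.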
def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match {
case 1 => List.fill(n)(in.head)
case x if x == n => in
case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in")
}
// HeterogeneousBag moved to standalone diplomacy
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts)
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag
}
File Bundles.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import freechips.rocketchip.util._
import scala.collection.immutable.ListMap
import chisel3.util.Decoupled
import chisel3.util.DecoupledIO
import chisel3.reflect.DataMirror
abstract class TLBundleBase(val params: TLBundleParameters) extends Bundle
// common combos in lazy policy:
// Put + Acquire
// Release + AccessAck
object TLMessages
{
// A B C D E
def PutFullData = 0.U // . . => AccessAck
def PutPartialData = 1.U // . . => AccessAck
def ArithmeticData = 2.U // . . => AccessAckData
def LogicalData = 3.U // . . => AccessAckData
def Get = 4.U // . . => AccessAckData
def Hint = 5.U // . . => HintAck
def AcquireBlock = 6.U // . => Grant[Data]
def AcquirePerm = 7.U // . => Grant[Data]
def Probe = 6.U // . => ProbeAck[Data]
def AccessAck = 0.U // . .
def AccessAckData = 1.U // . .
def HintAck = 2.U // . .
def ProbeAck = 4.U // .
def ProbeAckData = 5.U // .
def Release = 6.U // . => ReleaseAck
def ReleaseData = 7.U // . => ReleaseAck
def Grant = 4.U // . => GrantAck
def GrantData = 5.U // . => GrantAck
def ReleaseAck = 6.U // .
def GrantAck = 0.U // .
def isA(x: UInt) = x <= AcquirePerm
def isB(x: UInt) = x <= Probe
def isC(x: UInt) = x <= ReleaseData
def isD(x: UInt) = x <= ReleaseAck
def adResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, Grant, Grant)
def bcResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, ProbeAck, ProbeAck)
def a = Seq( ("PutFullData",TLPermissions.PermMsgReserved),
("PutPartialData",TLPermissions.PermMsgReserved),
("ArithmeticData",TLAtomics.ArithMsg),
("LogicalData",TLAtomics.LogicMsg),
("Get",TLPermissions.PermMsgReserved),
("Hint",TLHints.HintsMsg),
("AcquireBlock",TLPermissions.PermMsgGrow),
("AcquirePerm",TLPermissions.PermMsgGrow))
def b = Seq( ("PutFullData",TLPermissions.PermMsgReserved),
("PutPartialData",TLPermissions.PermMsgReserved),
("ArithmeticData",TLAtomics.ArithMsg),
("LogicalData",TLAtomics.LogicMsg),
("Get",TLPermissions.PermMsgReserved),
("Hint",TLHints.HintsMsg),
("Probe",TLPermissions.PermMsgCap))
def c = Seq( ("AccessAck",TLPermissions.PermMsgReserved),
("AccessAckData",TLPermissions.PermMsgReserved),
("HintAck",TLPermissions.PermMsgReserved),
("Invalid Opcode",TLPermissions.PermMsgReserved),
("ProbeAck",TLPermissions.PermMsgReport),
("ProbeAckData",TLPermissions.PermMsgReport),
("Release",TLPermissions.PermMsgReport),
("ReleaseData",TLPermissions.PermMsgReport))
def d = Seq( ("AccessAck",TLPermissions.PermMsgReserved),
("AccessAckData",TLPermissions.PermMsgReserved),
("HintAck",TLPermissions.PermMsgReserved),
("Invalid Opcode",TLPermissions.PermMsgReserved),
("Grant",TLPermissions.PermMsgCap),
("GrantData",TLPermissions.PermMsgCap),
("ReleaseAck",TLPermissions.PermMsgReserved))
}
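// Illustrative sketch (added for clarity; not part of the original file): a
// tiny module showing how adResponse is typically consumed. Indexed by an
// A-channel opcode, it yields the D-channel opcode a slave is expected to
// answer with, e.g. Get -> AccessAckData, AcquireBlock -> Grant. The module
// name is hypothetical.
class ExpectedResponseExample extends Module {
  val io = IO(new Bundle {
    val aOpcode = Input(UInt(3.W))
    val dOpcode = Output(UInt(3.W))
  })
  // adResponse has one entry per A opcode (0..7)
  io.dOpcode := TLMessages.adResponse(io.aOpcode)
}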
/**
* The three primary TileLink permissions are:
 *   (T)runk: the agent is (or is on the inwards path to) the global point of serialization.
 *   (B)ranch: the agent is on an outwards path from a Trunk and may hold a read-only copy.
 *   (N)one: the agent holds no permissions on the block.
 * These permissions are permuted by transfer operations in various ways.
 * Operations can cap permissions, request that they be grown or shrunk,
 * or report on their current status.
*/
object TLPermissions
{
val aWidth = 2
val bdWidth = 2
val cWidth = 3
  // Cap types (Grant = new permissions, Probe = permissions <= target)
def toT = 0.U(bdWidth.W)
def toB = 1.U(bdWidth.W)
def toN = 2.U(bdWidth.W)
def isCap(x: UInt) = x <= toN
// Grow types (Acquire = permissions >= target)
def NtoB = 0.U(aWidth.W)
def NtoT = 1.U(aWidth.W)
def BtoT = 2.U(aWidth.W)
def isGrow(x: UInt) = x <= BtoT
// Shrink types (ProbeAck, Release)
def TtoB = 0.U(cWidth.W)
def TtoN = 1.U(cWidth.W)
def BtoN = 2.U(cWidth.W)
def isShrink(x: UInt) = x <= BtoN
// Report types (ProbeAck, Release)
def TtoT = 3.U(cWidth.W)
def BtoB = 4.U(cWidth.W)
def NtoN = 5.U(cWidth.W)
def isReport(x: UInt) = x <= NtoN
def PermMsgGrow:Seq[String] = Seq("Grow NtoB", "Grow NtoT", "Grow BtoT")
def PermMsgCap:Seq[String] = Seq("Cap toT", "Cap toB", "Cap toN")
  def PermMsgReport:Seq[String] = Seq("Shrink TtoB", "Shrink TtoN", "Shrink BtoN", "Report TtoT", "Report BtoB", "Report NtoN")
def PermMsgReserved:Seq[String] = Seq("Reserved")
}
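// Illustrative sketch (added for clarity; not part of the original file): how
// the cap (B/D channel) and shrink/report (C channel) encodings pair up. A
// client holding a Branch copy answers a Probe toN by shrinking BtoN, and may
// keep its copy for toB/toT, reporting BtoB. The module name is hypothetical.
class ProbeParamExample extends Module {
  val io = IO(new Bundle {
    val probeParam = Input(UInt(TLPermissions.bdWidth.W)) // toT / toB / toN
    val ackParam   = Output(UInt(TLPermissions.cWidth.W)) // shrink or report
  })
  io.ackParam := Mux(io.probeParam === TLPermissions.toN,
    TLPermissions.BtoN, TLPermissions.BtoB)
}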
object TLAtomics
{
val width = 3
// Arithmetic types
def MIN = 0.U(width.W)
def MAX = 1.U(width.W)
def MINU = 2.U(width.W)
def MAXU = 3.U(width.W)
def ADD = 4.U(width.W)
def isArithmetic(x: UInt) = x <= ADD
// Logical types
def XOR = 0.U(width.W)
def OR = 1.U(width.W)
def AND = 2.U(width.W)
def SWAP = 3.U(width.W)
def isLogical(x: UInt) = x <= SWAP
def ArithMsg:Seq[String] = Seq("MIN", "MAX", "MINU", "MAXU", "ADD")
def LogicMsg:Seq[String] = Seq("XOR", "OR", "AND", "SWAP")
}
object TLHints
{
val width = 1
def PREFETCH_READ = 0.U(width.W)
def PREFETCH_WRITE = 1.U(width.W)
def isHints(x: UInt) = x <= PREFETCH_WRITE
def HintsMsg:Seq[String] = Seq("PrefetchRead", "PrefetchWrite")
}
sealed trait TLChannel extends TLBundleBase {
val channelName: String
}
sealed trait TLDataChannel extends TLChannel
sealed trait TLAddrChannel extends TLDataChannel
final class TLBundleA(params: TLBundleParameters)
extends TLBundleBase(params) with TLAddrChannel
{
override def typeName = s"TLBundleA_${params.shortName}"
val channelName = "'A' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(List(TLAtomics.width, TLPermissions.aWidth, TLHints.width).max.W) // amo_opcode || grow perms || hint
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // from
val address = UInt(params.addressBits.W) // to
val user = BundleMap(params.requestFields)
val echo = BundleMap(params.echoFields)
// variable fields during multibeat:
val mask = UInt((params.dataBits/8).W)
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleB(params: TLBundleParameters)
extends TLBundleBase(params) with TLAddrChannel
{
override def typeName = s"TLBundleB_${params.shortName}"
val channelName = "'B' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(TLPermissions.bdWidth.W) // cap perms
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // to
val address = UInt(params.addressBits.W) // from
// variable fields during multibeat:
val mask = UInt((params.dataBits/8).W)
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleC(params: TLBundleParameters)
extends TLBundleBase(params) with TLAddrChannel
{
override def typeName = s"TLBundleC_${params.shortName}"
val channelName = "'C' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(TLPermissions.cWidth.W) // shrink or report perms
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // from
val address = UInt(params.addressBits.W) // to
val user = BundleMap(params.requestFields)
val echo = BundleMap(params.echoFields)
// variable fields during multibeat:
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleD(params: TLBundleParameters)
extends TLBundleBase(params) with TLDataChannel
{
override def typeName = s"TLBundleD_${params.shortName}"
val channelName = "'D' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(TLPermissions.bdWidth.W) // cap perms
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // to
val sink = UInt(params.sinkBits.W) // from
val denied = Bool() // implies corrupt iff *Data
val user = BundleMap(params.responseFields)
val echo = BundleMap(params.echoFields)
// variable fields during multibeat:
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleE(params: TLBundleParameters)
extends TLBundleBase(params) with TLChannel
{
override def typeName = s"TLBundleE_${params.shortName}"
val channelName = "'E' channel"
val sink = UInt(params.sinkBits.W) // to
}
class TLBundle(val params: TLBundleParameters) extends Record
{
// Emulate a Bundle with elements abcde or ad depending on params.hasBCE
private val optA = Some (Decoupled(new TLBundleA(params)))
private val optB = params.hasBCE.option(Flipped(Decoupled(new TLBundleB(params))))
private val optC = params.hasBCE.option(Decoupled(new TLBundleC(params)))
private val optD = Some (Flipped(Decoupled(new TLBundleD(params))))
private val optE = params.hasBCE.option(Decoupled(new TLBundleE(params)))
def a: DecoupledIO[TLBundleA] = optA.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleA(params)))))
def b: DecoupledIO[TLBundleB] = optB.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleB(params)))))
def c: DecoupledIO[TLBundleC] = optC.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleC(params)))))
def d: DecoupledIO[TLBundleD] = optD.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleD(params)))))
def e: DecoupledIO[TLBundleE] = optE.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleE(params)))))
val elements =
if (params.hasBCE) ListMap("e" -> e, "d" -> d, "c" -> c, "b" -> b, "a" -> a)
else ListMap("d" -> d, "a" -> a)
def tieoff(): Unit = {
DataMirror.specifiedDirectionOf(a.ready) match {
case SpecifiedDirection.Input =>
a.ready := false.B
c.ready := false.B
e.ready := false.B
b.valid := false.B
d.valid := false.B
case SpecifiedDirection.Output =>
a.valid := false.B
c.valid := false.B
e.valid := false.B
b.ready := false.B
d.ready := false.B
case _ =>
}
}
}
object TLBundle
{
def apply(params: TLBundleParameters) = new TLBundle(params)
}
class TLAsyncBundleBase(val params: TLAsyncBundleParameters) extends Bundle
class TLAsyncBundle(params: TLAsyncBundleParameters) extends TLAsyncBundleBase(params)
{
val a = new AsyncBundle(new TLBundleA(params.base), params.async)
val b = Flipped(new AsyncBundle(new TLBundleB(params.base), params.async))
val c = new AsyncBundle(new TLBundleC(params.base), params.async)
val d = Flipped(new AsyncBundle(new TLBundleD(params.base), params.async))
val e = new AsyncBundle(new TLBundleE(params.base), params.async)
}
class TLRationalBundle(params: TLBundleParameters) extends TLBundleBase(params)
{
val a = RationalIO(new TLBundleA(params))
val b = Flipped(RationalIO(new TLBundleB(params)))
val c = RationalIO(new TLBundleC(params))
val d = Flipped(RationalIO(new TLBundleD(params)))
val e = RationalIO(new TLBundleE(params))
}
class TLCreditedBundle(params: TLBundleParameters) extends TLBundleBase(params)
{
val a = CreditedIO(new TLBundleA(params))
val b = Flipped(CreditedIO(new TLBundleB(params)))
val c = CreditedIO(new TLBundleC(params))
val d = Flipped(CreditedIO(new TLBundleD(params)))
val e = CreditedIO(new TLBundleE(params))
}
File Parameters.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.diplomacy
import chisel3._
import chisel3.util.{DecoupledIO, Queue, ReadyValidIO, isPow2, log2Ceil, log2Floor}
import freechips.rocketchip.util.ShiftQueue
/** Options for describing the attributes of memory regions */
object RegionType {
// Define the 'more relaxed than' ordering
val cases = Seq(CACHED, TRACKED, UNCACHED, IDEMPOTENT, VOLATILE, PUT_EFFECTS, GET_EFFECTS)
sealed trait T extends Ordered[T] {
def compare(that: T): Int = cases.indexOf(that) compare cases.indexOf(this)
}
case object CACHED extends T // an intermediate agent may have cached a copy of the region for you
case object TRACKED extends T // the region may have been cached by another master, but coherence is being provided
case object UNCACHED extends T // the region has not been cached yet, but should be cached when possible
case object IDEMPOTENT extends T // gets return most recently put content, but content should not be cached
case object VOLATILE extends T // content may change without a put, but puts and gets have no side effects
case object PUT_EFFECTS extends T // puts produce side effects and so must not be combined/delayed
case object GET_EFFECTS extends T // gets produce side effects and so must not be issued speculatively
}
// A half-open range [start, end); may be empty when start == end
case class IdRange(start: Int, end: Int) extends Ordered[IdRange]
{
require (start >= 0, s"Ids cannot be negative, but got: $start.")
require (start <= end, "Id ranges cannot be negative.")
def compare(x: IdRange) = {
val primary = (this.start - x.start).signum
val secondary = (x.end - this.end).signum
if (primary != 0) primary else secondary
}
def overlaps(x: IdRange) = start < x.end && x.start < end
def contains(x: IdRange) = start <= x.start && x.end <= end
def contains(x: Int) = start <= x && x < end
def contains(x: UInt) =
if (size == 0) {
false.B
} else if (size == 1) { // simple comparison
x === start.U
} else {
// find index of largest different bit
val largestDeltaBit = log2Floor(start ^ (end-1))
val smallestCommonBit = largestDeltaBit + 1 // may not exist in x
val uncommonMask = (1 << smallestCommonBit) - 1
val uncommonBits = (x | 0.U(smallestCommonBit.W))(largestDeltaBit, 0)
// the prefix must match exactly (note: may shift ALL bits away)
(x >> smallestCommonBit) === (start >> smallestCommonBit).U &&
// firrtl constant prop range analysis can eliminate these two:
(start & uncommonMask).U <= uncommonBits &&
uncommonBits <= ((end-1) & uncommonMask).U
}
def shift(x: Int) = IdRange(start+x, end+x)
def size = end - start
def isEmpty = end == start
def range = start until end
}
object IdRange
{
def overlaps(s: Seq[IdRange]) = if (s.isEmpty) None else {
val ranges = s.sorted
(ranges.tail zip ranges.init) find { case (a, b) => a overlaps b }
}
}
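// Illustrative sketch (added for clarity; not part of the original file):
// IdRange is half-open, so IdRange(4, 8) covers source ids 4, 5, 6 and 7.
object IdRangeExample {
  private val r = IdRange(4, 8)
  require(r.contains(5) && !r.contains(8)) // 8 is excluded (half-open)
  require(r.size == 4 && !r.isEmpty)
  require(r.overlaps(IdRange(7, 10)) && !r.overlaps(IdRange(8, 10)))
}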
// A potentially empty inclusive range of 2-powers [min, max] (in bytes)
case class TransferSizes(min: Int, max: Int)
{
def this(x: Int) = this(x, x)
require (min <= max, s"Min transfer $min > max transfer $max")
  require (min >= 0 && max >= 0, s"TransferSizes must be non-negative, got: ($min, $max)")
require (max == 0 || isPow2(max), s"TransferSizes must be a power of 2, got: $max")
require (min == 0 || isPow2(min), s"TransferSizes must be a power of 2, got: $min")
require (max == 0 || min != 0, s"TransferSize 0 is forbidden unless (0,0), got: ($min, $max)")
def none = min == 0
def contains(x: Int) = isPow2(x) && min <= x && x <= max
def containsLg(x: Int) = contains(1 << x)
def containsLg(x: UInt) =
if (none) false.B
else if (min == max) { log2Ceil(min).U === x }
else { log2Ceil(min).U <= x && x <= log2Ceil(max).U }
def contains(x: TransferSizes) = x.none || (min <= x.min && x.max <= max)
def intersect(x: TransferSizes) =
if (x.max < min || max < x.min) TransferSizes.none
else TransferSizes(scala.math.max(min, x.min), scala.math.min(max, x.max))
// Not a union, because the result may contain sizes contained by neither term
// NOT TO BE CONFUSED WITH COVERPOINTS
def mincover(x: TransferSizes) = {
if (none) {
x
} else if (x.none) {
this
} else {
TransferSizes(scala.math.min(min, x.min), scala.math.max(max, x.max))
}
}
override def toString() = "TransferSizes[%d, %d]".format(min, max)
}
object TransferSizes {
def apply(x: Int) = new TransferSizes(x)
val none = new TransferSizes(0)
def mincover(seq: Seq[TransferSizes]) = seq.foldLeft(none)(_ mincover _)
def intersect(seq: Seq[TransferSizes]) = seq.reduce(_ intersect _)
implicit def asBool(x: TransferSizes) = !x.none
}
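// Illustrative sketch (added for clarity; not part of the original file):
// TransferSizes is an inclusive range of power-of-two sizes, in bytes.
object TransferSizesExample {
  private val small = TransferSizes(1, 8)
  private val big   = TransferSizes(4, 64)
  require(small.contains(4) && !small.contains(3) && !small.contains(16))
  require((small intersect big) == TransferSizes(4, 8))  // overlap of the two
  require((small mincover big) == TransferSizes(1, 64))  // smallest covering range
}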
// AddressSets specify the address space managed by the manager
// Base is the base address, and mask are the bits consumed by the manager
// e.g: base=0x200, mask=0xff describes a device managing 0x200-0x2ff
// e.g: base=0x1000, mask=0xf0f describes a device managing 0x1000-0x100f, 0x1100-0x110f, ...
case class AddressSet(base: BigInt, mask: BigInt) extends Ordered[AddressSet]
{
// Forbid misaligned base address (and empty sets)
require ((base & mask) == 0, s"Mis-aligned AddressSets are forbidden, got: ${this.toString}")
require (base >= 0, s"AddressSet negative base is ambiguous: $base") // TL2 address widths are not fixed => negative is ambiguous
// We do allow negative mask (=> ignore all high bits)
def contains(x: BigInt) = ((x ^ base) & ~mask) == 0
def contains(x: UInt) = ((x ^ base.U).zext & (~mask).S) === 0.S
// turn x into an address contained in this set
def legalize(x: UInt): UInt = base.U | (mask.U & x)
// overlap iff bitwise: both care (~mask0 & ~mask1) => both equal (base0=base1)
def overlaps(x: AddressSet) = (~(mask | x.mask) & (base ^ x.base)) == 0
// contains iff bitwise: x.mask => mask && contains(x.base)
def contains(x: AddressSet) = ((x.mask | (base ^ x.base)) & ~mask) == 0
// The number of bytes to which the manager must be aligned
def alignment = ((mask + 1) & ~mask)
// Is this a contiguous memory range
def contiguous = alignment == mask+1
def finite = mask >= 0
def max = { require (finite, "Max cannot be calculated on infinite mask"); base | mask }
// Widen the match function to ignore all bits in imask
def widen(imask: BigInt) = AddressSet(base & ~imask, mask | imask)
// Return an AddressSet that only contains the addresses both sets contain
def intersect(x: AddressSet): Option[AddressSet] = {
if (!overlaps(x)) {
None
} else {
val r_mask = mask & x.mask
val r_base = base | x.base
Some(AddressSet(r_base, r_mask))
}
}
def subtract(x: AddressSet): Seq[AddressSet] = {
intersect(x) match {
case None => Seq(this)
case Some(remove) => AddressSet.enumerateBits(mask & ~remove.mask).map { bit =>
val nmask = (mask & (bit-1)) | remove.mask
val nbase = (remove.base ^ bit) & ~nmask
AddressSet(nbase, nmask)
}
}
}
// AddressSets have one natural Ordering (the containment order, if contiguous)
def compare(x: AddressSet) = {
val primary = (this.base - x.base).signum // smallest address first
val secondary = (x.mask - this.mask).signum // largest mask first
if (primary != 0) primary else secondary
}
// We always want to see things in hex
override def toString() = {
if (mask >= 0) {
"AddressSet(0x%x, 0x%x)".format(base, mask)
} else {
"AddressSet(0x%x, ~0x%x)".format(base, ~mask)
}
}
def toRanges = {
require (finite, "Ranges cannot be calculated on infinite mask")
val size = alignment
val fragments = mask & ~(size-1)
val bits = bitIndexes(fragments)
(BigInt(0) until (BigInt(1) << bits.size)).map { i =>
val off = bitIndexes(i).foldLeft(base) { case (a, b) => a.setBit(bits(b)) }
AddressRange(off, size)
}
}
}
object AddressSet
{
val everything = AddressSet(0, -1)
def misaligned(base: BigInt, size: BigInt, tail: Seq[AddressSet] = Seq()): Seq[AddressSet] = {
if (size == 0) tail.reverse else {
val maxBaseAlignment = base & (-base) // 0 for infinite (LSB)
val maxSizeAlignment = BigInt(1) << log2Floor(size) // MSB of size
val step =
if (maxBaseAlignment == 0 || maxBaseAlignment > maxSizeAlignment)
maxSizeAlignment else maxBaseAlignment
misaligned(base+step, size-step, AddressSet(base, step-1) +: tail)
}
}
def unify(seq: Seq[AddressSet], bit: BigInt): Seq[AddressSet] = {
// Pair terms up by ignoring 'bit'
seq.distinct.groupBy(x => x.copy(base = x.base & ~bit)).map { case (key, seq) =>
if (seq.size == 1) {
seq.head // singleton -> unaffected
} else {
key.copy(mask = key.mask | bit) // pair - widen mask by bit
}
}.toList
}
def unify(seq: Seq[AddressSet]): Seq[AddressSet] = {
val bits = seq.map(_.base).foldLeft(BigInt(0))(_ | _)
AddressSet.enumerateBits(bits).foldLeft(seq) { case (acc, bit) => unify(acc, bit) }.sorted
}
def enumerateMask(mask: BigInt): Seq[BigInt] = {
def helper(id: BigInt, tail: Seq[BigInt]): Seq[BigInt] =
if (id == mask) (id +: tail).reverse else helper(((~mask | id) + 1) & mask, id +: tail)
helper(0, Nil)
}
def enumerateBits(mask: BigInt): Seq[BigInt] = {
def helper(x: BigInt): Seq[BigInt] = {
if (x == 0) {
Nil
} else {
val bit = x & (-x)
bit +: helper(x & ~bit)
}
}
helper(mask)
}
}
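// Illustrative sketch (added for clarity; not part of the original file): the
// base/mask encoding from the comment above. AddressSet(0x1000, 0xf0f) covers
// 0x1000-0x100f, 0x1100-0x110f, ..., because mask bits are "don't care" bits.
object AddressSetExample {
  private val striped = AddressSet(0x1000, 0xf0f)
  require(striped.contains(BigInt(0x1104)))  // inside the second stripe
  require(!striped.contains(BigInt(0x1010))) // bit 4 is not in the mask
  require(striped.alignment == 16)           // each stripe is 16 bytes
  require(!striped.contiguous)               // stripes, so not one flat range
}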
case class BufferParams(depth: Int, flow: Boolean, pipe: Boolean)
{
require (depth >= 0, "Buffer depth must be >= 0")
def isDefined = depth > 0
def latency = if (isDefined && !flow) 1 else 0
def apply[T <: Data](x: DecoupledIO[T]) =
if (isDefined) Queue(x, depth, flow=flow, pipe=pipe)
else x
def irrevocable[T <: Data](x: ReadyValidIO[T]) =
if (isDefined) Queue.irrevocable(x, depth, flow=flow, pipe=pipe)
else x
def sq[T <: Data](x: DecoupledIO[T]) =
if (!isDefined) x else {
val sq = Module(new ShiftQueue(x.bits, depth, flow=flow, pipe=pipe))
sq.io.enq <> x
sq.io.deq
}
override def toString() = "BufferParams:%d%s%s".format(depth, if (flow) "F" else "", if (pipe) "P" else "")
}
object BufferParams
{
implicit def apply(depth: Int): BufferParams = BufferParams(depth, false, false)
val default = BufferParams(2)
val none = BufferParams(0)
val flow = BufferParams(1, true, false)
val pipe = BufferParams(1, false, true)
}
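// Illustrative sketch (added for clarity; not part of the original file): the
// standard presets and their latency through the generated Queue.
object BufferParamsExample {
  require(BufferParams.default.latency == 1) // depth-2, fully registered
  require(BufferParams.flow.latency == 0)    // combinational flow-through
  require(BufferParams.none.latency == 0)    // no queue at all
  require(!BufferParams.none.isDefined && BufferParams.pipe.isDefined)
}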
case class TriStateValue(value: Boolean, set: Boolean)
{
def update(orig: Boolean) = if (set) value else orig
}
object TriStateValue
{
implicit def apply(value: Boolean): TriStateValue = TriStateValue(value, true)
def unset = TriStateValue(false, false)
}
trait DirectedBuffers[T] {
def copyIn(x: BufferParams): T
def copyOut(x: BufferParams): T
def copyInOut(x: BufferParams): T
}
trait IdMapEntry {
def name: String
def from: IdRange
def to: IdRange
def isCache: Boolean
def requestFifo: Boolean
def maxTransactionsInFlight: Option[Int]
def pretty(fmt: String) =
if (from ne to) { // if the subclass uses the same reference for both from and to, assume its format string has an arity of 5
fmt.format(to.start, to.end, from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "")
} else {
fmt.format(from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "")
}
}
abstract class IdMap[T <: IdMapEntry] {
protected val fmt: String
val mapping: Seq[T]
def pretty: String = mapping.map(_.pretty(fmt)).mkString(",\n")
}
File Edges.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.util._
class TLEdge(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdgeParameters(client, manager, params, sourceInfo)
{
def isAligned(address: UInt, lgSize: UInt): Bool = {
if (maxLgSize == 0) true.B else {
val mask = UIntToOH1(lgSize, maxLgSize)
(address & mask) === 0.U
}
}
def mask(address: UInt, lgSize: UInt): UInt =
MaskGen(address, lgSize, manager.beatBytes)
def staticHasData(bundle: TLChannel): Option[Boolean] = {
bundle match {
case _:TLBundleA => {
// Do there exist A messages with Data?
val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial
// Do there exist A messages without Data?
val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint
// Statically optimize the case where hasData is a constant
if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None
}
case _:TLBundleB => {
// Do there exist B messages with Data?
val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial
// Do there exist B messages without Data?
val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint
// Statically optimize the case where hasData is a constant
if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None
}
case _:TLBundleC => {
        // Do there exist C messages with Data?
val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe
// Do there exist C messages without Data?
val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe
if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None
}
case _:TLBundleD => {
        // Do there exist D messages with Data?
val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB
// Do there exist D messages without Data?
val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT
if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None
}
case _:TLBundleE => Some(false)
}
}
def isRequest(x: TLChannel): Bool = {
x match {
case a: TLBundleA => true.B
case b: TLBundleB => true.B
case c: TLBundleC => c.opcode(2) && c.opcode(1)
// opcode === TLMessages.Release ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(2) && !d.opcode(1)
// opcode === TLMessages.Grant ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
}
def isResponse(x: TLChannel): Bool = {
x match {
case a: TLBundleA => false.B
case b: TLBundleB => false.B
case c: TLBundleC => !c.opcode(2) || !c.opcode(1)
// opcode =/= TLMessages.Release &&
// opcode =/= TLMessages.ReleaseData
case d: TLBundleD => true.B // Grant isResponse + isRequest
case e: TLBundleE => true.B
}
}
def hasData(x: TLChannel): Bool = {
val opdata = x match {
case a: TLBundleA => !a.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case b: TLBundleB => !b.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case c: TLBundleC => c.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.ProbeAckData ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
staticHasData(x).map(_.B).getOrElse(opdata)
}
def opcode(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.opcode
case b: TLBundleB => b.opcode
case c: TLBundleC => c.opcode
case d: TLBundleD => d.opcode
}
}
def param(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.param
case b: TLBundleB => b.param
case c: TLBundleC => c.param
case d: TLBundleD => d.param
}
}
def size(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.size
case b: TLBundleB => b.size
case c: TLBundleC => c.size
case d: TLBundleD => d.size
}
}
def data(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.data
case b: TLBundleB => b.data
case c: TLBundleC => c.data
case d: TLBundleD => d.data
}
}
def corrupt(x: TLDataChannel): Bool = {
x match {
case a: TLBundleA => a.corrupt
case b: TLBundleB => b.corrupt
case c: TLBundleC => c.corrupt
case d: TLBundleD => d.corrupt
}
}
def mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.mask
case b: TLBundleB => b.mask
case c: TLBundleC => mask(c.address, c.size)
}
}
def full_mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => mask(a.address, a.size)
case b: TLBundleB => mask(b.address, b.size)
case c: TLBundleC => mask(c.address, c.size)
}
}
def address(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.address
case b: TLBundleB => b.address
case c: TLBundleC => c.address
}
}
def source(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.source
case b: TLBundleB => b.source
case c: TLBundleC => c.source
case d: TLBundleD => d.source
}
}
def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes)
def addr_lo(x: UInt): UInt =
if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0)
def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x))
def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x))
def numBeats(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 1.U
case bundle: TLDataChannel => {
val hasData = this.hasData(bundle)
val size = this.size(bundle)
val cutoff = log2Ceil(manager.beatBytes)
val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U
val decode = UIntToOH(size, maxLgSize+1) >> cutoff
Mux(hasData, decode | small.asUInt, 1.U)
}
}
}
def numBeats1(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 0.U
case bundle: TLDataChannel => {
if (maxLgSize == 0) {
0.U
} else {
val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes)
Mux(hasData(bundle), decode, 0.U)
}
}
}
}
def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val beats1 = numBeats1(bits)
val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W))
val counter1 = counter - 1.U
val first = counter === 0.U
val last = counter === 1.U || beats1 === 0.U
val done = last && fire
val count = (beats1 & ~counter1)
when (fire) {
counter := Mux(first, beats1, counter1)
}
(first, last, done, count)
}
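  // Illustrative example (added; not in the original source): on an 8-byte
  // channel, a 32-byte PutFullData burst has beats1 = 3, so `first` is true
  // while counter == 0, `last` fires on the fourth accepted beat, and `count`
  // steps 0, 1, 2, 3 across the burst.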
def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1
def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire)
def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid)
def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2
def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire)
def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid)
def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3
def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire)
def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid)
def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3)
}
def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire)
def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid)
def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4)
}
def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire)
def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid)
def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes))
}
def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire)
def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid)
// Does the request need T permissions to be executed?
def needT(a: TLBundleA): Bool = {
val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLPermissions.NtoB -> false.B,
TLPermissions.NtoT -> true.B,
TLPermissions.BtoT -> true.B))
MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array(
TLMessages.PutFullData -> true.B,
TLMessages.PutPartialData -> true.B,
TLMessages.ArithmeticData -> true.B,
TLMessages.LogicalData -> true.B,
TLMessages.Get -> false.B,
TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLHints.PREFETCH_READ -> false.B,
TLHints.PREFETCH_WRITE -> true.B)),
TLMessages.AcquireBlock -> acq_needT,
TLMessages.AcquirePerm -> acq_needT))
}
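  // Illustrative note (added; not in the original source): Get, PREFETCH_READ
  // hints, and NtoB Acquires do not need Trunk permissions; all Put/atomic
  // data operations, PREFETCH_WRITE hints, and NtoT/BtoT Acquires do.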
// This is a very expensive circuit; use only if you really mean it!
def inFlight(x: TLBundle): (UInt, UInt) = {
val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W))
val bce = manager.anySupportAcquireB && client.anySupportProbe
val (a_first, a_last, _) = firstlast(x.a)
val (b_first, b_last, _) = firstlast(x.b)
val (c_first, c_last, _) = firstlast(x.c)
val (d_first, d_last, _) = firstlast(x.d)
val (e_first, e_last, _) = firstlast(x.e)
val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits))
val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits))
val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits))
val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits))
val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits))
val a_inc = x.a.fire && a_first && a_request
val b_inc = x.b.fire && b_first && b_request
val c_inc = x.c.fire && c_first && c_request
val d_inc = x.d.fire && d_first && d_request
val e_inc = x.e.fire && e_first && e_request
val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil))
val a_dec = x.a.fire && a_last && a_response
val b_dec = x.b.fire && b_last && b_response
val c_dec = x.c.fire && c_last && c_response
val d_dec = x.d.fire && d_last && d_response
val e_dec = x.e.fire && e_last && e_response
val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil))
val next_flight = flight + PopCount(inc) - PopCount(dec)
flight := next_flight
(flight, next_flight)
}
def prettySourceMapping(context: String): String = {
s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n"
}
}
class TLEdgeOut(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
// Transfers
def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquireBlock
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquirePerm
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.Release
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ReleaseData
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) =
Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B)
def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAck
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions, data)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAckData
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B)
def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink)
def GrantAck(toSink: UInt): TLBundleE = {
val e = Wire(new TLBundleE(bundle))
e.sink := toSink
e
}
// Accesses
def Get(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsGetFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Get
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutFullFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutFullData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, mask, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutPartialFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutPartialData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = {
require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsArithmeticFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.ArithmeticData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsLogicalFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.LogicalData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = {
require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsHintFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Hint
a.param := param
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data)
def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAckData
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size)
def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.HintAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
}
class TLEdgeIn(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = {
val todo = x.filter(!_.isEmpty)
val heads = todo.map(_.head)
val tails = todo.map(_.tail)
if (todo.isEmpty) Nil else { heads +: myTranspose(tails) }
}
// Transfers
def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = {
require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}")
val legal = client.supportsProbe(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Probe
b.param := capPermissions
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.Grant
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.GrantData
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B)
def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.ReleaseAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
// Accesses
def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = {
require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsGet(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Get
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsPutFull(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutFullData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, mask, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsPutPartial(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutPartialData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsArithmetic(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.ArithmeticData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsLogical(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.LogicalData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = {
require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsHint(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Hint
b.param := param
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size)
def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied)
def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data)
def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAckData
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B)
def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied)
def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B)
def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.HintAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
}
| module TLMonitor_28( // @[Monitor.scala:36:7]
input clock, // @[Monitor.scala:36:7]
input reset, // @[Monitor.scala:36:7]
input io_in_a_ready, // @[Monitor.scala:20:14]
input io_in_a_valid, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_opcode, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_param, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_size, // @[Monitor.scala:20:14]
input [6:0] io_in_a_bits_source, // @[Monitor.scala:20:14]
input [20:0] io_in_a_bits_address, // @[Monitor.scala:20:14]
input [7:0] io_in_a_bits_mask, // @[Monitor.scala:20:14]
input [63:0] io_in_a_bits_data, // @[Monitor.scala:20:14]
input io_in_a_bits_corrupt, // @[Monitor.scala:20:14]
input io_in_d_ready, // @[Monitor.scala:20:14]
input io_in_d_valid, // @[Monitor.scala:20:14]
input [2:0] io_in_d_bits_opcode, // @[Monitor.scala:20:14]
input [1:0] io_in_d_bits_param, // @[Monitor.scala:20:14]
input [2:0] io_in_d_bits_size, // @[Monitor.scala:20:14]
input [6:0] io_in_d_bits_source, // @[Monitor.scala:20:14]
input io_in_d_bits_sink, // @[Monitor.scala:20:14]
input io_in_d_bits_denied, // @[Monitor.scala:20:14]
input [63:0] io_in_d_bits_data, // @[Monitor.scala:20:14]
input io_in_d_bits_corrupt // @[Monitor.scala:20:14]
);
wire [31:0] _plusarg_reader_1_out; // @[PlusArg.scala:80:11]
wire [31:0] _plusarg_reader_out; // @[PlusArg.scala:80:11]
wire io_in_a_ready_0 = io_in_a_ready; // @[Monitor.scala:36:7]
wire io_in_a_valid_0 = io_in_a_valid; // @[Monitor.scala:36:7]
wire [2:0] io_in_a_bits_opcode_0 = io_in_a_bits_opcode; // @[Monitor.scala:36:7]
wire [2:0] io_in_a_bits_param_0 = io_in_a_bits_param; // @[Monitor.scala:36:7]
wire [2:0] io_in_a_bits_size_0 = io_in_a_bits_size; // @[Monitor.scala:36:7]
wire [6:0] io_in_a_bits_source_0 = io_in_a_bits_source; // @[Monitor.scala:36:7]
wire [20:0] io_in_a_bits_address_0 = io_in_a_bits_address; // @[Monitor.scala:36:7]
wire [7:0] io_in_a_bits_mask_0 = io_in_a_bits_mask; // @[Monitor.scala:36:7]
wire [63:0] io_in_a_bits_data_0 = io_in_a_bits_data; // @[Monitor.scala:36:7]
wire io_in_a_bits_corrupt_0 = io_in_a_bits_corrupt; // @[Monitor.scala:36:7]
wire io_in_d_ready_0 = io_in_d_ready; // @[Monitor.scala:36:7]
wire io_in_d_valid_0 = io_in_d_valid; // @[Monitor.scala:36:7]
wire [2:0] io_in_d_bits_opcode_0 = io_in_d_bits_opcode; // @[Monitor.scala:36:7]
wire [1:0] io_in_d_bits_param_0 = io_in_d_bits_param; // @[Monitor.scala:36:7]
wire [2:0] io_in_d_bits_size_0 = io_in_d_bits_size; // @[Monitor.scala:36:7]
wire [6:0] io_in_d_bits_source_0 = io_in_d_bits_source; // @[Monitor.scala:36:7]
wire io_in_d_bits_sink_0 = io_in_d_bits_sink; // @[Monitor.scala:36:7]
wire io_in_d_bits_denied_0 = io_in_d_bits_denied; // @[Monitor.scala:36:7]
wire [63:0] io_in_d_bits_data_0 = io_in_d_bits_data; // @[Monitor.scala:36:7]
wire io_in_d_bits_corrupt_0 = io_in_d_bits_corrupt; // @[Monitor.scala:36:7]
wire sink_ok = 1'h0; // @[Monitor.scala:309:31]
wire _c_first_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_T = 1'h0; // @[Decoupled.scala:51:35]
wire c_first_beats1_opdata = 1'h0; // @[Edges.scala:102:36]
wire _c_first_last_T = 1'h0; // @[Edges.scala:232:25]
wire c_first_done = 1'h0; // @[Edges.scala:233:22]
wire _c_set_wo_ready_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_wo_ready_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_wo_ready_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_wo_ready_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_wo_ready_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_wo_ready_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_interm_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_interm_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_interm_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_interm_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_interm_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_interm_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_interm_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_interm_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_interm_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_interm_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_interm_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_interm_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_T = 1'h0; // @[Monitor.scala:772:47]
wire _c_probe_ack_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_T_1 = 1'h0; // @[Monitor.scala:772:95]
wire c_probe_ack = 1'h0; // @[Monitor.scala:772:71]
wire _same_cycle_resp_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_T_3 = 1'h0; // @[Monitor.scala:795:44]
wire _same_cycle_resp_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_T_4 = 1'h0; // @[Edges.scala:68:36]
wire _same_cycle_resp_T_5 = 1'h0; // @[Edges.scala:68:51]
wire _same_cycle_resp_T_6 = 1'h0; // @[Edges.scala:68:40]
wire _same_cycle_resp_T_7 = 1'h0; // @[Monitor.scala:795:55]
wire _same_cycle_resp_WIRE_4_ready = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_4_valid = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_4_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_5_ready = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_5_valid = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_5_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire same_cycle_resp_1 = 1'h0; // @[Monitor.scala:795:88]
wire [2:0] responseMap_0 = 3'h0; // @[Monitor.scala:643:42]
wire [2:0] responseMap_1 = 3'h0; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_0 = 3'h0; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_1 = 3'h0; // @[Monitor.scala:644:42]
wire [2:0] _c_first_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_2_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_3_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] c_first_beats1_decode = 3'h0; // @[Edges.scala:220:59]
wire [2:0] c_first_beats1 = 3'h0; // @[Edges.scala:221:14]
wire [2:0] _c_first_count_T = 3'h0; // @[Edges.scala:234:27]
wire [2:0] c_first_count = 3'h0; // @[Edges.scala:234:25]
wire [2:0] _c_first_counter_T = 3'h0; // @[Edges.scala:236:21]
wire [2:0] _c_set_wo_ready_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_wo_ready_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_wo_ready_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_wo_ready_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_wo_ready_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_wo_ready_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_interm_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_interm_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_interm_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_interm_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_interm_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_interm_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_interm_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_interm_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_interm_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_2_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_3_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_2_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_3_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_4_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_4_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_4_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_5_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_5_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_5_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire _source_ok_T_3 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_5 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_9 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_11 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_15 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_17 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_21 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_23 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_27 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_35 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_55 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_57 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_61 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_63 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_67 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_69 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_73 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_75 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_79 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_87 = 1'h1; // @[Parameters.scala:56:32]
wire c_first = 1'h1; // @[Edges.scala:231:25]
wire _c_first_last_T_1 = 1'h1; // @[Edges.scala:232:43]
wire c_first_last = 1'h1; // @[Edges.scala:232:33]
wire [2:0] c_first_counter1 = 3'h7; // @[Edges.scala:230:28]
wire [3:0] _c_first_counter1_T = 4'hF; // @[Edges.scala:230:28]
wire [63:0] _c_first_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_first_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_first_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_first_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_set_wo_ready_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_set_wo_ready_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_opcodes_set_interm_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_opcodes_set_interm_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_sizes_set_interm_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_sizes_set_interm_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_opcodes_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_opcodes_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_sizes_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_sizes_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_probe_ack_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_probe_ack_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_probe_ack_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_probe_ack_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _same_cycle_resp_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _same_cycle_resp_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _same_cycle_resp_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _same_cycle_resp_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _same_cycle_resp_WIRE_4_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _same_cycle_resp_WIRE_5_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [20:0] _c_first_WIRE_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _c_first_WIRE_1_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _c_first_WIRE_2_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _c_first_WIRE_3_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _c_set_wo_ready_WIRE_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _c_set_wo_ready_WIRE_1_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _c_set_WIRE_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _c_set_WIRE_1_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _c_opcodes_set_interm_WIRE_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _c_opcodes_set_interm_WIRE_1_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _c_sizes_set_interm_WIRE_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _c_sizes_set_interm_WIRE_1_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _c_opcodes_set_WIRE_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _c_opcodes_set_WIRE_1_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _c_sizes_set_WIRE_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _c_sizes_set_WIRE_1_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _c_probe_ack_WIRE_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _c_probe_ack_WIRE_1_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _c_probe_ack_WIRE_2_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _c_probe_ack_WIRE_3_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _same_cycle_resp_WIRE_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _same_cycle_resp_WIRE_1_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _same_cycle_resp_WIRE_2_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _same_cycle_resp_WIRE_3_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _same_cycle_resp_WIRE_4_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _same_cycle_resp_WIRE_5_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_first_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_first_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_first_WIRE_2_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_first_WIRE_3_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_set_wo_ready_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_set_wo_ready_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_set_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_set_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_opcodes_set_interm_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_opcodes_set_interm_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_sizes_set_interm_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_sizes_set_interm_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_opcodes_set_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_opcodes_set_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_sizes_set_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_sizes_set_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_probe_ack_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_probe_ack_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_probe_ack_WIRE_2_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_probe_ack_WIRE_3_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _same_cycle_resp_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _same_cycle_resp_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _same_cycle_resp_WIRE_2_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _same_cycle_resp_WIRE_3_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _same_cycle_resp_WIRE_4_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _same_cycle_resp_WIRE_5_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [15:0] _a_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _a_size_lookup_T_5 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _d_opcodes_clr_T_3 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _d_sizes_clr_T_3 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _c_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:724:57]
wire [15:0] _c_size_lookup_T_5 = 16'hF; // @[Monitor.scala:724:57]
wire [15:0] _d_opcodes_clr_T_9 = 16'hF; // @[Monitor.scala:724:57]
wire [15:0] _d_sizes_clr_T_9 = 16'hF; // @[Monitor.scala:724:57]
wire [16:0] _a_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _a_size_lookup_T_4 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _d_opcodes_clr_T_2 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _d_sizes_clr_T_2 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _c_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:724:57]
wire [16:0] _c_size_lookup_T_4 = 17'hF; // @[Monitor.scala:724:57]
wire [16:0] _d_opcodes_clr_T_8 = 17'hF; // @[Monitor.scala:724:57]
wire [16:0] _d_sizes_clr_T_8 = 17'hF; // @[Monitor.scala:724:57]
wire [15:0] _a_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _a_size_lookup_T_3 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _d_opcodes_clr_T_1 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _d_sizes_clr_T_1 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _c_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:724:51]
wire [15:0] _c_size_lookup_T_3 = 16'h10; // @[Monitor.scala:724:51]
wire [15:0] _d_opcodes_clr_T_7 = 16'h10; // @[Monitor.scala:724:51]
wire [15:0] _d_sizes_clr_T_7 = 16'h10; // @[Monitor.scala:724:51]
wire [1026:0] _c_opcodes_set_T_1 = 1027'h0; // @[Monitor.scala:767:54]
wire [1026:0] _c_sizes_set_T_1 = 1027'h0; // @[Monitor.scala:768:52]
wire [9:0] _c_opcodes_set_T = 10'h0; // @[Monitor.scala:767:79]
wire [9:0] _c_sizes_set_T = 10'h0; // @[Monitor.scala:768:77]
wire [3:0] _c_opcodes_set_interm_T_1 = 4'h1; // @[Monitor.scala:765:61]
wire [3:0] _c_sizes_set_interm_T_1 = 4'h1; // @[Monitor.scala:766:59]
wire [3:0] c_opcodes_set_interm = 4'h0; // @[Monitor.scala:754:40]
wire [3:0] c_sizes_set_interm = 4'h0; // @[Monitor.scala:755:40]
wire [3:0] _c_opcodes_set_interm_T = 4'h0; // @[Monitor.scala:765:53]
wire [3:0] _c_sizes_set_interm_T = 4'h0; // @[Monitor.scala:766:51]
wire [127:0] _c_set_wo_ready_T = 128'h1; // @[OneHot.scala:58:35]
wire [127:0] _c_set_T = 128'h1; // @[OneHot.scala:58:35]
wire [259:0] c_opcodes_set = 260'h0; // @[Monitor.scala:740:34]
wire [259:0] c_sizes_set = 260'h0; // @[Monitor.scala:741:34]
wire [64:0] c_set = 65'h0; // @[Monitor.scala:738:34]
wire [64:0] c_set_wo_ready = 65'h0; // @[Monitor.scala:739:34]
wire [5:0] _c_first_beats1_decode_T_2 = 6'h0; // @[package.scala:243:46]
wire [5:0] _c_first_beats1_decode_T_1 = 6'h3F; // @[package.scala:243:76]
wire [12:0] _c_first_beats1_decode_T = 13'h3F; // @[package.scala:243:71]
wire [2:0] responseMap_6 = 3'h4; // @[Monitor.scala:643:42]
wire [2:0] responseMap_7 = 3'h4; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_7 = 3'h4; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_6 = 3'h5; // @[Monitor.scala:644:42]
wire [2:0] responseMap_5 = 3'h2; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_5 = 3'h2; // @[Monitor.scala:644:42]
wire [2:0] responseMap_2 = 3'h1; // @[Monitor.scala:643:42]
wire [2:0] responseMap_3 = 3'h1; // @[Monitor.scala:643:42]
wire [2:0] responseMap_4 = 3'h1; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_2 = 3'h1; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_3 = 3'h1; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_4 = 3'h1; // @[Monitor.scala:644:42]
wire [3:0] _a_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:637:123]
wire [3:0] _a_size_lookup_T_2 = 4'h4; // @[Monitor.scala:641:117]
wire [3:0] _d_opcodes_clr_T = 4'h4; // @[Monitor.scala:680:48]
wire [3:0] _d_sizes_clr_T = 4'h4; // @[Monitor.scala:681:48]
wire [3:0] _c_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:749:123]
wire [3:0] _c_size_lookup_T_2 = 4'h4; // @[Monitor.scala:750:119]
wire [3:0] _d_opcodes_clr_T_6 = 4'h4; // @[Monitor.scala:790:48]
wire [3:0] _d_sizes_clr_T_6 = 4'h4; // @[Monitor.scala:791:48]
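  // The tied-off constants above are the monitor's C-channel bookkeeping, constant-folded to zero
  // (this edge appears to carry no C-channel traffic). The live A/D-channel checks follow; the next
  // block fans a_size, a_source and d_source out into the per-check temporaries used below.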
wire [2:0] _mask_sizeOH_T = io_in_a_bits_size_0; // @[Misc.scala:202:34]
wire [6:0] _source_ok_uncommonBits_T = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _source_ok_uncommonBits_T_1 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _source_ok_uncommonBits_T_2 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _source_ok_uncommonBits_T_3 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _source_ok_uncommonBits_T_4 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _source_ok_uncommonBits_T_5 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_1 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_2 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_3 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_4 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_5 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_6 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_7 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_8 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_9 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_10 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_11 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_12 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_13 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_14 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_15 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_16 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_17 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_18 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_19 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_20 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_21 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_22 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_23 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_24 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_25 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_26 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_27 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_28 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_29 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_30 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_31 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_32 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_33 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_34 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_35 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_36 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_37 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_38 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_39 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_40 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_41 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_42 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_43 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_44 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_45 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_46 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_47 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_48 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_49 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_50 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_51 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_52 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_53 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_54 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_55 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_56 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_57 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_58 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_59 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_60 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_61 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_62 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_63 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_64 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_65 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _source_ok_uncommonBits_T_6 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _source_ok_uncommonBits_T_7 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _source_ok_uncommonBits_T_8 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _source_ok_uncommonBits_T_9 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _source_ok_uncommonBits_T_10 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _source_ok_uncommonBits_T_11 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
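  // A channel: decode whether a_source falls inside one of the legal client source-ID ranges.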
wire _source_ok_T = io_in_a_bits_source_0 == 7'h10; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_0 = _source_ok_T; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits = _source_ok_uncommonBits_T[1:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] _source_ok_T_1 = io_in_a_bits_source_0[6:2]; // @[Monitor.scala:36:7]
wire [4:0] _source_ok_T_7 = io_in_a_bits_source_0[6:2]; // @[Monitor.scala:36:7]
wire [4:0] _source_ok_T_13 = io_in_a_bits_source_0[6:2]; // @[Monitor.scala:36:7]
wire [4:0] _source_ok_T_19 = io_in_a_bits_source_0[6:2]; // @[Monitor.scala:36:7]
wire [4:0] _source_ok_T_25 = io_in_a_bits_source_0[6:2]; // @[Monitor.scala:36:7]
wire [4:0] _source_ok_T_33 = io_in_a_bits_source_0[6:2]; // @[Monitor.scala:36:7]
wire _source_ok_T_2 = _source_ok_T_1 == 5'h0; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_4 = _source_ok_T_2; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_6 = _source_ok_T_4; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1 = _source_ok_T_6; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_1 = _source_ok_uncommonBits_T_1[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_8 = _source_ok_T_7 == 5'h1; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_10 = _source_ok_T_8; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_12 = _source_ok_T_10; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_2 = _source_ok_T_12; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_2 = _source_ok_uncommonBits_T_2[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_14 = _source_ok_T_13 == 5'h2; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_16 = _source_ok_T_14; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_18 = _source_ok_T_16; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_3 = _source_ok_T_18; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_3 = _source_ok_uncommonBits_T_3[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_20 = _source_ok_T_19 == 5'h3; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_22 = _source_ok_T_20; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_24 = _source_ok_T_22; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_4 = _source_ok_T_24; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_4 = _source_ok_uncommonBits_T_4[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_26 = _source_ok_T_25 == 5'hA; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_28 = _source_ok_T_26; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_29 = source_ok_uncommonBits_4 != 2'h3; // @[Parameters.scala:52:56, :57:20]
wire _source_ok_T_30 = _source_ok_T_28 & _source_ok_T_29; // @[Parameters.scala:54:67, :56:48, :57:20]
wire _source_ok_WIRE_5 = _source_ok_T_30; // @[Parameters.scala:1138:31]
wire _source_ok_T_31 = io_in_a_bits_source_0 == 7'h2B; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_6 = _source_ok_T_31; // @[Parameters.scala:1138:31]
wire _source_ok_T_32 = io_in_a_bits_source_0 == 7'h2C; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_7 = _source_ok_T_32; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_5 = _source_ok_uncommonBits_T_5[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_34 = _source_ok_T_33 == 5'h8; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_36 = _source_ok_T_34; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_37 = source_ok_uncommonBits_5 != 2'h3; // @[Parameters.scala:52:56, :57:20]
wire _source_ok_T_38 = _source_ok_T_36 & _source_ok_T_37; // @[Parameters.scala:54:67, :56:48, :57:20]
wire _source_ok_WIRE_8 = _source_ok_T_38; // @[Parameters.scala:1138:31]
wire _source_ok_T_39 = io_in_a_bits_source_0 == 7'h23; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_9 = _source_ok_T_39; // @[Parameters.scala:1138:31]
wire _source_ok_T_40 = io_in_a_bits_source_0 == 7'h24; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_10 = _source_ok_T_40; // @[Parameters.scala:1138:31]
wire _source_ok_T_41 = io_in_a_bits_source_0 == 7'h40; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_11 = _source_ok_T_41; // @[Parameters.scala:1138:31]
wire _source_ok_T_42 = _source_ok_WIRE_0 | _source_ok_WIRE_1; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_43 = _source_ok_T_42 | _source_ok_WIRE_2; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_44 = _source_ok_T_43 | _source_ok_WIRE_3; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_45 = _source_ok_T_44 | _source_ok_WIRE_4; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_46 = _source_ok_T_45 | _source_ok_WIRE_5; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_47 = _source_ok_T_46 | _source_ok_WIRE_6; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_48 = _source_ok_T_47 | _source_ok_WIRE_7; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_49 = _source_ok_T_48 | _source_ok_WIRE_8; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_50 = _source_ok_T_49 | _source_ok_WIRE_9; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_51 = _source_ok_T_50 | _source_ok_WIRE_10; // @[Parameters.scala:1138:31, :1139:46]
wire source_ok = _source_ok_T_51 | _source_ok_WIRE_11; // @[Parameters.scala:1138:31, :1139:46]
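  // Alignment check: the address bits below the transfer size (a_size) must be zero.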
wire [12:0] _GEN = 13'h3F << io_in_a_bits_size_0; // @[package.scala:243:71]
wire [12:0] _is_aligned_mask_T; // @[package.scala:243:71]
assign _is_aligned_mask_T = _GEN; // @[package.scala:243:71]
wire [12:0] _a_first_beats1_decode_T; // @[package.scala:243:71]
assign _a_first_beats1_decode_T = _GEN; // @[package.scala:243:71]
wire [12:0] _a_first_beats1_decode_T_3; // @[package.scala:243:71]
assign _a_first_beats1_decode_T_3 = _GEN; // @[package.scala:243:71]
wire [5:0] _is_aligned_mask_T_1 = _is_aligned_mask_T[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] is_aligned_mask = ~_is_aligned_mask_T_1; // @[package.scala:243:{46,76}]
wire [20:0] _is_aligned_T = {15'h0, io_in_a_bits_address_0[5:0] & is_aligned_mask}; // @[package.scala:243:46]
wire is_aligned = _is_aligned_T == 21'h0; // @[Edges.scala:21:{16,24}]
wire [1:0] mask_sizeOH_shiftAmount = _mask_sizeOH_T[1:0]; // @[OneHot.scala:64:49]
wire [3:0] _mask_sizeOH_T_1 = 4'h1 << mask_sizeOH_shiftAmount; // @[OneHot.scala:64:49, :65:12]
wire [2:0] _mask_sizeOH_T_2 = _mask_sizeOH_T_1[2:0]; // @[OneHot.scala:65:{12,27}]
wire [2:0] mask_sizeOH = {_mask_sizeOH_T_2[2:1], 1'h1}; // @[OneHot.scala:65:27]
wire mask_sub_sub_sub_0_1 = io_in_a_bits_size_0 > 3'h2; // @[Misc.scala:206:21]
wire mask_sub_sub_size = mask_sizeOH[2]; // @[Misc.scala:202:81, :209:26]
wire mask_sub_sub_bit = io_in_a_bits_address_0[2]; // @[Misc.scala:210:26]
wire mask_sub_sub_1_2 = mask_sub_sub_bit; // @[Misc.scala:210:26, :214:27]
wire mask_sub_sub_nbit = ~mask_sub_sub_bit; // @[Misc.scala:210:26, :211:20]
wire mask_sub_sub_0_2 = mask_sub_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_sub_sub_acc_T = mask_sub_sub_size & mask_sub_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_sub_0_1 = mask_sub_sub_sub_0_1 | _mask_sub_sub_acc_T; // @[Misc.scala:206:21, :215:{29,38}]
wire _mask_sub_sub_acc_T_1 = mask_sub_sub_size & mask_sub_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_sub_1_1 = mask_sub_sub_sub_0_1 | _mask_sub_sub_acc_T_1; // @[Misc.scala:206:21, :215:{29,38}]
wire mask_sub_size = mask_sizeOH[1]; // @[Misc.scala:202:81, :209:26]
wire mask_sub_bit = io_in_a_bits_address_0[1]; // @[Misc.scala:210:26]
wire mask_sub_nbit = ~mask_sub_bit; // @[Misc.scala:210:26, :211:20]
wire mask_sub_0_2 = mask_sub_sub_0_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_sub_acc_T = mask_sub_size & mask_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_0_1 = mask_sub_sub_0_1 | _mask_sub_acc_T; // @[Misc.scala:215:{29,38}]
wire mask_sub_1_2 = mask_sub_sub_0_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_sub_acc_T_1 = mask_sub_size & mask_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_1_1 = mask_sub_sub_0_1 | _mask_sub_acc_T_1; // @[Misc.scala:215:{29,38}]
wire mask_sub_2_2 = mask_sub_sub_1_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_sub_acc_T_2 = mask_sub_size & mask_sub_2_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_2_1 = mask_sub_sub_1_1 | _mask_sub_acc_T_2; // @[Misc.scala:215:{29,38}]
wire mask_sub_3_2 = mask_sub_sub_1_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_sub_acc_T_3 = mask_sub_size & mask_sub_3_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_3_1 = mask_sub_sub_1_1 | _mask_sub_acc_T_3; // @[Misc.scala:215:{29,38}]
wire mask_size = mask_sizeOH[0]; // @[Misc.scala:202:81, :209:26]
wire mask_bit = io_in_a_bits_address_0[0]; // @[Misc.scala:210:26]
wire mask_nbit = ~mask_bit; // @[Misc.scala:210:26, :211:20]
wire mask_eq = mask_sub_0_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T = mask_size & mask_eq; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc = mask_sub_0_1 | _mask_acc_T; // @[Misc.scala:215:{29,38}]
wire mask_eq_1 = mask_sub_0_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_1 = mask_size & mask_eq_1; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_1 = mask_sub_0_1 | _mask_acc_T_1; // @[Misc.scala:215:{29,38}]
wire mask_eq_2 = mask_sub_1_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T_2 = mask_size & mask_eq_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_2 = mask_sub_1_1 | _mask_acc_T_2; // @[Misc.scala:215:{29,38}]
wire mask_eq_3 = mask_sub_1_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_3 = mask_size & mask_eq_3; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_3 = mask_sub_1_1 | _mask_acc_T_3; // @[Misc.scala:215:{29,38}]
wire mask_eq_4 = mask_sub_2_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T_4 = mask_size & mask_eq_4; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_4 = mask_sub_2_1 | _mask_acc_T_4; // @[Misc.scala:215:{29,38}]
wire mask_eq_5 = mask_sub_2_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_5 = mask_size & mask_eq_5; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_5 = mask_sub_2_1 | _mask_acc_T_5; // @[Misc.scala:215:{29,38}]
wire mask_eq_6 = mask_sub_3_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T_6 = mask_size & mask_eq_6; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_6 = mask_sub_3_1 | _mask_acc_T_6; // @[Misc.scala:215:{29,38}]
wire mask_eq_7 = mask_sub_3_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_7 = mask_size & mask_eq_7; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_7 = mask_sub_3_1 | _mask_acc_T_7; // @[Misc.scala:215:{29,38}]
wire [1:0] mask_lo_lo = {mask_acc_1, mask_acc}; // @[Misc.scala:215:29, :222:10]
wire [1:0] mask_lo_hi = {mask_acc_3, mask_acc_2}; // @[Misc.scala:215:29, :222:10]
wire [3:0] mask_lo = {mask_lo_hi, mask_lo_lo}; // @[Misc.scala:222:10]
wire [1:0] mask_hi_lo = {mask_acc_5, mask_acc_4}; // @[Misc.scala:215:29, :222:10]
wire [1:0] mask_hi_hi = {mask_acc_7, mask_acc_6}; // @[Misc.scala:215:29, :222:10]
wire [3:0] mask_hi = {mask_hi_hi, mask_hi_lo}; // @[Misc.scala:222:10]
wire [7:0] mask = {mask_hi, mask_lo}; // @[Misc.scala:222:10]
wire [1:0] uncommonBits = _uncommonBits_T[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_1 = _uncommonBits_T_1[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_2 = _uncommonBits_T_2[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_3 = _uncommonBits_T_3[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_4 = _uncommonBits_T_4[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_5 = _uncommonBits_T_5[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_6 = _uncommonBits_T_6[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_7 = _uncommonBits_T_7[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_8 = _uncommonBits_T_8[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_9 = _uncommonBits_T_9[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_10 = _uncommonBits_T_10[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_11 = _uncommonBits_T_11[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_12 = _uncommonBits_T_12[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_13 = _uncommonBits_T_13[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_14 = _uncommonBits_T_14[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_15 = _uncommonBits_T_15[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_16 = _uncommonBits_T_16[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_17 = _uncommonBits_T_17[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_18 = _uncommonBits_T_18[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_19 = _uncommonBits_T_19[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_20 = _uncommonBits_T_20[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_21 = _uncommonBits_T_21[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_22 = _uncommonBits_T_22[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_23 = _uncommonBits_T_23[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_24 = _uncommonBits_T_24[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_25 = _uncommonBits_T_25[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_26 = _uncommonBits_T_26[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_27 = _uncommonBits_T_27[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_28 = _uncommonBits_T_28[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_29 = _uncommonBits_T_29[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_30 = _uncommonBits_T_30[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_31 = _uncommonBits_T_31[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_32 = _uncommonBits_T_32[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_33 = _uncommonBits_T_33[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_34 = _uncommonBits_T_34[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_35 = _uncommonBits_T_35[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_36 = _uncommonBits_T_36[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_37 = _uncommonBits_T_37[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_38 = _uncommonBits_T_38[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_39 = _uncommonBits_T_39[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_40 = _uncommonBits_T_40[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_41 = _uncommonBits_T_41[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_42 = _uncommonBits_T_42[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_43 = _uncommonBits_T_43[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_44 = _uncommonBits_T_44[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_45 = _uncommonBits_T_45[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_46 = _uncommonBits_T_46[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_47 = _uncommonBits_T_47[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_48 = _uncommonBits_T_48[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_49 = _uncommonBits_T_49[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_50 = _uncommonBits_T_50[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_51 = _uncommonBits_T_51[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_52 = _uncommonBits_T_52[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_53 = _uncommonBits_T_53[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_54 = _uncommonBits_T_54[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_55 = _uncommonBits_T_55[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_56 = _uncommonBits_T_56[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_57 = _uncommonBits_T_57[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_58 = _uncommonBits_T_58[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_59 = _uncommonBits_T_59[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_60 = _uncommonBits_T_60[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_61 = _uncommonBits_T_61[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_62 = _uncommonBits_T_62[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_63 = _uncommonBits_T_63[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_64 = _uncommonBits_T_64[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_65 = _uncommonBits_T_65[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_52 = io_in_d_bits_source_0 == 7'h10; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_1_0 = _source_ok_T_52; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_6 = _source_ok_uncommonBits_T_6[1:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] _source_ok_T_53 = io_in_d_bits_source_0[6:2]; // @[Monitor.scala:36:7]
wire [4:0] _source_ok_T_59 = io_in_d_bits_source_0[6:2]; // @[Monitor.scala:36:7]
wire [4:0] _source_ok_T_65 = io_in_d_bits_source_0[6:2]; // @[Monitor.scala:36:7]
wire [4:0] _source_ok_T_71 = io_in_d_bits_source_0[6:2]; // @[Monitor.scala:36:7]
wire [4:0] _source_ok_T_77 = io_in_d_bits_source_0[6:2]; // @[Monitor.scala:36:7]
wire [4:0] _source_ok_T_85 = io_in_d_bits_source_0[6:2]; // @[Monitor.scala:36:7]
wire _source_ok_T_54 = _source_ok_T_53 == 5'h0; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_56 = _source_ok_T_54; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_58 = _source_ok_T_56; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1_1 = _source_ok_T_58; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_7 = _source_ok_uncommonBits_T_7[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_60 = _source_ok_T_59 == 5'h1; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_62 = _source_ok_T_60; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_64 = _source_ok_T_62; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1_2 = _source_ok_T_64; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_8 = _source_ok_uncommonBits_T_8[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_66 = _source_ok_T_65 == 5'h2; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_68 = _source_ok_T_66; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_70 = _source_ok_T_68; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1_3 = _source_ok_T_70; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_9 = _source_ok_uncommonBits_T_9[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_72 = _source_ok_T_71 == 5'h3; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_74 = _source_ok_T_72; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_76 = _source_ok_T_74; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1_4 = _source_ok_T_76; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_10 = _source_ok_uncommonBits_T_10[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_78 = _source_ok_T_77 == 5'hA; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_80 = _source_ok_T_78; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_81 = source_ok_uncommonBits_10 != 2'h3; // @[Parameters.scala:52:56, :57:20]
wire _source_ok_T_82 = _source_ok_T_80 & _source_ok_T_81; // @[Parameters.scala:54:67, :56:48, :57:20]
wire _source_ok_WIRE_1_5 = _source_ok_T_82; // @[Parameters.scala:1138:31]
wire _source_ok_T_83 = io_in_d_bits_source_0 == 7'h2B; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_1_6 = _source_ok_T_83; // @[Parameters.scala:1138:31]
wire _source_ok_T_84 = io_in_d_bits_source_0 == 7'h2C; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_1_7 = _source_ok_T_84; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_11 = _source_ok_uncommonBits_T_11[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_86 = _source_ok_T_85 == 5'h8; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_88 = _source_ok_T_86; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_89 = source_ok_uncommonBits_11 != 2'h3; // @[Parameters.scala:52:56, :57:20]
wire _source_ok_T_90 = _source_ok_T_88 & _source_ok_T_89; // @[Parameters.scala:54:67, :56:48, :57:20]
wire _source_ok_WIRE_1_8 = _source_ok_T_90; // @[Parameters.scala:1138:31]
wire _source_ok_T_91 = io_in_d_bits_source_0 == 7'h23; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_1_9 = _source_ok_T_91; // @[Parameters.scala:1138:31]
wire _source_ok_T_92 = io_in_d_bits_source_0 == 7'h24; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_1_10 = _source_ok_T_92; // @[Parameters.scala:1138:31]
wire _source_ok_T_93 = io_in_d_bits_source_0 == 7'h40; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_1_11 = _source_ok_T_93; // @[Parameters.scala:1138:31]
wire _source_ok_T_94 = _source_ok_WIRE_1_0 | _source_ok_WIRE_1_1; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_95 = _source_ok_T_94 | _source_ok_WIRE_1_2; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_96 = _source_ok_T_95 | _source_ok_WIRE_1_3; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_97 = _source_ok_T_96 | _source_ok_WIRE_1_4; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_98 = _source_ok_T_97 | _source_ok_WIRE_1_5; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_99 = _source_ok_T_98 | _source_ok_WIRE_1_6; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_100 = _source_ok_T_99 | _source_ok_WIRE_1_7; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_101 = _source_ok_T_100 | _source_ok_WIRE_1_8; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_102 = _source_ok_T_101 | _source_ok_WIRE_1_9; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_103 = _source_ok_T_102 | _source_ok_WIRE_1_10; // @[Parameters.scala:1138:31, :1139:46]
wire source_ok_1 = _source_ok_T_103 | _source_ok_WIRE_1_11; // @[Parameters.scala:1138:31, :1139:46]
wire _T_1306 = io_in_a_ready_0 & io_in_a_valid_0; // @[Decoupled.scala:51:35]
wire _a_first_T; // @[Decoupled.scala:51:35]
assign _a_first_T = _T_1306; // @[Decoupled.scala:51:35]
wire _a_first_T_1; // @[Decoupled.scala:51:35]
assign _a_first_T_1 = _T_1306; // @[Decoupled.scala:51:35]
wire [5:0] _a_first_beats1_decode_T_1 = _a_first_beats1_decode_T[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] _a_first_beats1_decode_T_2 = ~_a_first_beats1_decode_T_1; // @[package.scala:243:{46,76}]
wire [2:0] a_first_beats1_decode = _a_first_beats1_decode_T_2[5:3]; // @[package.scala:243:46]
wire _a_first_beats1_opdata_T = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7]
wire _a_first_beats1_opdata_T_1 = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7]
wire a_first_beats1_opdata = ~_a_first_beats1_opdata_T; // @[Edges.scala:92:{28,37}]
wire [2:0] a_first_beats1 = a_first_beats1_opdata ? a_first_beats1_decode : 3'h0; // @[Edges.scala:92:28, :220:59, :221:14]
reg [2:0] a_first_counter; // @[Edges.scala:229:27]
wire [3:0] _a_first_counter1_T = {1'h0, a_first_counter} - 4'h1; // @[Edges.scala:229:27, :230:28]
wire [2:0] a_first_counter1 = _a_first_counter1_T[2:0]; // @[Edges.scala:230:28]
wire a_first = a_first_counter == 3'h0; // @[Edges.scala:229:27, :231:25]
wire _a_first_last_T = a_first_counter == 3'h1; // @[Edges.scala:229:27, :232:25]
wire _a_first_last_T_1 = a_first_beats1 == 3'h0; // @[Edges.scala:221:14, :232:43]
wire a_first_last = _a_first_last_T | _a_first_last_T_1; // @[Edges.scala:232:{25,33,43}]
wire a_first_done = a_first_last & _a_first_T; // @[Decoupled.scala:51:35]
wire [2:0] _a_first_count_T = ~a_first_counter1; // @[Edges.scala:230:28, :234:27]
wire [2:0] a_first_count = a_first_beats1 & _a_first_count_T; // @[Edges.scala:221:14, :234:{25,27}]
wire [2:0] _a_first_counter_T = a_first ? a_first_beats1 : a_first_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
reg [2:0] opcode; // @[Monitor.scala:387:22]
reg [2:0] param; // @[Monitor.scala:388:22]
reg [2:0] size; // @[Monitor.scala:389:22]
reg [6:0] source; // @[Monitor.scala:390:22]
reg [20:0] address; // @[Monitor.scala:391:22]
wire _T_1379 = io_in_d_ready_0 & io_in_d_valid_0; // @[Decoupled.scala:51:35]
wire _d_first_T; // @[Decoupled.scala:51:35]
assign _d_first_T = _T_1379; // @[Decoupled.scala:51:35]
wire _d_first_T_1; // @[Decoupled.scala:51:35]
assign _d_first_T_1 = _T_1379; // @[Decoupled.scala:51:35]
wire _d_first_T_2; // @[Decoupled.scala:51:35]
assign _d_first_T_2 = _T_1379; // @[Decoupled.scala:51:35]
wire [12:0] _GEN_0 = 13'h3F << io_in_d_bits_size_0; // @[package.scala:243:71]
wire [12:0] _d_first_beats1_decode_T; // @[package.scala:243:71]
assign _d_first_beats1_decode_T = _GEN_0; // @[package.scala:243:71]
wire [12:0] _d_first_beats1_decode_T_3; // @[package.scala:243:71]
assign _d_first_beats1_decode_T_3 = _GEN_0; // @[package.scala:243:71]
wire [12:0] _d_first_beats1_decode_T_6; // @[package.scala:243:71]
assign _d_first_beats1_decode_T_6 = _GEN_0; // @[package.scala:243:71]
wire [5:0] _d_first_beats1_decode_T_1 = _d_first_beats1_decode_T[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] _d_first_beats1_decode_T_2 = ~_d_first_beats1_decode_T_1; // @[package.scala:243:{46,76}]
wire [2:0] d_first_beats1_decode = _d_first_beats1_decode_T_2[5:3]; // @[package.scala:243:46]
wire d_first_beats1_opdata = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7]
wire d_first_beats1_opdata_1 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7]
wire d_first_beats1_opdata_2 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7]
wire [2:0] d_first_beats1 = d_first_beats1_opdata ? d_first_beats1_decode : 3'h0; // @[Edges.scala:106:36, :220:59, :221:14]
reg [2:0] d_first_counter; // @[Edges.scala:229:27]
wire [3:0] _d_first_counter1_T = {1'h0, d_first_counter} - 4'h1; // @[Edges.scala:229:27, :230:28]
wire [2:0] d_first_counter1 = _d_first_counter1_T[2:0]; // @[Edges.scala:230:28]
wire d_first = d_first_counter == 3'h0; // @[Edges.scala:229:27, :231:25]
wire _d_first_last_T = d_first_counter == 3'h1; // @[Edges.scala:229:27, :232:25]
wire _d_first_last_T_1 = d_first_beats1 == 3'h0; // @[Edges.scala:221:14, :232:43]
wire d_first_last = _d_first_last_T | _d_first_last_T_1; // @[Edges.scala:232:{25,33,43}]
wire d_first_done = d_first_last & _d_first_T; // @[Decoupled.scala:51:35]
wire [2:0] _d_first_count_T = ~d_first_counter1; // @[Edges.scala:230:28, :234:27]
wire [2:0] d_first_count = d_first_beats1 & _d_first_count_T; // @[Edges.scala:221:14, :234:{25,27}]
wire [2:0] _d_first_counter_T = d_first ? d_first_beats1 : d_first_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
reg [2:0] opcode_1; // @[Monitor.scala:538:22]
reg [1:0] param_1; // @[Monitor.scala:539:22]
reg [2:0] size_1; // @[Monitor.scala:540:22]
reg [6:0] source_1; // @[Monitor.scala:541:22]
reg sink; // @[Monitor.scala:542:22]
reg denied; // @[Monitor.scala:543:22]
reg [64:0] inflight; // @[Monitor.scala:614:27]
reg [259:0] inflight_opcodes; // @[Monitor.scala:616:35]
reg [259:0] inflight_sizes; // @[Monitor.scala:618:33]
wire [5:0] _a_first_beats1_decode_T_4 = _a_first_beats1_decode_T_3[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] _a_first_beats1_decode_T_5 = ~_a_first_beats1_decode_T_4; // @[package.scala:243:{46,76}]
wire [2:0] a_first_beats1_decode_1 = _a_first_beats1_decode_T_5[5:3]; // @[package.scala:243:46]
wire a_first_beats1_opdata_1 = ~_a_first_beats1_opdata_T_1; // @[Edges.scala:92:{28,37}]
wire [2:0] a_first_beats1_1 = a_first_beats1_opdata_1 ? a_first_beats1_decode_1 : 3'h0; // @[Edges.scala:92:28, :220:59, :221:14]
reg [2:0] a_first_counter_1; // @[Edges.scala:229:27]
wire [3:0] _a_first_counter1_T_1 = {1'h0, a_first_counter_1} - 4'h1; // @[Edges.scala:229:27, :230:28]
wire [2:0] a_first_counter1_1 = _a_first_counter1_T_1[2:0]; // @[Edges.scala:230:28]
wire a_first_1 = a_first_counter_1 == 3'h0; // @[Edges.scala:229:27, :231:25]
wire _a_first_last_T_2 = a_first_counter_1 == 3'h1; // @[Edges.scala:229:27, :232:25]
wire _a_first_last_T_3 = a_first_beats1_1 == 3'h0; // @[Edges.scala:221:14, :232:43]
wire a_first_last_1 = _a_first_last_T_2 | _a_first_last_T_3; // @[Edges.scala:232:{25,33,43}]
wire a_first_done_1 = a_first_last_1 & _a_first_T_1; // @[Decoupled.scala:51:35]
wire [2:0] _a_first_count_T_1 = ~a_first_counter1_1; // @[Edges.scala:230:28, :234:27]
wire [2:0] a_first_count_1 = a_first_beats1_1 & _a_first_count_T_1; // @[Edges.scala:221:14, :234:{25,27}]
wire [2:0] _a_first_counter_T_1 = a_first_1 ? a_first_beats1_1 : a_first_counter1_1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
wire [5:0] _d_first_beats1_decode_T_4 = _d_first_beats1_decode_T_3[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] _d_first_beats1_decode_T_5 = ~_d_first_beats1_decode_T_4; // @[package.scala:243:{46,76}]
wire [2:0] d_first_beats1_decode_1 = _d_first_beats1_decode_T_5[5:3]; // @[package.scala:243:46]
wire [2:0] d_first_beats1_1 = d_first_beats1_opdata_1 ? d_first_beats1_decode_1 : 3'h0; // @[Edges.scala:106:36, :220:59, :221:14]
reg [2:0] d_first_counter_1; // @[Edges.scala:229:27]
wire [3:0] _d_first_counter1_T_1 = {1'h0, d_first_counter_1} - 4'h1; // @[Edges.scala:229:27, :230:28]
wire [2:0] d_first_counter1_1 = _d_first_counter1_T_1[2:0]; // @[Edges.scala:230:28]
wire d_first_1 = d_first_counter_1 == 3'h0; // @[Edges.scala:229:27, :231:25]
wire _d_first_last_T_2 = d_first_counter_1 == 3'h1; // @[Edges.scala:229:27, :232:25]
wire _d_first_last_T_3 = d_first_beats1_1 == 3'h0; // @[Edges.scala:221:14, :232:43]
wire d_first_last_1 = _d_first_last_T_2 | _d_first_last_T_3; // @[Edges.scala:232:{25,33,43}]
wire d_first_done_1 = d_first_last_1 & _d_first_T_1; // @[Decoupled.scala:51:35]
wire [2:0] _d_first_count_T_1 = ~d_first_counter1_1; // @[Edges.scala:230:28, :234:27]
wire [2:0] d_first_count_1 = d_first_beats1_1 & _d_first_count_T_1; // @[Edges.scala:221:14, :234:{25,27}]
wire [2:0] _d_first_counter_T_1 = d_first_1 ? d_first_beats1_1 : d_first_counter1_1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
wire [64:0] a_set; // @[Monitor.scala:626:34]
wire [64:0] a_set_wo_ready; // @[Monitor.scala:627:34]
wire [259:0] a_opcodes_set; // @[Monitor.scala:630:33]
wire [259:0] a_sizes_set; // @[Monitor.scala:632:31]
wire [2:0] a_opcode_lookup; // @[Monitor.scala:635:35]
wire [9:0] _GEN_1 = {1'h0, io_in_d_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :637:69]
wire [9:0] _a_opcode_lookup_T; // @[Monitor.scala:637:69]
assign _a_opcode_lookup_T = _GEN_1; // @[Monitor.scala:637:69]
wire [9:0] _a_size_lookup_T; // @[Monitor.scala:641:65]
assign _a_size_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :641:65]
wire [9:0] _d_opcodes_clr_T_4; // @[Monitor.scala:680:101]
assign _d_opcodes_clr_T_4 = _GEN_1; // @[Monitor.scala:637:69, :680:101]
wire [9:0] _d_sizes_clr_T_4; // @[Monitor.scala:681:99]
assign _d_sizes_clr_T_4 = _GEN_1; // @[Monitor.scala:637:69, :681:99]
wire [9:0] _c_opcode_lookup_T; // @[Monitor.scala:749:69]
assign _c_opcode_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :749:69]
wire [9:0] _c_size_lookup_T; // @[Monitor.scala:750:67]
assign _c_size_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :750:67]
wire [9:0] _d_opcodes_clr_T_10; // @[Monitor.scala:790:101]
assign _d_opcodes_clr_T_10 = _GEN_1; // @[Monitor.scala:637:69, :790:101]
wire [9:0] _d_sizes_clr_T_10; // @[Monitor.scala:791:99]
assign _d_sizes_clr_T_10 = _GEN_1; // @[Monitor.scala:637:69, :791:99]
wire [259:0] _a_opcode_lookup_T_1 = inflight_opcodes >> _a_opcode_lookup_T; // @[Monitor.scala:616:35, :637:{44,69}]
wire [259:0] _a_opcode_lookup_T_6 = {256'h0, _a_opcode_lookup_T_1[3:0]}; // @[Monitor.scala:637:{44,97}]
wire [259:0] _a_opcode_lookup_T_7 = {1'h0, _a_opcode_lookup_T_6[259:1]}; // @[Monitor.scala:637:{97,152}]
assign a_opcode_lookup = _a_opcode_lookup_T_7[2:0]; // @[Monitor.scala:635:35, :637:{21,152}]
wire [3:0] a_size_lookup; // @[Monitor.scala:639:33]
wire [259:0] _a_size_lookup_T_1 = inflight_sizes >> _a_size_lookup_T; // @[Monitor.scala:618:33, :641:{40,65}]
wire [259:0] _a_size_lookup_T_6 = {256'h0, _a_size_lookup_T_1[3:0]}; // @[Monitor.scala:641:{40,91}]
wire [259:0] _a_size_lookup_T_7 = {1'h0, _a_size_lookup_T_6[259:1]}; // @[Monitor.scala:641:{91,144}]
assign a_size_lookup = _a_size_lookup_T_7[3:0]; // @[Monitor.scala:639:33, :641:{19,144}]
wire [3:0] a_opcodes_set_interm; // @[Monitor.scala:646:40]
wire [3:0] a_sizes_set_interm; // @[Monitor.scala:648:38]
wire _same_cycle_resp_T = io_in_a_valid_0 & a_first_1; // @[Monitor.scala:36:7, :651:26, :684:44]
wire [127:0] _GEN_2 = 128'h1 << io_in_a_bits_source_0; // @[OneHot.scala:58:35]
wire [127:0] _a_set_wo_ready_T; // @[OneHot.scala:58:35]
assign _a_set_wo_ready_T = _GEN_2; // @[OneHot.scala:58:35]
wire [127:0] _a_set_T; // @[OneHot.scala:58:35]
assign _a_set_T = _GEN_2; // @[OneHot.scala:58:35]
assign a_set_wo_ready = _same_cycle_resp_T ? _a_set_wo_ready_T[64:0] : 65'h0; // @[OneHot.scala:58:35]
wire _T_1232 = _T_1306 & a_first_1; // @[Decoupled.scala:51:35]
assign a_set = _T_1232 ? _a_set_T[64:0] : 65'h0; // @[OneHot.scala:58:35]
wire [3:0] _a_opcodes_set_interm_T = {io_in_a_bits_opcode_0, 1'h0}; // @[Monitor.scala:36:7, :657:53]
wire [3:0] _a_opcodes_set_interm_T_1 = {_a_opcodes_set_interm_T[3:1], 1'h1}; // @[Monitor.scala:657:{53,61}]
assign a_opcodes_set_interm = _T_1232 ? _a_opcodes_set_interm_T_1 : 4'h0; // @[Monitor.scala:646:40, :655:{25,70}, :657:{28,61}]
wire [3:0] _a_sizes_set_interm_T = {io_in_a_bits_size_0, 1'h0}; // @[Monitor.scala:36:7, :658:51]
wire [3:0] _a_sizes_set_interm_T_1 = {_a_sizes_set_interm_T[3:1], 1'h1}; // @[Monitor.scala:658:{51,59}]
assign a_sizes_set_interm = _T_1232 ? _a_sizes_set_interm_T_1 : 4'h0; // @[Monitor.scala:648:38, :655:{25,70}, :658:{28,59}]
wire [9:0] _GEN_3 = {1'h0, io_in_a_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :659:79]
wire [9:0] _a_opcodes_set_T; // @[Monitor.scala:659:79]
assign _a_opcodes_set_T = _GEN_3; // @[Monitor.scala:659:79]
wire [9:0] _a_sizes_set_T; // @[Monitor.scala:660:77]
assign _a_sizes_set_T = _GEN_3; // @[Monitor.scala:659:79, :660:77]
wire [1026:0] _a_opcodes_set_T_1 = {1023'h0, a_opcodes_set_interm} << _a_opcodes_set_T; // @[Monitor.scala:646:40, :659:{54,79}]
assign a_opcodes_set = _T_1232 ? _a_opcodes_set_T_1[259:0] : 260'h0; // @[Monitor.scala:630:33, :655:{25,70}, :659:{28,54}]
wire [1026:0] _a_sizes_set_T_1 = {1023'h0, a_sizes_set_interm} << _a_sizes_set_T; // @[Monitor.scala:648:38, :659:54, :660:{52,77}]
assign a_sizes_set = _T_1232 ? _a_sizes_set_T_1[259:0] : 260'h0; // @[Monitor.scala:632:31, :655:{25,70}, :660:{28,52}]
wire [64:0] d_clr; // @[Monitor.scala:664:34]
wire [64:0] d_clr_wo_ready; // @[Monitor.scala:665:34]
wire [259:0] d_opcodes_clr; // @[Monitor.scala:668:33]
wire [259:0] d_sizes_clr; // @[Monitor.scala:670:31]
wire _GEN_4 = io_in_d_bits_opcode_0 == 3'h6; // @[Monitor.scala:36:7, :673:46]
wire d_release_ack; // @[Monitor.scala:673:46]
assign d_release_ack = _GEN_4; // @[Monitor.scala:673:46]
wire d_release_ack_1; // @[Monitor.scala:783:46]
assign d_release_ack_1 = _GEN_4; // @[Monitor.scala:673:46, :783:46]
wire _T_1278 = io_in_d_valid_0 & d_first_1; // @[Monitor.scala:36:7, :674:26]
wire [127:0] _GEN_5 = 128'h1 << io_in_d_bits_source_0; // @[OneHot.scala:58:35]
wire [127:0] _d_clr_wo_ready_T; // @[OneHot.scala:58:35]
assign _d_clr_wo_ready_T = _GEN_5; // @[OneHot.scala:58:35]
wire [127:0] _d_clr_T; // @[OneHot.scala:58:35]
assign _d_clr_T = _GEN_5; // @[OneHot.scala:58:35]
wire [127:0] _d_clr_wo_ready_T_1; // @[OneHot.scala:58:35]
assign _d_clr_wo_ready_T_1 = _GEN_5; // @[OneHot.scala:58:35]
wire [127:0] _d_clr_T_1; // @[OneHot.scala:58:35]
assign _d_clr_T_1 = _GEN_5; // @[OneHot.scala:58:35]
assign d_clr_wo_ready = _T_1278 & ~d_release_ack ? _d_clr_wo_ready_T[64:0] : 65'h0; // @[OneHot.scala:58:35]
wire _T_1247 = _T_1379 & d_first_1 & ~d_release_ack; // @[Decoupled.scala:51:35]
assign d_clr = _T_1247 ? _d_clr_T[64:0] : 65'h0; // @[OneHot.scala:58:35]
wire [1038:0] _d_opcodes_clr_T_5 = 1039'hF << _d_opcodes_clr_T_4; // @[Monitor.scala:680:{76,101}]
assign d_opcodes_clr = _T_1247 ? _d_opcodes_clr_T_5[259:0] : 260'h0; // @[Monitor.scala:668:33, :678:{25,70,89}, :680:{21,76}]
wire [1038:0] _d_sizes_clr_T_5 = 1039'hF << _d_sizes_clr_T_4; // @[Monitor.scala:681:{74,99}]
assign d_sizes_clr = _T_1247 ? _d_sizes_clr_T_5[259:0] : 260'h0; // @[Monitor.scala:670:31, :678:{25,70,89}, :681:{21,74}]
wire _same_cycle_resp_T_1 = _same_cycle_resp_T; // @[Monitor.scala:684:{44,55}]
wire _same_cycle_resp_T_2 = io_in_a_bits_source_0 == io_in_d_bits_source_0; // @[Monitor.scala:36:7, :684:113]
wire same_cycle_resp = _same_cycle_resp_T_1 & _same_cycle_resp_T_2; // @[Monitor.scala:684:{55,88,113}]
wire [64:0] _inflight_T = inflight | a_set; // @[Monitor.scala:614:27, :626:34, :705:27]
wire [64:0] _inflight_T_1 = ~d_clr; // @[Monitor.scala:664:34, :705:38]
wire [64:0] _inflight_T_2 = _inflight_T & _inflight_T_1; // @[Monitor.scala:705:{27,36,38}]
wire [259:0] _inflight_opcodes_T = inflight_opcodes | a_opcodes_set; // @[Monitor.scala:616:35, :630:33, :706:43]
wire [259:0] _inflight_opcodes_T_1 = ~d_opcodes_clr; // @[Monitor.scala:668:33, :706:62]
wire [259:0] _inflight_opcodes_T_2 = _inflight_opcodes_T & _inflight_opcodes_T_1; // @[Monitor.scala:706:{43,60,62}]
wire [259:0] _inflight_sizes_T = inflight_sizes | a_sizes_set; // @[Monitor.scala:618:33, :632:31, :707:39]
wire [259:0] _inflight_sizes_T_1 = ~d_sizes_clr; // @[Monitor.scala:670:31, :707:56]
wire [259:0] _inflight_sizes_T_2 = _inflight_sizes_T & _inflight_sizes_T_1; // @[Monitor.scala:707:{39,54,56}]
reg [31:0] watchdog; // @[Monitor.scala:709:27]
wire [32:0] _watchdog_T = {1'h0, watchdog} + 33'h1; // @[Monitor.scala:709:27, :714:26]
wire [31:0] _watchdog_T_1 = _watchdog_T[31:0]; // @[Monitor.scala:714:26]
reg [64:0] inflight_1; // @[Monitor.scala:726:35]
wire [64:0] _inflight_T_3 = inflight_1; // @[Monitor.scala:726:35, :814:35]
reg [259:0] inflight_opcodes_1; // @[Monitor.scala:727:35]
wire [259:0] _inflight_opcodes_T_3 = inflight_opcodes_1; // @[Monitor.scala:727:35, :815:43]
reg [259:0] inflight_sizes_1; // @[Monitor.scala:728:35]
wire [259:0] _inflight_sizes_T_3 = inflight_sizes_1; // @[Monitor.scala:728:35, :816:41]
wire [5:0] _d_first_beats1_decode_T_7 = _d_first_beats1_decode_T_6[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] _d_first_beats1_decode_T_8 = ~_d_first_beats1_decode_T_7; // @[package.scala:243:{46,76}]
wire [2:0] d_first_beats1_decode_2 = _d_first_beats1_decode_T_8[5:3]; // @[package.scala:243:46]
wire [2:0] d_first_beats1_2 = d_first_beats1_opdata_2 ? d_first_beats1_decode_2 : 3'h0; // @[Edges.scala:106:36, :220:59, :221:14]
reg [2:0] d_first_counter_2; // @[Edges.scala:229:27]
wire [3:0] _d_first_counter1_T_2 = {1'h0, d_first_counter_2} - 4'h1; // @[Edges.scala:229:27, :230:28]
wire [2:0] d_first_counter1_2 = _d_first_counter1_T_2[2:0]; // @[Edges.scala:230:28]
wire d_first_2 = d_first_counter_2 == 3'h0; // @[Edges.scala:229:27, :231:25]
wire _d_first_last_T_4 = d_first_counter_2 == 3'h1; // @[Edges.scala:229:27, :232:25]
wire _d_first_last_T_5 = d_first_beats1_2 == 3'h0; // @[Edges.scala:221:14, :232:43]
wire d_first_last_2 = _d_first_last_T_4 | _d_first_last_T_5; // @[Edges.scala:232:{25,33,43}]
wire d_first_done_2 = d_first_last_2 & _d_first_T_2; // @[Decoupled.scala:51:35]
wire [2:0] _d_first_count_T_2 = ~d_first_counter1_2; // @[Edges.scala:230:28, :234:27]
wire [2:0] d_first_count_2 = d_first_beats1_2 & _d_first_count_T_2; // @[Edges.scala:221:14, :234:{25,27}]
wire [2:0] _d_first_counter_T_2 = d_first_2 ? d_first_beats1_2 : d_first_counter1_2; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
wire [3:0] c_opcode_lookup; // @[Monitor.scala:747:35]
wire [3:0] c_size_lookup; // @[Monitor.scala:748:35]
wire [259:0] _c_opcode_lookup_T_1 = inflight_opcodes_1 >> _c_opcode_lookup_T; // @[Monitor.scala:727:35, :749:{44,69}]
wire [259:0] _c_opcode_lookup_T_6 = {256'h0, _c_opcode_lookup_T_1[3:0]}; // @[Monitor.scala:749:{44,97}]
wire [259:0] _c_opcode_lookup_T_7 = {1'h0, _c_opcode_lookup_T_6[259:1]}; // @[Monitor.scala:749:{97,152}]
assign c_opcode_lookup = _c_opcode_lookup_T_7[3:0]; // @[Monitor.scala:747:35, :749:{21,152}]
wire [259:0] _c_size_lookup_T_1 = inflight_sizes_1 >> _c_size_lookup_T; // @[Monitor.scala:728:35, :750:{42,67}]
wire [259:0] _c_size_lookup_T_6 = {256'h0, _c_size_lookup_T_1[3:0]}; // @[Monitor.scala:750:{42,93}]
wire [259:0] _c_size_lookup_T_7 = {1'h0, _c_size_lookup_T_6[259:1]}; // @[Monitor.scala:750:{93,146}]
assign c_size_lookup = _c_size_lookup_T_7[3:0]; // @[Monitor.scala:748:35, :750:{21,146}]
wire [64:0] d_clr_1; // @[Monitor.scala:774:34]
wire [64:0] d_clr_wo_ready_1; // @[Monitor.scala:775:34]
wire [259:0] d_opcodes_clr_1; // @[Monitor.scala:776:34]
wire [259:0] d_sizes_clr_1; // @[Monitor.scala:777:34]
wire _T_1350 = io_in_d_valid_0 & d_first_2; // @[Monitor.scala:36:7, :784:26]
assign d_clr_wo_ready_1 = _T_1350 & d_release_ack_1 ? _d_clr_wo_ready_T_1[64:0] : 65'h0; // @[OneHot.scala:58:35]
wire _T_1332 = _T_1379 & d_first_2 & d_release_ack_1; // @[Decoupled.scala:51:35]
assign d_clr_1 = _T_1332 ? _d_clr_T_1[64:0] : 65'h0; // @[OneHot.scala:58:35]
wire [1038:0] _d_opcodes_clr_T_11 = 1039'hF << _d_opcodes_clr_T_10; // @[Monitor.scala:790:{76,101}]
assign d_opcodes_clr_1 = _T_1332 ? _d_opcodes_clr_T_11[259:0] : 260'h0; // @[Monitor.scala:776:34, :788:{25,70,88}, :790:{21,76}]
wire [1038:0] _d_sizes_clr_T_11 = 1039'hF << _d_sizes_clr_T_10; // @[Monitor.scala:791:{74,99}]
assign d_sizes_clr_1 = _T_1332 ? _d_sizes_clr_T_11[259:0] : 260'h0; // @[Monitor.scala:777:34, :788:{25,70,88}, :791:{21,74}]
wire _same_cycle_resp_T_8 = io_in_d_bits_source_0 == 7'h0; // @[Monitor.scala:36:7, :795:113]
wire [64:0] _inflight_T_4 = ~d_clr_1; // @[Monitor.scala:774:34, :814:46]
wire [64:0] _inflight_T_5 = _inflight_T_3 & _inflight_T_4; // @[Monitor.scala:814:{35,44,46}]
wire [259:0] _inflight_opcodes_T_4 = ~d_opcodes_clr_1; // @[Monitor.scala:776:34, :815:62]
wire [259:0] _inflight_opcodes_T_5 = _inflight_opcodes_T_3 & _inflight_opcodes_T_4; // @[Monitor.scala:815:{43,60,62}]
wire [259:0] _inflight_sizes_T_4 = ~d_sizes_clr_1; // @[Monitor.scala:777:34, :816:58]
wire [259:0] _inflight_sizes_T_5 = _inflight_sizes_T_3 & _inflight_sizes_T_4; // @[Monitor.scala:816:{41,56,58}]
reg [31:0] watchdog_1; // @[Monitor.scala:818:27] |
Generate the Verilog code corresponding to the following Chisel files.
File Monitor.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceLine
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import freechips.rocketchip.diplomacy.EnableMonitors
import freechips.rocketchip.formal.{MonitorDirection, IfThen, Property, PropertyClass, TestplanTestType, TLMonitorStrictMode}
import freechips.rocketchip.util.PlusArg
case class TLMonitorArgs(edge: TLEdge)
abstract class TLMonitorBase(args: TLMonitorArgs) extends Module
{
val io = IO(new Bundle {
val in = Input(new TLBundle(args.edge.bundle))
})
def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit
legalize(io.in, args.edge, reset)
}
object TLMonitor {
def apply(enable: Boolean, node: TLNode)(implicit p: Parameters): TLNode = {
if (enable) {
EnableMonitors { implicit p => node := TLEphemeralNode()(ValName("monitor")) }
} else { node }
}
}
class TLMonitor(args: TLMonitorArgs, monitorDir: MonitorDirection = MonitorDirection.Monitor) extends TLMonitorBase(args)
{
require (args.edge.params(TLMonitorStrictMode) || (! args.edge.params(TestplanTestType).formal))
val cover_prop_class = PropertyClass.Default
//Like assert but can flip to being an assumption for formal verification
def monAssert(cond: Bool, message: String): Unit =
if (monitorDir == MonitorDirection.Monitor) {
assert(cond, message)
} else {
Property(monitorDir, cond, message, PropertyClass.Default)
}
def assume(cond: Bool, message: String): Unit =
if (monitorDir == MonitorDirection.Monitor) {
assert(cond, message)
} else {
Property(monitorDir.flip, cond, message, PropertyClass.Default)
}
def extra = {
args.edge.sourceInfo match {
case SourceLine(filename, line, col) => s" (connected at $filename:$line:$col)"
case _ => ""
}
}
def visible(address: UInt, source: UInt, edge: TLEdge) =
edge.client.clients.map { c =>
!c.sourceId.contains(source) ||
c.visibility.map(_.contains(address)).reduce(_ || _)
}.reduce(_ && _)
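// In other words: for every client that could have issued this source ID, the address must fall
// inside that client's declared visibility; clients whose sourceId range excludes the source are ignored.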
def legalizeFormatA(bundle: TLBundleA, edge: TLEdge): Unit = {
//switch this flag to turn on diplomacy in error messages
def diplomacyInfo = if (true) "" else "\nThe diplomacy information for the edge is as follows:\n" + edge.formatEdge + "\n"
monAssert (TLMessages.isA(bundle.opcode), "'A' channel has invalid opcode" + extra)
// Reuse these subexpressions to save some firrtl lines
val source_ok = edge.client.contains(bundle.source)
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val mask = edge.full_mask(bundle)
monAssert (visible(edge.address(bundle), bundle.source, edge), "'A' channel carries an address illegal for the specified bank visibility")
//The monitor doesn't check for acquire T vs acquire B; it assumes that acquire B implies acquire T and only checks for acquire B
//TODO: check for acquireT?
when (bundle.opcode === TLMessages.AcquireBlock) {
monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock from a client which does not support Probe" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel AcquireBlock carries invalid source ID" + diplomacyInfo + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquireBlock smaller than a beat" + extra)
monAssert (is_aligned, "'A' channel AcquireBlock address not aligned to size" + extra)
monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquireBlock carries invalid grow param" + extra)
monAssert (~bundle.mask === 0.U, "'A' channel AcquireBlock contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel AcquireBlock is corrupt" + extra)
}
when (bundle.opcode === TLMessages.AcquirePerm) {
monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm from a client which does not support Probe" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel AcquirePerm carries invalid source ID" + diplomacyInfo + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquirePerm smaller than a beat" + extra)
monAssert (is_aligned, "'A' channel AcquirePerm address not aligned to size" + extra)
monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquirePerm carries invalid grow param" + extra)
monAssert (bundle.param =/= TLPermissions.NtoB, "'A' channel AcquirePerm requests NtoB" + extra)
monAssert (~bundle.mask === 0.U, "'A' channel AcquirePerm contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel AcquirePerm is corrupt" + extra)
}
when (bundle.opcode === TLMessages.Get) {
monAssert (edge.master.emitsGet(bundle.source, bundle.size), "'A' channel carries Get type which master claims it can't emit" + diplomacyInfo + extra)
monAssert (edge.slave.supportsGetSafe(edge.address(bundle), bundle.size, None), "'A' channel carries Get type which slave claims it can't support" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel Get carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Get address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel Get carries invalid param" + extra)
monAssert (bundle.mask === mask, "'A' channel Get contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel Get is corrupt" + extra)
}
when (bundle.opcode === TLMessages.PutFullData) {
monAssert (edge.master.emitsPutFull(bundle.source, bundle.size) && edge.slave.supportsPutFullSafe(edge.address(bundle), bundle.size), "'A' channel carries PutFull type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel PutFull carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel PutFull address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel PutFull carries invalid param" + extra)
monAssert (bundle.mask === mask, "'A' channel PutFull contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.PutPartialData) {
monAssert (edge.master.emitsPutPartial(bundle.source, bundle.size) && edge.slave.supportsPutPartialSafe(edge.address(bundle), bundle.size), "'A' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel PutPartial carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel PutPartial address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel PutPartial carries invalid param" + extra)
monAssert ((bundle.mask & ~mask) === 0.U, "'A' channel PutPartial contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.ArithmeticData) {
monAssert (edge.master.emitsArithmetic(bundle.source, bundle.size) && edge.slave.supportsArithmeticSafe(edge.address(bundle), bundle.size), "'A' channel carries Arithmetic type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Arithmetic carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Arithmetic address not aligned to size" + extra)
monAssert (TLAtomics.isArithmetic(bundle.param), "'A' channel Arithmetic carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Arithmetic contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.LogicalData) {
monAssert (edge.master.emitsLogical(bundle.source, bundle.size) && edge.slave.supportsLogicalSafe(edge.address(bundle), bundle.size), "'A' channel carries Logical type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Logical carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Logical address not aligned to size" + extra)
monAssert (TLAtomics.isLogical(bundle.param), "'A' channel Logical carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Logical contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.Hint) {
monAssert (edge.master.emitsHint(bundle.source, bundle.size) && edge.slave.supportsHintSafe(edge.address(bundle), bundle.size), "'A' channel carries Hint type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Hint carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Hint address not aligned to size" + extra)
monAssert (TLHints.isHints(bundle.param), "'A' channel Hint carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Hint contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel Hint is corrupt" + extra)
}
}
def legalizeFormatB(bundle: TLBundleB, edge: TLEdge): Unit = {
monAssert (TLMessages.isB(bundle.opcode), "'B' channel has invalid opcode" + extra)
monAssert (visible(edge.address(bundle), bundle.source, edge), "'B' channel carries an address illegal for the specified bank visibility")
// Reuse these subexpressions to save some firrtl lines
val address_ok = edge.manager.containsSafe(edge.address(bundle))
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val mask = edge.full_mask(bundle)
val legal_source = Mux1H(edge.client.find(bundle.source), edge.client.clients.map(c => c.sourceId.start.U)) === bundle.source
when (bundle.opcode === TLMessages.Probe) {
assume (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'B' channel carries Probe type which is unexpected using diplomatic parameters" + extra)
assume (address_ok, "'B' channel Probe carries unmanaged address" + extra)
assume (legal_source, "'B' channel Probe carries source that is not first source" + extra)
assume (is_aligned, "'B' channel Probe address not aligned to size" + extra)
assume (TLPermissions.isCap(bundle.param), "'B' channel Probe carries invalid cap param" + extra)
assume (bundle.mask === mask, "'B' channel Probe contains invalid mask" + extra)
assume (!bundle.corrupt, "'B' channel Probe is corrupt" + extra)
}
when (bundle.opcode === TLMessages.Get) {
monAssert (edge.master.supportsGet(edge.source(bundle), bundle.size) && edge.slave.emitsGetSafe(edge.address(bundle), bundle.size), "'B' channel carries Get type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel Get carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Get carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Get address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel Get carries invalid param" + extra)
monAssert (bundle.mask === mask, "'B' channel Get contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'B' channel Get is corrupt" + extra)
}
when (bundle.opcode === TLMessages.PutFullData) {
monAssert (edge.master.supportsPutFull(edge.source(bundle), bundle.size) && edge.slave.emitsPutFullSafe(edge.address(bundle), bundle.size), "'B' channel carries PutFull type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel PutFull carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel PutFull carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel PutFull address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel PutFull carries invalid param" + extra)
monAssert (bundle.mask === mask, "'B' channel PutFull contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.PutPartialData) {
monAssert (edge.master.supportsPutPartial(edge.source(bundle), bundle.size) && edge.slave.emitsPutPartialSafe(edge.address(bundle), bundle.size), "'B' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel PutPartial carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel PutPartial carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel PutPartial address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel PutPartial carries invalid param" + extra)
monAssert ((bundle.mask & ~mask) === 0.U, "'B' channel PutPartial contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.ArithmeticData) {
monAssert (edge.master.supportsArithmetic(edge.source(bundle), bundle.size) && edge.slave.emitsArithmeticSafe(edge.address(bundle), bundle.size), "'B' channel carries Arithmetic type unsupported by master" + extra)
monAssert (address_ok, "'B' channel Arithmetic carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Arithmetic carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Arithmetic address not aligned to size" + extra)
monAssert (TLAtomics.isArithmetic(bundle.param), "'B' channel Arithmetic carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'B' channel Arithmetic contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.LogicalData) {
monAssert (edge.master.supportsLogical(edge.source(bundle), bundle.size) && edge.slave.emitsLogicalSafe(edge.address(bundle), bundle.size), "'B' channel carries Logical type unsupported by client" + extra)
monAssert (address_ok, "'B' channel Logical carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Logical carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Logical address not aligned to size" + extra)
monAssert (TLAtomics.isLogical(bundle.param), "'B' channel Logical carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'B' channel Logical contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.Hint) {
monAssert (edge.master.supportsHint(edge.source(bundle), bundle.size) && edge.slave.emitsHintSafe(edge.address(bundle), bundle.size), "'B' channel carries Hint type unsupported by client" + extra)
monAssert (address_ok, "'B' channel Hint carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Hint carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Hint address not aligned to size" + extra)
monAssert (bundle.mask === mask, "'B' channel Hint contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'B' channel Hint is corrupt" + extra)
}
}
def legalizeFormatC(bundle: TLBundleC, edge: TLEdge): Unit = {
monAssert (TLMessages.isC(bundle.opcode), "'C' channel has invalid opcode" + extra)
val source_ok = edge.client.contains(bundle.source)
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val address_ok = edge.manager.containsSafe(edge.address(bundle))
monAssert (visible(edge.address(bundle), bundle.source, edge), "'C' channel carries an address illegal for the specified bank visibility")
when (bundle.opcode === TLMessages.ProbeAck) {
monAssert (address_ok, "'C' channel ProbeAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel ProbeAck carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAck smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ProbeAck address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAck carries invalid report param" + extra)
monAssert (!bundle.corrupt, "'C' channel ProbeAck is corrupt" + extra)
}
when (bundle.opcode === TLMessages.ProbeAckData) {
monAssert (address_ok, "'C' channel ProbeAckData carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel ProbeAckData carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAckData smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ProbeAckData address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAckData carries invalid report param" + extra)
}
when (bundle.opcode === TLMessages.Release) {
monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries Release type unsupported by manager" + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra)
monAssert (source_ok, "'C' channel Release carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel Release smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel Release address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel Release carries invalid report param" + extra)
monAssert (!bundle.corrupt, "'C' channel Release is corrupt" + extra)
}
when (bundle.opcode === TLMessages.ReleaseData) {
monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData type unsupported by manager" + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData from a client which does not support Probe" + extra)
monAssert (source_ok, "'C' channel ReleaseData carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ReleaseData smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ReleaseData address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ReleaseData carries invalid report param" + extra)
}
when (bundle.opcode === TLMessages.AccessAck) {
monAssert (address_ok, "'C' channel AccessAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel AccessAck carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel AccessAck address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel AccessAck carries invalid param" + extra)
monAssert (!bundle.corrupt, "'C' channel AccessAck is corrupt" + extra)
}
when (bundle.opcode === TLMessages.AccessAckData) {
monAssert (address_ok, "'C' channel AccessAckData carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel AccessAckData carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel AccessAckData address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel AccessAckData carries invalid param" + extra)
}
when (bundle.opcode === TLMessages.HintAck) {
monAssert (address_ok, "'C' channel HintAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel HintAck carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel HintAck address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel HintAck carries invalid param" + extra)
monAssert (!bundle.corrupt, "'C' channel HintAck is corrupt" + extra)
}
}
def legalizeFormatD(bundle: TLBundleD, edge: TLEdge): Unit = {
assume (TLMessages.isD(bundle.opcode), "'D' channel has invalid opcode" + extra)
val source_ok = edge.client.contains(bundle.source)
val sink_ok = bundle.sink < edge.manager.endSinkId.U
val deny_put_ok = edge.manager.mayDenyPut.B
val deny_get_ok = edge.manager.mayDenyGet.B
when (bundle.opcode === TLMessages.ReleaseAck) {
assume (source_ok, "'D' channel ReleaseAck carries invalid source ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel ReleaseAck smaller than a beat" + extra)
assume (bundle.param === 0.U, "'D' channel ReleaseAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel ReleaseAck is corrupt" + extra)
assume (!bundle.denied, "'D' channel ReleaseAck is denied" + extra)
}
when (bundle.opcode === TLMessages.Grant) {
assume (source_ok, "'D' channel Grant carries invalid source ID" + extra)
assume (sink_ok, "'D' channel Grant carries invalid sink ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel Grant smaller than a beat" + extra)
assume (TLPermissions.isCap(bundle.param), "'D' channel Grant carries invalid cap param" + extra)
assume (bundle.param =/= TLPermissions.toN, "'D' channel Grant carries toN param" + extra)
assume (!bundle.corrupt, "'D' channel Grant is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel Grant is denied" + extra)
}
when (bundle.opcode === TLMessages.GrantData) {
assume (source_ok, "'D' channel GrantData carries invalid source ID" + extra)
assume (sink_ok, "'D' channel GrantData carries invalid sink ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel GrantData smaller than a beat" + extra)
assume (TLPermissions.isCap(bundle.param), "'D' channel GrantData carries invalid cap param" + extra)
assume (bundle.param =/= TLPermissions.toN, "'D' channel GrantData carries toN param" + extra)
assume (!bundle.denied || bundle.corrupt, "'D' channel GrantData is denied but not corrupt" + extra)
assume (deny_get_ok || !bundle.denied, "'D' channel GrantData is denied" + extra)
}
when (bundle.opcode === TLMessages.AccessAck) {
assume (source_ok, "'D' channel AccessAck carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel AccessAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel AccessAck is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel AccessAck is denied" + extra)
}
when (bundle.opcode === TLMessages.AccessAckData) {
assume (source_ok, "'D' channel AccessAckData carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel AccessAckData carries invalid param" + extra)
assume (!bundle.denied || bundle.corrupt, "'D' channel AccessAckData is denied but not corrupt" + extra)
assume (deny_get_ok || !bundle.denied, "'D' channel AccessAckData is denied" + extra)
}
when (bundle.opcode === TLMessages.HintAck) {
assume (source_ok, "'D' channel HintAck carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel HintAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel HintAck is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel HintAck is denied" + extra)
}
}
def legalizeFormatE(bundle: TLBundleE, edge: TLEdge): Unit = {
val sink_ok = bundle.sink < edge.manager.endSinkId.U
monAssert (sink_ok, "'E' channel carries invalid sink ID" + extra)
}
def legalizeFormat(bundle: TLBundle, edge: TLEdge) = {
when (bundle.a.valid) { legalizeFormatA(bundle.a.bits, edge) }
when (bundle.d.valid) { legalizeFormatD(bundle.d.bits, edge) }
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
when (bundle.b.valid) { legalizeFormatB(bundle.b.bits, edge) }
when (bundle.c.valid) { legalizeFormatC(bundle.c.bits, edge) }
when (bundle.e.valid) { legalizeFormatE(bundle.e.bits, edge) }
} else {
monAssert (!bundle.b.valid, "'B' channel valid and not TL-C" + extra)
monAssert (!bundle.c.valid, "'C' channel valid and not TL-C" + extra)
monAssert (!bundle.e.valid, "'E' channel valid and not TL-C" + extra)
}
}
def legalizeMultibeatA(a: DecoupledIO[TLBundleA], edge: TLEdge): Unit = {
val a_first = edge.first(a.bits, a.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (a.valid && !a_first) {
monAssert (a.bits.opcode === opcode, "'A' channel opcode changed within multibeat operation" + extra)
monAssert (a.bits.param === param, "'A' channel param changed within multibeat operation" + extra)
monAssert (a.bits.size === size, "'A' channel size changed within multibeat operation" + extra)
monAssert (a.bits.source === source, "'A' channel source changed within multibeat operation" + extra)
monAssert (a.bits.address === address, "'A' channel address changed within multibeat operation" + extra)
}
when (a.fire && a_first) {
opcode := a.bits.opcode
param := a.bits.param
size := a.bits.size
source := a.bits.source
address := a.bits.address
}
}
def legalizeMultibeatB(b: DecoupledIO[TLBundleB], edge: TLEdge): Unit = {
val b_first = edge.first(b.bits, b.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (b.valid && !b_first) {
monAssert (b.bits.opcode === opcode, "'B' channel opcode changed within multibeat operation" + extra)
monAssert (b.bits.param === param, "'B' channel param changed within multibeat operation" + extra)
monAssert (b.bits.size === size, "'B' channel size changed within multibeat operation" + extra)
monAssert (b.bits.source === source, "'B' channel source changed within multibeat operation" + extra)
monAssert (b.bits.address === address, "'B' channel address changed within multibeat operation" + extra)
}
when (b.fire && b_first) {
opcode := b.bits.opcode
param := b.bits.param
size := b.bits.size
source := b.bits.source
address := b.bits.address
}
}
def legalizeADSourceFormal(bundle: TLBundle, edge: TLEdge): Unit = {
// Symbolic variable
val sym_source = Wire(UInt(edge.client.endSourceId.W))
// TODO: Connect sym_source to a fixed value for simulation and to a
// free wire in formal
sym_source := 0.U
// Type casting Int to UInt
val maxSourceId = Wire(UInt(edge.client.endSourceId.W))
maxSourceId := edge.client.endSourceId.U
// Delayed version of sym_source
val sym_source_d = Reg(UInt(edge.client.endSourceId.W))
sym_source_d := sym_source
// These will be constraints for FV setup
Property(
MonitorDirection.Monitor,
(sym_source === sym_source_d),
"sym_source should remain stable",
PropertyClass.Default)
Property(
MonitorDirection.Monitor,
(sym_source <= maxSourceId),
"sym_source should take legal value",
PropertyClass.Default)
val my_resp_pend = RegInit(false.B)
val my_opcode = Reg(UInt())
val my_size = Reg(UInt())
val a_first = bundle.a.valid && edge.first(bundle.a.bits, bundle.a.fire)
val d_first = bundle.d.valid && edge.first(bundle.d.bits, bundle.d.fire)
val my_a_first_beat = a_first && (bundle.a.bits.source === sym_source)
val my_d_first_beat = d_first && (bundle.d.bits.source === sym_source)
val my_clr_resp_pend = (bundle.d.fire && my_d_first_beat)
val my_set_resp_pend = (bundle.a.fire && my_a_first_beat && !my_clr_resp_pend)
when (my_set_resp_pend) {
my_resp_pend := true.B
} .elsewhen (my_clr_resp_pend) {
my_resp_pend := false.B
}
when (my_a_first_beat) {
my_opcode := bundle.a.bits.opcode
my_size := bundle.a.bits.size
}
val my_resp_size = Mux(my_a_first_beat, bundle.a.bits.size, my_size)
val my_resp_opcode = Mux(my_a_first_beat, bundle.a.bits.opcode, my_opcode)
val my_resp_opcode_legal = Wire(Bool())
when ((my_resp_opcode === TLMessages.Get) || (my_resp_opcode === TLMessages.ArithmeticData) ||
(my_resp_opcode === TLMessages.LogicalData)) {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAckData)
} .elsewhen ((my_resp_opcode === TLMessages.PutFullData) || (my_resp_opcode === TLMessages.PutPartialData)) {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAck)
} .otherwise {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.HintAck)
}
monAssert (IfThen(my_resp_pend, !my_a_first_beat),
"Request message should not be sent with a source ID for which a response message " +
"is already pending (not received until current cycle) for a prior request message " +
"with the same source ID" + extra)
assume (IfThen(my_clr_resp_pend, (my_set_resp_pend || my_resp_pend)),
"Response message should be accepted with a source ID only if a request message with the " +
"same source ID has been accepted or is being accepted in the current cycle" + extra)
assume (IfThen(my_d_first_beat, (my_a_first_beat || my_resp_pend)),
"Response message should be sent with a source ID only if a request message with the " +
"same source ID has been accepted or is being sent in the current cycle" + extra)
assume (IfThen(my_d_first_beat, (bundle.d.bits.size === my_resp_size)),
"If d_valid is 1, then d_size should be the same as a_size of the corresponding request " +
"message" + extra)
assume (IfThen(my_d_first_beat, my_resp_opcode_legal),
"If d_valid is 1, then d_opcode should correspond with a_opcode of the corresponding " +
"request message" + extra)
}
def legalizeMultibeatC(c: DecoupledIO[TLBundleC], edge: TLEdge): Unit = {
val c_first = edge.first(c.bits, c.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (c.valid && !c_first) {
monAssert (c.bits.opcode === opcode, "'C' channel opcode changed within multibeat operation" + extra)
monAssert (c.bits.param === param, "'C' channel param changed within multibeat operation" + extra)
monAssert (c.bits.size === size, "'C' channel size changed within multibeat operation" + extra)
monAssert (c.bits.source === source, "'C' channel source changed within multibeat operation" + extra)
monAssert (c.bits.address === address, "'C' channel address changed within multibeat operation" + extra)
}
when (c.fire && c_first) {
opcode := c.bits.opcode
param := c.bits.param
size := c.bits.size
source := c.bits.source
address := c.bits.address
}
}
def legalizeMultibeatD(d: DecoupledIO[TLBundleD], edge: TLEdge): Unit = {
val d_first = edge.first(d.bits, d.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val sink = Reg(UInt())
val denied = Reg(Bool())
when (d.valid && !d_first) {
assume (d.bits.opcode === opcode, "'D' channel opcode changed within multibeat operation" + extra)
assume (d.bits.param === param, "'D' channel param changed within multibeat operation" + extra)
assume (d.bits.size === size, "'D' channel size changed within multibeat operation" + extra)
assume (d.bits.source === source, "'D' channel source changed within multibeat operation" + extra)
assume (d.bits.sink === sink, "'D' channel sink changed within multibeat operation" + extra)
assume (d.bits.denied === denied, "'D' channel denied changed within multibeat operation" + extra)
}
when (d.fire && d_first) {
opcode := d.bits.opcode
param := d.bits.param
size := d.bits.size
source := d.bits.source
sink := d.bits.sink
denied := d.bits.denied
}
}
def legalizeMultibeat(bundle: TLBundle, edge: TLEdge): Unit = {
legalizeMultibeatA(bundle.a, edge)
legalizeMultibeatD(bundle.d, edge)
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
legalizeMultibeatB(bundle.b, edge)
legalizeMultibeatC(bundle.c, edge)
}
}
//This is left in for almond which doesn't adhere to the tilelink protocol
@deprecated("Use legalizeADSource instead if possible","")
def legalizeADSourceOld(bundle: TLBundle, edge: TLEdge): Unit = {
val inflight = RegInit(0.U(edge.client.endSourceId.W))
val a_first = edge.first(bundle.a.bits, bundle.a.fire)
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
val a_set = WireInit(0.U(edge.client.endSourceId.W))
when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) {
a_set := UIntToOH(bundle.a.bits.source)
assert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra)
}
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
assume((a_set | inflight)(bundle.d.bits.source), "'D' channel acknowledged for nothing inflight" + extra)
}
if (edge.manager.minLatency > 0) {
assume(a_set =/= d_clr || !a_set.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra)
}
inflight := (inflight | a_set) & ~d_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
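// Editor's note: PlusArg reads this limit at runtime; in simulation it is typically supplied as a
// plusarg such as +tilelink_timeout=100000 on the simulator command line, and 0 disables the check.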
assert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeADSource(bundle: TLBundle, edge: TLEdge): Unit = {
val a_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset)
val a_opcode_bus_size = 3 + 1 //opcode size is 3, but add one so that 0 is not mapped to anything
val log_a_opcode_bus_size = log2Ceil(a_opcode_bus_size)
val log_a_size_bus_size = log2Ceil(a_size_bus_size)
def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits
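// Worked example of the encoding above (editor's sketch): if edge.bundle.sizeBits were 3, then
// a_size_bus_size = 4 and log_a_size_bus_size = 2, so each source ID owns a 4-bit slot of
// inflight_sizes. A request of size 5 is stored as (5 << 1) | 1 = 11; the low bit marks the slot
// occupied, and the lookups below shift right by 1 to recover the size.
// size_to_numfullbits(4.U) = 15.U, i.e. the 4-bit mask used to isolate one slot.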
val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) // size up to avoid width error
inflight.suggestName("inflight")
val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
inflight_opcodes.suggestName("inflight_opcodes")
val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
inflight_sizes.suggestName("inflight_sizes")
val a_first = edge.first(bundle.a.bits, bundle.a.fire)
a_first.suggestName("a_first")
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
d_first.suggestName("d_first")
val a_set = WireInit(0.U(edge.client.endSourceId.W))
val a_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
a_set.suggestName("a_set")
a_set_wo_ready.suggestName("a_set_wo_ready")
val a_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
a_opcodes_set.suggestName("a_opcodes_set")
val a_sizes_set = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
a_sizes_set.suggestName("a_sizes_set")
val a_opcode_lookup = WireInit(0.U((a_opcode_bus_size - 1).W))
a_opcode_lookup.suggestName("a_opcode_lookup")
a_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_a_opcode_bus_size.U) & size_to_numfullbits(1.U << log_a_opcode_bus_size.U)) >> 1.U
val a_size_lookup = WireInit(0.U((1 << log_a_size_bus_size).W))
a_size_lookup.suggestName("a_size_lookup")
a_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_a_size_bus_size.U) & size_to_numfullbits(1.U << log_a_size_bus_size.U)) >> 1.U
val responseMap = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.Grant, TLMessages.Grant))
val responseMapSecondOption = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.GrantData, TLMessages.Grant))
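// The vectors are indexed by the A-channel opcode (per the TLMessages encoding): Put* expect
// AccessAck, Get/Arithmetic/Logical expect AccessAckData, Hint expects HintAck, and
// AcquireBlock/AcquirePerm expect Grant, with GrantData additionally legal for AcquireBlock via
// the second option.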
val a_opcodes_set_interm = WireInit(0.U(a_opcode_bus_size.W))
a_opcodes_set_interm.suggestName("a_opcodes_set_interm")
val a_sizes_set_interm = WireInit(0.U(a_size_bus_size.W))
a_sizes_set_interm.suggestName("a_sizes_set_interm")
when (bundle.a.valid && a_first && edge.isRequest(bundle.a.bits)) {
a_set_wo_ready := UIntToOH(bundle.a.bits.source)
}
when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) {
a_set := UIntToOH(bundle.a.bits.source)
a_opcodes_set_interm := (bundle.a.bits.opcode << 1.U) | 1.U
a_sizes_set_interm := (bundle.a.bits.size << 1.U) | 1.U
a_opcodes_set := (a_opcodes_set_interm) << (bundle.a.bits.source << log_a_opcode_bus_size.U)
a_sizes_set := (a_sizes_set_interm) << (bundle.a.bits.source << log_a_size_bus_size.U)
monAssert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra)
}
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
d_clr.suggestName("d_clr")
d_clr_wo_ready.suggestName("d_clr_wo_ready")
val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
d_opcodes_clr.suggestName("d_opcodes_clr")
val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
d_sizes_clr.suggestName("d_sizes_clr")
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr_wo_ready := UIntToOH(bundle.d.bits.source)
}
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
d_opcodes_clr := size_to_numfullbits(1.U << log_a_opcode_bus_size.U) << (bundle.d.bits.source << log_a_opcode_bus_size.U)
d_sizes_clr := size_to_numfullbits(1.U << log_a_size_bus_size.U) << (bundle.d.bits.source << log_a_size_bus_size.U)
}
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
val same_cycle_resp = bundle.a.valid && a_first && edge.isRequest(bundle.a.bits) && (bundle.a.bits.source === bundle.d.bits.source)
assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra)
when (same_cycle_resp) {
assume((bundle.d.bits.opcode === responseMap(bundle.a.bits.opcode)) ||
(bundle.d.bits.opcode === responseMapSecondOption(bundle.a.bits.opcode)), "'D' channel contains improper opcode response" + extra)
assume((bundle.a.bits.size === bundle.d.bits.size), "'D' channel contains improper response size" + extra)
} .otherwise {
assume((bundle.d.bits.opcode === responseMap(a_opcode_lookup)) ||
(bundle.d.bits.opcode === responseMapSecondOption(a_opcode_lookup)), "'D' channel contains improper opcode response" + extra)
assume((bundle.d.bits.size === a_size_lookup), "'D' channel contains improper response size" + extra)
}
}
when(bundle.d.valid && d_first && a_first && bundle.a.valid && (bundle.a.bits.source === bundle.d.bits.source) && !d_release_ack) {
assume((!bundle.d.ready) || bundle.a.ready, "ready check")
}
if (edge.manager.minLatency > 0) {
assume(a_set_wo_ready =/= d_clr_wo_ready || !a_set_wo_ready.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra)
}
inflight := (inflight | a_set) & ~d_clr
inflight_opcodes := (inflight_opcodes | a_opcodes_set) & ~d_opcodes_clr
inflight_sizes := (inflight_sizes | a_sizes_set) & ~d_sizes_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeCDSource(bundle: TLBundle, edge: TLEdge): Unit = {
val c_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset)
val c_opcode_bus_size = 3 + 1 //opcode size is 3, but add one so that 0 is not mapped to anything
val log_c_opcode_bus_size = log2Ceil(c_opcode_bus_size)
val log_c_size_bus_size = log2Ceil(c_size_bus_size)
def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits
val inflight = RegInit(0.U((2 max edge.client.endSourceId).W))
val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
inflight.suggestName("inflight")
inflight_opcodes.suggestName("inflight_opcodes")
inflight_sizes.suggestName("inflight_sizes")
val c_first = edge.first(bundle.c.bits, bundle.c.fire)
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
c_first.suggestName("c_first")
d_first.suggestName("d_first")
val c_set = WireInit(0.U(edge.client.endSourceId.W))
val c_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
val c_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val c_sizes_set = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
c_set.suggestName("c_set")
c_set_wo_ready.suggestName("c_set_wo_ready")
c_opcodes_set.suggestName("c_opcodes_set")
c_sizes_set.suggestName("c_sizes_set")
val c_opcode_lookup = WireInit(0.U((1 << log_c_opcode_bus_size).W))
val c_size_lookup = WireInit(0.U((1 << log_c_size_bus_size).W))
c_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_c_opcode_bus_size.U) & size_to_numfullbits(1.U << log_c_opcode_bus_size.U)) >> 1.U
c_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_c_size_bus_size.U) & size_to_numfullbits(1.U << log_c_size_bus_size.U)) >> 1.U
c_opcode_lookup.suggestName("c_opcode_lookup")
c_size_lookup.suggestName("c_size_lookup")
val c_opcodes_set_interm = WireInit(0.U(c_opcode_bus_size.W))
val c_sizes_set_interm = WireInit(0.U(c_size_bus_size.W))
c_opcodes_set_interm.suggestName("c_opcodes_set_interm")
c_sizes_set_interm.suggestName("c_sizes_set_interm")
when (bundle.c.valid && c_first && edge.isRequest(bundle.c.bits)) {
c_set_wo_ready := UIntToOH(bundle.c.bits.source)
}
when (bundle.c.fire && c_first && edge.isRequest(bundle.c.bits)) {
c_set := UIntToOH(bundle.c.bits.source)
c_opcodes_set_interm := (bundle.c.bits.opcode << 1.U) | 1.U
c_sizes_set_interm := (bundle.c.bits.size << 1.U) | 1.U
c_opcodes_set := (c_opcodes_set_interm) << (bundle.c.bits.source << log_c_opcode_bus_size.U)
c_sizes_set := (c_sizes_set_interm) << (bundle.c.bits.source << log_c_size_bus_size.U)
monAssert(!inflight(bundle.c.bits.source), "'C' channel re-used a source ID" + extra)
}
val c_probe_ack = bundle.c.bits.opcode === TLMessages.ProbeAck || bundle.c.bits.opcode === TLMessages.ProbeAckData
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
d_clr.suggestName("d_clr")
d_clr_wo_ready.suggestName("d_clr_wo_ready")
d_opcodes_clr.suggestName("d_opcodes_clr")
d_sizes_clr.suggestName("d_sizes_clr")
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
d_clr_wo_ready := UIntToOH(bundle.d.bits.source)
}
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
d_opcodes_clr := size_to_numfullbits(1.U << log_c_opcode_bus_size.U) << (bundle.d.bits.source << log_c_opcode_bus_size.U)
d_sizes_clr := size_to_numfullbits(1.U << log_c_size_bus_size.U) << (bundle.d.bits.source << log_c_size_bus_size.U)
}
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
val same_cycle_resp = bundle.c.valid && c_first && edge.isRequest(bundle.c.bits) && (bundle.c.bits.source === bundle.d.bits.source)
assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra)
when (same_cycle_resp) {
assume((bundle.d.bits.size === bundle.c.bits.size), "'D' channel contains improper response size" + extra)
} .otherwise {
assume((bundle.d.bits.size === c_size_lookup), "'D' channel contains improper response size" + extra)
}
}
when(bundle.d.valid && d_first && c_first && bundle.c.valid && (bundle.c.bits.source === bundle.d.bits.source) && d_release_ack && !c_probe_ack) {
assume((!bundle.d.ready) || bundle.c.ready, "ready check")
}
if (edge.manager.minLatency > 0) {
when (c_set_wo_ready.orR) {
assume(c_set_wo_ready =/= d_clr_wo_ready, s"'C' and 'D' concurrent, despite minlatency > 0" + extra)
}
}
inflight := (inflight | c_set) & ~d_clr
inflight_opcodes := (inflight_opcodes | c_opcodes_set) & ~d_opcodes_clr
inflight_sizes := (inflight_sizes | c_sizes_set) & ~d_sizes_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.c.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeDESink(bundle: TLBundle, edge: TLEdge): Unit = {
val inflight = RegInit(0.U(edge.manager.endSinkId.W))
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
val e_first = true.B
val d_set = WireInit(0.U(edge.manager.endSinkId.W))
when (bundle.d.fire && d_first && edge.isRequest(bundle.d.bits)) {
d_set := UIntToOH(bundle.d.bits.sink)
assume(!inflight(bundle.d.bits.sink), "'D' channel re-used a sink ID" + extra)
}
val e_clr = WireInit(0.U(edge.manager.endSinkId.W))
when (bundle.e.fire && e_first && edge.isResponse(bundle.e.bits)) {
e_clr := UIntToOH(bundle.e.bits.sink)
monAssert((d_set | inflight)(bundle.e.bits.sink), "'E' channel acknowledged for nothing inflight" + extra)
}
// edge.client.minLatency applies to BC, not DE
inflight := (inflight | d_set) & ~e_clr
}
def legalizeUnique(bundle: TLBundle, edge: TLEdge): Unit = {
val sourceBits = log2Ceil(edge.client.endSourceId)
val tooBig = 14 // >16kB worth of flight information gets to be too much
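// Rough arithmetic behind the cutoff (editor's note): 2^14 source IDs, each with a slot on the
// order of 4 bits for size plus 4 bits for opcode, is roughly 16 KiB of inflight-tracking state.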
if (sourceBits > tooBig) {
println(s"WARNING: TLMonitor instantiated on a bus with source bits (${sourceBits}) > ${tooBig}; A=>D transaction flight will not be checked")
} else {
if (args.edge.params(TestplanTestType).simulation) {
if (args.edge.params(TLMonitorStrictMode)) {
legalizeADSource(bundle, edge)
legalizeCDSource(bundle, edge)
} else {
legalizeADSourceOld(bundle, edge)
}
}
if (args.edge.params(TestplanTestType).formal) {
legalizeADSourceFormal(bundle, edge)
}
}
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
// legalizeBCSourceAddress(bundle, edge) // too much state needed to synthesize...
val sinkBits = log2Ceil(edge.manager.endSinkId)
if (sinkBits > tooBig) {
println(s"WARNING: TLMonitor instantiated on a bus with sink bits (${sinkBits}) > ${tooBig}; D=>E transaction flight will not be checked")
} else {
legalizeDESink(bundle, edge)
}
}
}
def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit = {
legalizeFormat (bundle, edge)
legalizeMultibeat (bundle, edge)
legalizeUnique (bundle, edge)
}
}
File Misc.scala:
// See LICENSE.Berkeley for license details.
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util._
import chisel3.util.random.LFSR
import org.chipsalliance.cde.config.Parameters
import scala.math._
class ParameterizedBundle(implicit p: Parameters) extends Bundle
trait Clocked extends Bundle {
val clock = Clock()
val reset = Bool()
}
object DecoupledHelper {
def apply(rvs: Bool*) = new DecoupledHelper(rvs)
}
class DecoupledHelper(val rvs: Seq[Bool]) {
def fire(exclude: Bool, includes: Bool*) = {
require(rvs.contains(exclude), "Excluded Bool not present in DecoupledHelper! Note that DecoupledHelper uses referential equality for exclusion! If you don't want to exclude anything, use fire()!")
(rvs.filter(_ ne exclude) ++ includes).reduce(_ && _)
}
def fire() = {
rvs.reduce(_ && _)
}
}
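// A minimal usage sketch (the names enq, deq and creditAvailable are hypothetical,
// not part of this file): each endpoint fires when every *other* condition holds,
// which is exactly what fire(exclude) computes.
//   val helper = DecoupledHelper(enq.valid, deq.ready, creditAvailable)
//   deq.valid := helper.fire(deq.ready)   // all conditions except deq.ready
//   enq.ready := helper.fire(enq.valid)   // all conditions except enq.valid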
object MuxT {
def apply[T <: Data, U <: Data](cond: Bool, con: (T, U), alt: (T, U)): (T, U) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2))
def apply[T <: Data, U <: Data, W <: Data](cond: Bool, con: (T, U, W), alt: (T, U, W)): (T, U, W) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3))
def apply[T <: Data, U <: Data, W <: Data, X <: Data](cond: Bool, con: (T, U, W, X), alt: (T, U, W, X)): (T, U, W, X) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3), Mux(cond, con._4, alt._4))
}
/** Creates a cascade of n MuxTs to search for a key value. */
object MuxTLookup {
def apply[S <: UInt, T <: Data, U <: Data](key: S, default: (T, U), mapping: Seq[(S, (T, U))]): (T, U) = {
var res = default
for ((k, v) <- mapping.reverse)
res = MuxT(k === key, v, res)
res
}
def apply[S <: UInt, T <: Data, U <: Data, W <: Data](key: S, default: (T, U, W), mapping: Seq[(S, (T, U, W))]): (T, U, W) = {
var res = default
for ((k, v) <- mapping.reverse)
res = MuxT(k === key, v, res)
res
}
}
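// A small usage sketch (the state encodings s_idle/s_busy and the result values
// are hypothetical): look up a (Bool, UInt) pair by key, with a default fallback.
//   val (ready, count) = MuxTLookup(state, (false.B, 0.U), Seq(
//     s_idle -> (true.B,  0.U),
//     s_busy -> (false.B, 4.U)))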
object ValidMux {
def apply[T <: Data](v1: ValidIO[T], v2: ValidIO[T]*): ValidIO[T] = {
apply(v1 +: v2.toSeq)
}
def apply[T <: Data](valids: Seq[ValidIO[T]]): ValidIO[T] = {
val out = Wire(Valid(valids.head.bits.cloneType))
out.valid := valids.map(_.valid).reduce(_ || _)
out.bits := MuxCase(valids.head.bits,
valids.map(v => (v.valid -> v.bits)))
out
}
}
object Str
{
def apply(s: String): UInt = {
var i = BigInt(0)
require(s.forall(validChar _))
for (c <- s)
i = (i << 8) | c
i.U((s.length*8).W)
}
def apply(x: Char): UInt = {
require(validChar(x))
x.U(8.W)
}
def apply(x: UInt): UInt = apply(x, 10)
def apply(x: UInt, radix: Int): UInt = {
val rad = radix.U
val w = x.getWidth
require(w > 0)
var q = x
var s = digit(q % rad)
for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
q = q / rad
s = Cat(Mux((radix == 10).B && q === 0.U, Str(' '), digit(q % rad)), s)
}
s
}
def apply(x: SInt): UInt = apply(x, 10)
def apply(x: SInt, radix: Int): UInt = {
val neg = x < 0.S
val abs = x.abs.asUInt
if (radix != 10) {
Cat(Mux(neg, Str('-'), Str(' ')), Str(abs, radix))
} else {
val rad = radix.U
val w = abs.getWidth
require(w > 0)
var q = abs
var s = digit(q % rad)
var needSign = neg
for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
q = q / rad
val placeSpace = q === 0.U
val space = Mux(needSign, Str('-'), Str(' '))
needSign = needSign && !placeSpace
s = Cat(Mux(placeSpace, space, digit(q % rad)), s)
}
Cat(Mux(needSign, Str('-'), Str(' ')), s)
}
}
private def digit(d: UInt): UInt = Mux(d < 10.U, Str('0')+d, Str(('a'-10).toChar)+d)(7,0)
private def validChar(x: Char) = x == (x & 0xFF)
}
object Split
{
def apply(x: UInt, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n0), x.extract(n0-1,0))
}
def apply(x: UInt, n1: Int, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0))
}
def apply(x: UInt, n2: Int, n1: Int, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n2), x.extract(n2-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0))
}
}
object Random
{
def apply(mod: Int, random: UInt): UInt = {
if (isPow2(mod)) random.extract(log2Ceil(mod)-1,0)
else PriorityEncoder(partition(apply(1 << log2Up(mod*8), random), mod))
}
def apply(mod: Int): UInt = apply(mod, randomizer)
def oneHot(mod: Int, random: UInt): UInt = {
if (isPow2(mod)) UIntToOH(random(log2Up(mod)-1,0))
else PriorityEncoderOH(partition(apply(1 << log2Up(mod*8), random), mod)).asUInt
}
def oneHot(mod: Int): UInt = oneHot(mod, randomizer)
private def randomizer = LFSR(16)
private def partition(value: UInt, slices: Int) =
Seq.tabulate(slices)(i => value < (((i + 1) << value.getWidth) / slices).U)
}
object Majority {
def apply(in: Set[Bool]): Bool = {
val n = (in.size >> 1) + 1
val clauses = in.subsets(n).map(_.reduce(_ && _))
clauses.reduce(_ || _)
}
def apply(in: Seq[Bool]): Bool = apply(in.toSet)
def apply(in: UInt): Bool = apply(in.asBools.toSet)
}
object PopCountAtLeast {
private def two(x: UInt): (Bool, Bool) = x.getWidth match {
case 1 => (x.asBool, false.B)
case n =>
val half = x.getWidth / 2
val (leftOne, leftTwo) = two(x(half - 1, 0))
val (rightOne, rightTwo) = two(x(x.getWidth - 1, half))
(leftOne || rightOne, leftTwo || rightTwo || (leftOne && rightOne))
}
def apply(x: UInt, n: Int): Bool = n match {
case 0 => true.B
case 1 => x.orR
case 2 => two(x)._2
case 3 => PopCount(x) >= n.U
}
}
// This gets used everywhere, so make the smallest circuit possible ...
// Given an address and size, create a mask of beatBytes size
// eg (byte lane 0 shown leftmost): (0x3, 0, 4) => 0001, (0x3, 1, 4) => 0011, (0x3, 2, 4) => 1111
// groupBy applies an interleaved OR reduction; groupBy=2 takes 0010 => 01
object MaskGen {
def apply(addr_lo: UInt, lgSize: UInt, beatBytes: Int, groupBy: Int = 1): UInt = {
require (groupBy >= 1 && beatBytes >= groupBy)
require (isPow2(beatBytes) && isPow2(groupBy))
val lgBytes = log2Ceil(beatBytes)
val sizeOH = UIntToOH(lgSize | 0.U(log2Up(beatBytes).W), log2Up(beatBytes)) | (groupBy*2 - 1).U
def helper(i: Int): Seq[(Bool, Bool)] = {
if (i == 0) {
Seq((lgSize >= lgBytes.asUInt, true.B))
} else {
val sub = helper(i-1)
val size = sizeOH(lgBytes - i)
val bit = addr_lo(lgBytes - i)
val nbit = !bit
Seq.tabulate (1 << i) { j =>
val (sub_acc, sub_eq) = sub(j/2)
val eq = sub_eq && (if (j % 2 == 1) bit else nbit)
val acc = sub_acc || (size && eq)
(acc, eq)
}
}
}
if (groupBy == beatBytes) 1.U else
Cat(helper(lgBytes-log2Ceil(groupBy)).map(_._1).reverse)
}
}
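// A usage sketch, assuming an 8-byte beat (beatBytes = 8) and hypothetical
// addr/lgSize signals: a 4-byte access at byte offset 4 enables lanes 7..4.
//   val wmask = MaskGen(addr(2, 0), lgSize, beatBytes = 8)
//   // addr(2,0) = 4, lgSize = 2 (4 bytes)  =>  wmask = 0b11110000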
File PlusArg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.experimental._
import chisel3.util.HasBlackBoxResource
@deprecated("This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05")
case class PlusArgInfo(default: BigInt, docstring: String)
/** Case class for PlusArg information
*
* @tparam A scala type of the PlusArg value
* @param default optional default value
* @param docstring text to include in the help
* @param doctype description of the Verilog type of the PlusArg value (e.g. STRING, INT)
*/
private case class PlusArgContainer[A](default: Option[A], docstring: String, doctype: String)
/** Typeclass for converting a type to a doctype string
* @tparam A some type
*/
trait Doctypeable[A] {
/** Return the doctype string for some option */
def toDoctype(a: Option[A]): String
}
/** Object containing implementations of the Doctypeable typeclass */
object Doctypes {
/** Converts an Int => "INT" */
implicit val intToDoctype = new Doctypeable[Int] { def toDoctype(a: Option[Int]) = "INT" }
/** Converts a BigInt => "INT" */
implicit val bigIntToDoctype = new Doctypeable[BigInt] { def toDoctype(a: Option[BigInt]) = "INT" }
/** Converts a String => "STRING" */
implicit val stringToDoctype = new Doctypeable[String] { def toDoctype(a: Option[String]) = "STRING" }
}
class plusarg_reader(val format: String, val default: BigInt, val docstring: String, val width: Int) extends BlackBox(Map(
"FORMAT" -> StringParam(format),
"DEFAULT" -> IntParam(default),
"WIDTH" -> IntParam(width)
)) with HasBlackBoxResource {
val io = IO(new Bundle {
val out = Output(UInt(width.W))
})
addResource("/vsrc/plusarg_reader.v")
}
/* This wrapper class has no outputs, making it clear it is a simulation-only construct */
class PlusArgTimeout(val format: String, val default: BigInt, val docstring: String, val width: Int) extends Module {
val io = IO(new Bundle {
val count = Input(UInt(width.W))
})
val max = Module(new plusarg_reader(format, default, docstring, width)).io.out
when (max > 0.U) {
assert (io.count < max, s"Timeout exceeded: $docstring")
}
}
import Doctypes._
object PlusArg
{
/** PlusArg("foo") will return 42.U if the simulation is run with +foo=42
* Do not use this as an initial register value. The value is set in an
* initial block and thus accessing it from another initial is racy.
* Add a docstring to document the arg, which can be dumped in an elaboration
* pass.
*/
def apply(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32): UInt = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new plusarg_reader(name + "=%d", default, docstring, width)).io.out
}
/** PlusArg.timeout(name, default, docstring)(count) will use chisel.assert
* to kill the simulation when count exceeds the specified integer argument.
* Default 0 will never assert.
*/
def timeout(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32)(count: UInt): Unit = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new PlusArgTimeout(name + "=%d", default, docstring, width)).io.count := count
}
}
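// A usage sketch (the plusarg name "watchdog_cycles" and the cycles counter are
// hypothetical): read a runtime-tunable threshold and compare against a counter.
//   val limit = PlusArg("watchdog_cycles", default = 10000, docstring = "cycles before warning")
//   when (limit =/= 0.U && cycles > limit) { printf("watchdog expired\n") }
// Passing +watchdog_cycles=50000 on the simulator command line overrides the default.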
object PlusArgArtefacts {
private var artefacts: Map[String, PlusArgContainer[_]] = Map.empty
/* Add a new PlusArg */
@deprecated(
"Use `Some(BigInt)` to specify a `default` value. This will be removed in Rocket Chip 2020.08",
"Rocket Chip 2020.05"
)
def append(name: String, default: BigInt, docstring: String): Unit = append(name, Some(default), docstring)
/** Add a new PlusArg
*
* @tparam A scala type of the PlusArg value
* @param name name for the PlusArg
* @param default optional default value
* @param docstring text to include in the help
*/
def append[A : Doctypeable](name: String, default: Option[A], docstring: String): Unit =
artefacts = artefacts ++
Map(name -> PlusArgContainer(default, docstring, implicitly[Doctypeable[A]].toDoctype(default)))
/* From plus args, generate help text */
private def serializeHelp_cHeader(tab: String = ""): String = artefacts
.map{ case(arg, info) =>
s"""|$tab+$arg=${info.doctype}\\n\\
|$tab${" "*20}${info.docstring}\\n\\
|""".stripMargin ++ info.default.map{ case default =>
s"$tab${" "*22}(default=${default})\\n\\\n"}.getOrElse("")
}.toSeq.mkString("\\n\\\n") ++ "\""
/* From plus args, generate a char array of their names */
private def serializeArray_cHeader(tab: String = ""): String = {
val prettyTab = tab + " " * 44 // Length of 'static const ...'
s"${tab}static const char * verilog_plusargs [] = {\\\n" ++
artefacts
.map{ case(arg, _) => s"""$prettyTab"$arg",\\\n""" }
.mkString("")++
s"${prettyTab}0};"
}
/* Generate C code to be included in emulator.cc that helps with
* argument parsing based on available Verilog PlusArgs */
def serialize_cHeader(): String =
s"""|#define PLUSARG_USAGE_OPTIONS \"EMULATOR VERILOG PLUSARGS\\n\\
|${serializeHelp_cHeader(" "*7)}
|${serializeArray_cHeader()}
|""".stripMargin
}
File package.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip
import chisel3._
import chisel3.util._
import scala.math.min
import scala.collection.{immutable, mutable}
package object util {
implicit class UnzippableOption[S, T](val x: Option[(S, T)]) {
def unzip = (x.map(_._1), x.map(_._2))
}
implicit class UIntIsOneOf(private val x: UInt) extends AnyVal {
def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR
def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq)
}
implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal {
/** Like Vec.apply(idx), but tolerates indices of mismatched width */
def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0))
}
implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal {
def apply(idx: UInt): T = {
if (x.size <= 1) {
x.head
} else if (!isPow2(x.size)) {
// For non-power-of-2 seqs, reflect elements to simplify decoder
(x ++ x.takeRight(x.size & -x.size)).toSeq(idx)
} else {
// Ignore MSBs of idx
val truncIdx =
if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx
else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0)
x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) }
}
}
def extract(idx: UInt): T = VecInit(x).extract(idx)
def asUInt: UInt = Cat(x.map(_.asUInt).reverse)
def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n)
def rotate(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n)
def rotateRight(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
}
// allow bitwise ops on Seq[Bool] just like UInt
implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal {
def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b }
def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b }
def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b }
def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x
def >> (n: Int): Seq[Bool] = x drop n
def unary_~ : Seq[Bool] = x.map(!_)
def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_)
def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_)
def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_)
private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B)
}
implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal {
def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable))
def getElements: Seq[Element] = x match {
case e: Element => Seq(e)
case a: Aggregate => a.getElements.flatMap(_.getElements)
}
}
/** Any Data subtype that has a Bool member named valid. */
type DataCanBeValid = Data { val valid: Bool }
implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal {
def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable)
}
implicit class StringToAugmentedString(private val x: String) extends AnyVal {
/** converts from camel case to underscores, also removing all spaces */
def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") {
case (acc, c) if c.isUpper => acc + "_" + c.toLower
case (acc, c) if c == ' ' => acc
case (acc, c) => acc + c
}
/** converts spaces or underscores to hyphens, also lowering case */
def kebab: String = x.toLowerCase map {
case ' ' => '-'
case '_' => '-'
case c => c
}
def named(name: Option[String]): String = {
x + name.map("_named_" + _ ).getOrElse("_with_no_name")
}
def named(name: String): String = named(Some(name))
}
implicit def uintToBitPat(x: UInt): BitPat = BitPat(x)
implicit def wcToUInt(c: WideCounter): UInt = c.value
implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal {
def sextTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x)
}
def padTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(0.U((n - x.getWidth).W), x)
}
// shifts left by n if n >= 0, or right by -n if n < 0
def << (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << n(w-1, 0)
Mux(n(w), shifted >> (1 << w), shifted)
}
// shifts right by n if n >= 0, or left by -n if n < 0
def >> (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << (1 << w) >> n(w-1, 0)
Mux(n(w), shifted, shifted >> (1 << w))
}
// Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts
def extract(hi: Int, lo: Int): UInt = {
require(hi >= lo-1)
if (hi == lo-1) 0.U
else x(hi, lo)
}
// Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts
def extractOption(hi: Int, lo: Int): Option[UInt] = {
require(hi >= lo-1)
if (hi == lo-1) None
else Some(x(hi, lo))
}
// like x & ~y, but first truncate or zero-extend y to x's width
def andNot(y: UInt): UInt = x & ~(y | (x & 0.U))
def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n)
def rotateRight(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r))
}
}
def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n))
def rotateLeft(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r))
}
}
// compute (this + y) % n, given (this < n) and (y < n)
def addWrap(y: UInt, n: Int): UInt = {
val z = x +& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0)
}
// compute (this - y) % n, given (this < n) and (y < n)
def subWrap(y: UInt, n: Int): UInt = {
val z = x -& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0)
}
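// Worked example with n = 6 (values chosen only for illustration):
//   5.U.addWrap(3.U, 6)  // = 2.U, since 5 + 3 = 8 wraps to 2
//   1.U.subWrap(3.U, 6)  // = 4.U, since 1 - 3 wraps to 4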
def grouped(width: Int): Seq[UInt] =
(0 until x.getWidth by width).map(base => x(base + width - 1, base))
def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds
def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x)
// Like >=, but prevents x-prop for ('x >= 0)
def >== (y: UInt): Bool = x >= y || y === 0.U
}
implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal {
def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y)
def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y)
}
implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal {
def toInt: Int = if (x) 1 else 0
// this one's snagged from scalaz
def option[T](z: => T): Option[T] = if (x) Some(z) else None
}
implicit class IntToAugmentedInt(private val x: Int) extends AnyVal {
// exact log2
def log2: Int = {
require(isPow2(x))
log2Ceil(x)
}
}
def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x)
def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x))
def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0)
def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1)
def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None
// Fill 1s from low bits to high bits
def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth)
def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0))
helper(1, x)(width-1, 0)
}
// Fill 1s from high bits to low bits
def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth)
def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x >> s))
helper(1, x)(width-1, 0)
}
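// Worked example on a 5-bit value: leftOR("b00100".U(5.W)) = b11100 (set bits
// propagate toward the MSB), while rightOR("b00100".U(5.W)) = b00111 (toward the LSB).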
def OptimizationBarrier[T <: Data](in: T): T = {
val barrier = Module(new Module {
val io = IO(new Bundle {
val x = Input(chiselTypeOf(in))
val y = Output(chiselTypeOf(in))
})
io.y := io.x
override def desiredName = s"OptimizationBarrier_${in.typeName}"
})
barrier.io.x := in
barrier.io.y
}
/** Similar to Seq.groupBy except this returns a Seq instead of a Map
* Useful for deterministic code generation
*/
def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = {
val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]]
for (x <- xs) {
val key = f(x)
val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A])
l += x
}
map.view.map({ case (k, vs) => k -> vs.toList }).toList
}
def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match {
case 1 => List.fill(n)(in.head)
case x if x == n => in
case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in")
}
// HeterogeneousBag moved to standalone diplomacy
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts)
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag
}
File Bundles.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import freechips.rocketchip.util._
import scala.collection.immutable.ListMap
import chisel3.util.Decoupled
import chisel3.util.DecoupledIO
import chisel3.reflect.DataMirror
abstract class TLBundleBase(val params: TLBundleParameters) extends Bundle
// common combos in lazy policy:
// Put + Acquire
// Release + AccessAck
object TLMessages
{
// A B C D E
def PutFullData = 0.U // . . => AccessAck
def PutPartialData = 1.U // . . => AccessAck
def ArithmeticData = 2.U // . . => AccessAckData
def LogicalData = 3.U // . . => AccessAckData
def Get = 4.U // . . => AccessAckData
def Hint = 5.U // . . => HintAck
def AcquireBlock = 6.U // . => Grant[Data]
def AcquirePerm = 7.U // . => Grant[Data]
def Probe = 6.U // . => ProbeAck[Data]
def AccessAck = 0.U // . .
def AccessAckData = 1.U // . .
def HintAck = 2.U // . .
def ProbeAck = 4.U // .
def ProbeAckData = 5.U // .
def Release = 6.U // . => ReleaseAck
def ReleaseData = 7.U // . => ReleaseAck
def Grant = 4.U // . => GrantAck
def GrantData = 5.U // . => GrantAck
def ReleaseAck = 6.U // .
def GrantAck = 0.U // .
def isA(x: UInt) = x <= AcquirePerm
def isB(x: UInt) = x <= Probe
def isC(x: UInt) = x <= ReleaseData
def isD(x: UInt) = x <= ReleaseAck
def adResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, Grant, Grant)
def bcResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, ProbeAck, ProbeAck)
def a = Seq( ("PutFullData",TLPermissions.PermMsgReserved),
("PutPartialData",TLPermissions.PermMsgReserved),
("ArithmeticData",TLAtomics.ArithMsg),
("LogicalData",TLAtomics.LogicMsg),
("Get",TLPermissions.PermMsgReserved),
("Hint",TLHints.HintsMsg),
("AcquireBlock",TLPermissions.PermMsgGrow),
("AcquirePerm",TLPermissions.PermMsgGrow))
def b = Seq( ("PutFullData",TLPermissions.PermMsgReserved),
("PutPartialData",TLPermissions.PermMsgReserved),
("ArithmeticData",TLAtomics.ArithMsg),
("LogicalData",TLAtomics.LogicMsg),
("Get",TLPermissions.PermMsgReserved),
("Hint",TLHints.HintsMsg),
("Probe",TLPermissions.PermMsgCap))
def c = Seq( ("AccessAck",TLPermissions.PermMsgReserved),
("AccessAckData",TLPermissions.PermMsgReserved),
("HintAck",TLPermissions.PermMsgReserved),
("Invalid Opcode",TLPermissions.PermMsgReserved),
("ProbeAck",TLPermissions.PermMsgReport),
("ProbeAckData",TLPermissions.PermMsgReport),
("Release",TLPermissions.PermMsgReport),
("ReleaseData",TLPermissions.PermMsgReport))
def d = Seq( ("AccessAck",TLPermissions.PermMsgReserved),
("AccessAckData",TLPermissions.PermMsgReserved),
("HintAck",TLPermissions.PermMsgReserved),
("Invalid Opcode",TLPermissions.PermMsgReserved),
("Grant",TLPermissions.PermMsgCap),
("GrantData",TLPermissions.PermMsgCap),
("ReleaseAck",TLPermissions.PermMsgReserved))
}
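// Orientation example (per the opcode table above): an A-channel Get (opcode 4)
// is answered on D with AccessAckData, i.e. adResponse maps Get to AccessAckData;
// a B-channel Probe is answered on C with ProbeAck or ProbeAckData.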
/**
* The three primary TileLink permissions are:
 * (T)runk: the agent is (or is on an inwards path to) the global point of serialization.
 * (B)ranch: the agent is on an outwards path from the Trunk and holds a read-only copy.
 * (N)one: the agent holds no permissions on (and no copy of) the data.
* These permissions are permuted by transfer operations in various ways.
* Operations can cap permissions, request for them to be grown or shrunk,
* or for a report on their current status.
*/
object TLPermissions
{
val aWidth = 2
val bdWidth = 2
val cWidth = 3
// Cap types (Grant = new permissions, Probe = permissions <= target)
def toT = 0.U(bdWidth.W)
def toB = 1.U(bdWidth.W)
def toN = 2.U(bdWidth.W)
def isCap(x: UInt) = x <= toN
// Grow types (Acquire = permissions >= target)
def NtoB = 0.U(aWidth.W)
def NtoT = 1.U(aWidth.W)
def BtoT = 2.U(aWidth.W)
def isGrow(x: UInt) = x <= BtoT
// Shrink types (ProbeAck, Release)
def TtoB = 0.U(cWidth.W)
def TtoN = 1.U(cWidth.W)
def BtoN = 2.U(cWidth.W)
def isShrink(x: UInt) = x <= BtoN
// Report types (ProbeAck, Release)
def TtoT = 3.U(cWidth.W)
def BtoB = 4.U(cWidth.W)
def NtoN = 5.U(cWidth.W)
def isReport(x: UInt) = x <= NtoN
def PermMsgGrow:Seq[String] = Seq("Grow NtoB", "Grow NtoT", "Grow BtoT")
def PermMsgCap:Seq[String] = Seq("Cap toT", "Cap toB", "Cap toN")
def PermMsgReport:Seq[String] = Seq("Shrink TtoB", "Shrink TtoN", "Shrink BtoN", "Report TtoT", "Report BtoB", "Report NtoN")
def PermMsgReserved:Seq[String] = Seq("Reserved")
}
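// Orientation example (protocol-level, not specific to this file): a client at
// None acquiring write permission sends AcquireBlock with grow NtoT, receives
// Grant[Data] capped toT, and later gives the line up with Release/ReleaseData
// carrying shrink TtoN, which is answered by ReleaseAck.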
object TLAtomics
{
val width = 3
// Arithmetic types
def MIN = 0.U(width.W)
def MAX = 1.U(width.W)
def MINU = 2.U(width.W)
def MAXU = 3.U(width.W)
def ADD = 4.U(width.W)
def isArithmetic(x: UInt) = x <= ADD
// Logical types
def XOR = 0.U(width.W)
def OR = 1.U(width.W)
def AND = 2.U(width.W)
def SWAP = 3.U(width.W)
def isLogical(x: UInt) = x <= SWAP
def ArithMsg:Seq[String] = Seq("MIN", "MAX", "MINU", "MAXU", "ADD")
def LogicMsg:Seq[String] = Seq("XOR", "OR", "AND", "SWAP")
}
object TLHints
{
val width = 1
def PREFETCH_READ = 0.U(width.W)
def PREFETCH_WRITE = 1.U(width.W)
def isHints(x: UInt) = x <= PREFETCH_WRITE
def HintsMsg:Seq[String] = Seq("PrefetchRead", "PrefetchWrite")
}
sealed trait TLChannel extends TLBundleBase {
val channelName: String
}
sealed trait TLDataChannel extends TLChannel
sealed trait TLAddrChannel extends TLDataChannel
final class TLBundleA(params: TLBundleParameters)
extends TLBundleBase(params) with TLAddrChannel
{
override def typeName = s"TLBundleA_${params.shortName}"
val channelName = "'A' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(List(TLAtomics.width, TLPermissions.aWidth, TLHints.width).max.W) // amo_opcode || grow perms || hint
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // from
val address = UInt(params.addressBits.W) // to
val user = BundleMap(params.requestFields)
val echo = BundleMap(params.echoFields)
// variable fields during multibeat:
val mask = UInt((params.dataBits/8).W)
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleB(params: TLBundleParameters)
extends TLBundleBase(params) with TLAddrChannel
{
override def typeName = s"TLBundleB_${params.shortName}"
val channelName = "'B' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(TLPermissions.bdWidth.W) // cap perms
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // to
val address = UInt(params.addressBits.W) // from
// variable fields during multibeat:
val mask = UInt((params.dataBits/8).W)
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleC(params: TLBundleParameters)
extends TLBundleBase(params) with TLAddrChannel
{
override def typeName = s"TLBundleC_${params.shortName}"
val channelName = "'C' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(TLPermissions.cWidth.W) // shrink or report perms
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // from
val address = UInt(params.addressBits.W) // to
val user = BundleMap(params.requestFields)
val echo = BundleMap(params.echoFields)
// variable fields during multibeat:
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleD(params: TLBundleParameters)
extends TLBundleBase(params) with TLDataChannel
{
override def typeName = s"TLBundleD_${params.shortName}"
val channelName = "'D' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(TLPermissions.bdWidth.W) // cap perms
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // to
val sink = UInt(params.sinkBits.W) // from
val denied = Bool() // implies corrupt iff *Data
val user = BundleMap(params.responseFields)
val echo = BundleMap(params.echoFields)
// variable fields during multibeat:
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleE(params: TLBundleParameters)
extends TLBundleBase(params) with TLChannel
{
override def typeName = s"TLBundleE_${params.shortName}"
val channelName = "'E' channel"
val sink = UInt(params.sinkBits.W) // to
}
class TLBundle(val params: TLBundleParameters) extends Record
{
// Emulate a Bundle with elements abcde or ad depending on params.hasBCE
private val optA = Some (Decoupled(new TLBundleA(params)))
private val optB = params.hasBCE.option(Flipped(Decoupled(new TLBundleB(params))))
private val optC = params.hasBCE.option(Decoupled(new TLBundleC(params)))
private val optD = Some (Flipped(Decoupled(new TLBundleD(params))))
private val optE = params.hasBCE.option(Decoupled(new TLBundleE(params)))
def a: DecoupledIO[TLBundleA] = optA.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleA(params)))))
def b: DecoupledIO[TLBundleB] = optB.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleB(params)))))
def c: DecoupledIO[TLBundleC] = optC.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleC(params)))))
def d: DecoupledIO[TLBundleD] = optD.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleD(params)))))
def e: DecoupledIO[TLBundleE] = optE.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleE(params)))))
val elements =
if (params.hasBCE) ListMap("e" -> e, "d" -> d, "c" -> c, "b" -> b, "a" -> a)
else ListMap("d" -> d, "a" -> a)
def tieoff(): Unit = {
DataMirror.specifiedDirectionOf(a.ready) match {
case SpecifiedDirection.Input =>
a.ready := false.B
c.ready := false.B
e.ready := false.B
b.valid := false.B
d.valid := false.B
case SpecifiedDirection.Output =>
a.valid := false.B
c.valid := false.B
e.valid := false.B
b.ready := false.B
d.ready := false.B
case _ =>
}
}
}
object TLBundle
{
def apply(params: TLBundleParameters) = new TLBundle(params)
}
class TLAsyncBundleBase(val params: TLAsyncBundleParameters) extends Bundle
class TLAsyncBundle(params: TLAsyncBundleParameters) extends TLAsyncBundleBase(params)
{
val a = new AsyncBundle(new TLBundleA(params.base), params.async)
val b = Flipped(new AsyncBundle(new TLBundleB(params.base), params.async))
val c = new AsyncBundle(new TLBundleC(params.base), params.async)
val d = Flipped(new AsyncBundle(new TLBundleD(params.base), params.async))
val e = new AsyncBundle(new TLBundleE(params.base), params.async)
}
class TLRationalBundle(params: TLBundleParameters) extends TLBundleBase(params)
{
val a = RationalIO(new TLBundleA(params))
val b = Flipped(RationalIO(new TLBundleB(params)))
val c = RationalIO(new TLBundleC(params))
val d = Flipped(RationalIO(new TLBundleD(params)))
val e = RationalIO(new TLBundleE(params))
}
class TLCreditedBundle(params: TLBundleParameters) extends TLBundleBase(params)
{
val a = CreditedIO(new TLBundleA(params))
val b = Flipped(CreditedIO(new TLBundleB(params)))
val c = CreditedIO(new TLBundleC(params))
val d = Flipped(CreditedIO(new TLBundleD(params)))
val e = CreditedIO(new TLBundleE(params))
}
File Parameters.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.diplomacy
import chisel3._
import chisel3.util.{DecoupledIO, Queue, ReadyValidIO, isPow2, log2Ceil, log2Floor}
import freechips.rocketchip.util.ShiftQueue
/** Options for describing the attributes of memory regions */
object RegionType {
// Define the 'more relaxed than' ordering
val cases = Seq(CACHED, TRACKED, UNCACHED, IDEMPOTENT, VOLATILE, PUT_EFFECTS, GET_EFFECTS)
sealed trait T extends Ordered[T] {
def compare(that: T): Int = cases.indexOf(that) compare cases.indexOf(this)
}
case object CACHED extends T // an intermediate agent may have cached a copy of the region for you
case object TRACKED extends T // the region may have been cached by another master, but coherence is being provided
case object UNCACHED extends T // the region has not been cached yet, but should be cached when possible
case object IDEMPOTENT extends T // gets return most recently put content, but content should not be cached
case object VOLATILE extends T // content may change without a put, but puts and gets have no side effects
case object PUT_EFFECTS extends T // puts produce side effects and so must not be combined/delayed
case object GET_EFFECTS extends T // gets produce side effects and so must not be issued speculatively
}
// A half-open range of ids [start, end); may be empty when start == end
case class IdRange(start: Int, end: Int) extends Ordered[IdRange]
{
require (start >= 0, s"Ids cannot be negative, but got: $start.")
require (start <= end, "Id ranges cannot be negative.")
def compare(x: IdRange) = {
val primary = (this.start - x.start).signum
val secondary = (x.end - this.end).signum
if (primary != 0) primary else secondary
}
def overlaps(x: IdRange) = start < x.end && x.start < end
def contains(x: IdRange) = start <= x.start && x.end <= end
def contains(x: Int) = start <= x && x < end
def contains(x: UInt) =
if (size == 0) {
false.B
} else if (size == 1) { // simple comparison
x === start.U
} else {
// find index of largest different bit
val largestDeltaBit = log2Floor(start ^ (end-1))
val smallestCommonBit = largestDeltaBit + 1 // may not exist in x
val uncommonMask = (1 << smallestCommonBit) - 1
val uncommonBits = (x | 0.U(smallestCommonBit.W))(largestDeltaBit, 0)
// the prefix must match exactly (note: may shift ALL bits away)
(x >> smallestCommonBit) === (start >> smallestCommonBit).U &&
// firrtl constant prop range analysis can eliminate these two:
(start & uncommonMask).U <= uncommonBits &&
uncommonBits <= ((end-1) & uncommonMask).U
}
def shift(x: Int) = IdRange(start+x, end+x)
def size = end - start
def isEmpty = end == start
def range = start until end
}
object IdRange
{
def overlaps(s: Seq[IdRange]) = if (s.isEmpty) None else {
val ranges = s.sorted
(ranges.tail zip ranges.init) find { case (a, b) => a overlaps b }
}
}
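// Example: IdRange(4, 8) covers ids 4, 5, 6 and 7 (half-open), so it does not
// overlap IdRange(0, 4) but is contained by IdRange(0, 16).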
// A potentially empty inclusive range of 2-powers [min, max] (in bytes)
case class TransferSizes(min: Int, max: Int)
{
def this(x: Int) = this(x, x)
require (min <= max, s"Min transfer $min > max transfer $max")
require (min >= 0 && max >= 0, s"TransferSizes must be positive, got: ($min, $max)")
require (max == 0 || isPow2(max), s"TransferSizes must be a power of 2, got: $max")
require (min == 0 || isPow2(min), s"TransferSizes must be a power of 2, got: $min")
require (max == 0 || min != 0, s"TransferSize 0 is forbidden unless (0,0), got: ($min, $max)")
def none = min == 0
def contains(x: Int) = isPow2(x) && min <= x && x <= max
def containsLg(x: Int) = contains(1 << x)
def containsLg(x: UInt) =
if (none) false.B
else if (min == max) { log2Ceil(min).U === x }
else { log2Ceil(min).U <= x && x <= log2Ceil(max).U }
def contains(x: TransferSizes) = x.none || (min <= x.min && x.max <= max)
def intersect(x: TransferSizes) =
if (x.max < min || max < x.min) TransferSizes.none
else TransferSizes(scala.math.max(min, x.min), scala.math.min(max, x.max))
// Not a union, because the result may contain sizes contained by neither term
// NOT TO BE CONFUSED WITH COVERPOINTS
def mincover(x: TransferSizes) = {
if (none) {
x
} else if (x.none) {
this
} else {
TransferSizes(scala.math.min(min, x.min), scala.math.max(max, x.max))
}
}
override def toString() = "TransferSizes[%d, %d]".format(min, max)
}
object TransferSizes {
def apply(x: Int) = new TransferSizes(x)
val none = new TransferSizes(0)
def mincover(seq: Seq[TransferSizes]) = seq.foldLeft(none)(_ mincover _)
def intersect(seq: Seq[TransferSizes]) = seq.reduce(_ intersect _)
implicit def asBool(x: TransferSizes) = !x.none
}
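// Example: TransferSizes(4, 64) accepts any power-of-two size from 4 to 64 bytes,
// so contains(32) and containsLg(5) hold, while contains(3) (not a power of two)
// and contains(128) (too large) do not.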
// AddressSets specify the address space managed by the manager
// Base is the base address, and mask gives the bits consumed by the manager
// e.g: base=0x200, mask=0xff describes a device managing 0x200-0x2ff
// e.g: base=0x1000, mask=0xf0f describes a device managing 0x1000-0x100f, 0x1100-0x110f, ...
case class AddressSet(base: BigInt, mask: BigInt) extends Ordered[AddressSet]
{
// Forbid misaligned base address (and empty sets)
require ((base & mask) == 0, s"Mis-aligned AddressSets are forbidden, got: ${this.toString}")
require (base >= 0, s"AddressSet negative base is ambiguous: $base") // TL2 address widths are not fixed => negative is ambiguous
// We do allow negative mask (=> ignore all high bits)
def contains(x: BigInt) = ((x ^ base) & ~mask) == 0
def contains(x: UInt) = ((x ^ base.U).zext & (~mask).S) === 0.S
// turn x into an address contained in this set
def legalize(x: UInt): UInt = base.U | (mask.U & x)
// overlap iff bitwise: both care (~mask0 & ~mask1) => both equal (base0=base1)
def overlaps(x: AddressSet) = (~(mask | x.mask) & (base ^ x.base)) == 0
// contains iff bitwise: x.mask => mask && contains(x.base)
def contains(x: AddressSet) = ((x.mask | (base ^ x.base)) & ~mask) == 0
// The number of bytes to which the manager must be aligned
def alignment = ((mask + 1) & ~mask)
// Is this a contiguous memory range
def contiguous = alignment == mask+1
def finite = mask >= 0
def max = { require (finite, "Max cannot be calculated on infinite mask"); base | mask }
// Widen the match function to ignore all bits in imask
def widen(imask: BigInt) = AddressSet(base & ~imask, mask | imask)
// Return an AddressSet that only contains the addresses both sets contain
def intersect(x: AddressSet): Option[AddressSet] = {
if (!overlaps(x)) {
None
} else {
val r_mask = mask & x.mask
val r_base = base | x.base
Some(AddressSet(r_base, r_mask))
}
}
def subtract(x: AddressSet): Seq[AddressSet] = {
intersect(x) match {
case None => Seq(this)
case Some(remove) => AddressSet.enumerateBits(mask & ~remove.mask).map { bit =>
val nmask = (mask & (bit-1)) | remove.mask
val nbase = (remove.base ^ bit) & ~nmask
AddressSet(nbase, nmask)
}
}
}
// AddressSets have one natural Ordering (the containment order, if contiguous)
def compare(x: AddressSet) = {
val primary = (this.base - x.base).signum // smallest address first
val secondary = (x.mask - this.mask).signum // largest mask first
if (primary != 0) primary else secondary
}
// We always want to see things in hex
override def toString() = {
if (mask >= 0) {
"AddressSet(0x%x, 0x%x)".format(base, mask)
} else {
"AddressSet(0x%x, ~0x%x)".format(base, ~mask)
}
}
def toRanges = {
require (finite, "Ranges cannot be calculated on infinite mask")
val size = alignment
val fragments = mask & ~(size-1)
val bits = bitIndexes(fragments)
(BigInt(0) until (BigInt(1) << bits.size)).map { i =>
val off = bitIndexes(i).foldLeft(base) { case (a, b) => a.setBit(bits(b)) }
AddressRange(off, size)
}
}
}
object AddressSet
{
val everything = AddressSet(0, -1)
def misaligned(base: BigInt, size: BigInt, tail: Seq[AddressSet] = Seq()): Seq[AddressSet] = {
if (size == 0) tail.reverse else {
val maxBaseAlignment = base & (-base) // 0 for infinite (LSB)
val maxSizeAlignment = BigInt(1) << log2Floor(size) // MSB of size
val step =
if (maxBaseAlignment == 0 || maxBaseAlignment > maxSizeAlignment)
maxSizeAlignment else maxBaseAlignment
misaligned(base+step, size-step, AddressSet(base, step-1) +: tail)
}
}
def unify(seq: Seq[AddressSet], bit: BigInt): Seq[AddressSet] = {
// Pair terms up by ignoring 'bit'
seq.distinct.groupBy(x => x.copy(base = x.base & ~bit)).map { case (key, seq) =>
if (seq.size == 1) {
seq.head // singleton -> unaffected
} else {
key.copy(mask = key.mask | bit) // pair - widen mask by bit
}
}.toList
}
def unify(seq: Seq[AddressSet]): Seq[AddressSet] = {
val bits = seq.map(_.base).foldLeft(BigInt(0))(_ | _)
AddressSet.enumerateBits(bits).foldLeft(seq) { case (acc, bit) => unify(acc, bit) }.sorted
}
def enumerateMask(mask: BigInt): Seq[BigInt] = {
def helper(id: BigInt, tail: Seq[BigInt]): Seq[BigInt] =
if (id == mask) (id +: tail).reverse else helper(((~mask | id) + 1) & mask, id +: tail)
helper(0, Nil)
}
def enumerateBits(mask: BigInt): Seq[BigInt] = {
def helper(x: BigInt): Seq[BigInt] = {
if (x == 0) {
Nil
} else {
val bit = x & (-x)
bit +: helper(x & ~bit)
}
}
helper(mask)
}
}
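// Worked example: AddressSet(0x0, 0xffff) (the low 64 KiB) minus
// AddressSet(0x8000, 0x7fff) (its upper half) leaves Seq(AddressSet(0x0, 0x7fff));
// widen(0x1000) would additionally ignore bit 12 when matching addresses.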
case class BufferParams(depth: Int, flow: Boolean, pipe: Boolean)
{
require (depth >= 0, "Buffer depth must be >= 0")
def isDefined = depth > 0
def latency = if (isDefined && !flow) 1 else 0
def apply[T <: Data](x: DecoupledIO[T]) =
if (isDefined) Queue(x, depth, flow=flow, pipe=pipe)
else x
def irrevocable[T <: Data](x: ReadyValidIO[T]) =
if (isDefined) Queue.irrevocable(x, depth, flow=flow, pipe=pipe)
else x
def sq[T <: Data](x: DecoupledIO[T]) =
if (!isDefined) x else {
val sq = Module(new ShiftQueue(x.bits, depth, flow=flow, pipe=pipe))
sq.io.enq <> x
sq.io.deq
}
override def toString() = "BufferParams:%d%s%s".format(depth, if (flow) "F" else "", if (pipe) "P" else "")
}
object BufferParams
{
implicit def apply(depth: Int): BufferParams = BufferParams(depth, false, false)
val default = BufferParams(2)
val none = BufferParams(0)
val flow = BufferParams(1, true, false)
val pipe = BufferParams(1, false, true)
}
case class TriStateValue(value: Boolean, set: Boolean)
{
def update(orig: Boolean) = if (set) value else orig
}
object TriStateValue
{
implicit def apply(value: Boolean): TriStateValue = TriStateValue(value, true)
def unset = TriStateValue(false, false)
}
trait DirectedBuffers[T] {
def copyIn(x: BufferParams): T
def copyOut(x: BufferParams): T
def copyInOut(x: BufferParams): T
}
trait IdMapEntry {
def name: String
def from: IdRange
def to: IdRange
def isCache: Boolean
def requestFifo: Boolean
def maxTransactionsInFlight: Option[Int]
def pretty(fmt: String) =
if (from ne to) { // if the subclass uses the same reference for both from and to, assume its format string has an arity of 5
fmt.format(to.start, to.end, from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "")
} else {
fmt.format(from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "")
}
}
abstract class IdMap[T <: IdMapEntry] {
protected val fmt: String
val mapping: Seq[T]
def pretty: String = mapping.map(_.pretty(fmt)).mkString(",\n")
}
File Edges.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.util._
class TLEdge(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdgeParameters(client, manager, params, sourceInfo)
{
def isAligned(address: UInt, lgSize: UInt): Bool = {
if (maxLgSize == 0) true.B else {
val mask = UIntToOH1(lgSize, maxLgSize)
(address & mask) === 0.U
}
}
def mask(address: UInt, lgSize: UInt): UInt =
MaskGen(address, lgSize, manager.beatBytes)
def staticHasData(bundle: TLChannel): Option[Boolean] = {
bundle match {
case _:TLBundleA => {
// Do there exist A messages with Data?
val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial
// Do there exist A messages without Data?
val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint
// Statically optimize the case where hasData is a constant
if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None
}
case _:TLBundleB => {
// Do there exist B messages with Data?
val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial
// Do there exist B messages without Data?
val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint
// Statically optimize the case where hasData is a constant
if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None
}
case _:TLBundleC => {
// Do there exist C messages with Data?
val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe
// Do there exist C messages without Data?
val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe
if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None
}
case _:TLBundleD => {
// Do there exist D messages with Data?
val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB
// Do there exist D messages without Data?
val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT
if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None
}
case _:TLBundleE => Some(false)
}
}
def isRequest(x: TLChannel): Bool = {
x match {
case a: TLBundleA => true.B
case b: TLBundleB => true.B
case c: TLBundleC => c.opcode(2) && c.opcode(1)
// opcode === TLMessages.Release ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(2) && !d.opcode(1)
// opcode === TLMessages.Grant ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
}
def isResponse(x: TLChannel): Bool = {
x match {
case a: TLBundleA => false.B
case b: TLBundleB => false.B
case c: TLBundleC => !c.opcode(2) || !c.opcode(1)
// opcode =/= TLMessages.Release &&
// opcode =/= TLMessages.ReleaseData
case d: TLBundleD => true.B // Grant isResponse + isRequest
case e: TLBundleE => true.B
}
}
def hasData(x: TLChannel): Bool = {
val opdata = x match {
case a: TLBundleA => !a.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case b: TLBundleB => !b.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case c: TLBundleC => c.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.ProbeAckData ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
staticHasData(x).map(_.B).getOrElse(opdata)
}
def opcode(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.opcode
case b: TLBundleB => b.opcode
case c: TLBundleC => c.opcode
case d: TLBundleD => d.opcode
}
}
def param(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.param
case b: TLBundleB => b.param
case c: TLBundleC => c.param
case d: TLBundleD => d.param
}
}
def size(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.size
case b: TLBundleB => b.size
case c: TLBundleC => c.size
case d: TLBundleD => d.size
}
}
def data(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.data
case b: TLBundleB => b.data
case c: TLBundleC => c.data
case d: TLBundleD => d.data
}
}
def corrupt(x: TLDataChannel): Bool = {
x match {
case a: TLBundleA => a.corrupt
case b: TLBundleB => b.corrupt
case c: TLBundleC => c.corrupt
case d: TLBundleD => d.corrupt
}
}
def mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.mask
case b: TLBundleB => b.mask
case c: TLBundleC => mask(c.address, c.size)
}
}
def full_mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => mask(a.address, a.size)
case b: TLBundleB => mask(b.address, b.size)
case c: TLBundleC => mask(c.address, c.size)
}
}
def address(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.address
case b: TLBundleB => b.address
case c: TLBundleC => c.address
}
}
def source(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.source
case b: TLBundleB => b.source
case c: TLBundleC => c.source
case d: TLBundleD => d.source
}
}
def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes)
def addr_lo(x: UInt): UInt =
if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0)
def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x))
def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x))
def numBeats(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 1.U
case bundle: TLDataChannel => {
val hasData = this.hasData(bundle)
val size = this.size(bundle)
val cutoff = log2Ceil(manager.beatBytes)
val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U
val decode = UIntToOH(size, maxLgSize+1) >> cutoff
Mux(hasData, decode | small.asUInt, 1.U)
}
}
}
def numBeats1(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 0.U
case bundle: TLDataChannel => {
if (maxLgSize == 0) {
0.U
} else {
val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes)
Mux(hasData(bundle), decode, 0.U)
}
}
}
}
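// Example, assuming 8-byte beats: a 32-byte PutFullData occupies 4 data beats
// (numBeats = 4, numBeats1 = 3), while a Get of any size is a single request
// beat (numBeats = 1) because the A channel carries no read data.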
def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val beats1 = numBeats1(bits)
val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W))
val counter1 = counter - 1.U
val first = counter === 0.U
val last = counter === 1.U || beats1 === 0.U
val done = last && fire
val count = (beats1 & ~counter1)
when (fire) {
counter := Mux(first, beats1, counter1)
}
(first, last, done, count)
}
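// Example, for an 8-beat burst: 'first' is true on beat 0, 'last' on beat 7,
// 'done' pulses with the final fire, and 'count' gives the index of the current
// beat (0 to 7) as the burst progresses.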
def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1
def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire)
def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid)
def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2
def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire)
def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid)
def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3
def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire)
def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid)
def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3)
}
def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire)
def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid)
def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4)
}
def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire)
def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid)
def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes))
}
def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire)
def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid)
// Does the request need T permissions to be executed?
def needT(a: TLBundleA): Bool = {
val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLPermissions.NtoB -> false.B,
TLPermissions.NtoT -> true.B,
TLPermissions.BtoT -> true.B))
MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array(
TLMessages.PutFullData -> true.B,
TLMessages.PutPartialData -> true.B,
TLMessages.ArithmeticData -> true.B,
TLMessages.LogicalData -> true.B,
TLMessages.Get -> false.B,
TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLHints.PREFETCH_READ -> false.B,
TLHints.PREFETCH_WRITE -> true.B)),
TLMessages.AcquireBlock -> acq_needT,
TLMessages.AcquirePerm -> acq_needT))
}
// This is a very expensive circuit; use only if you really mean it!
def inFlight(x: TLBundle): (UInt, UInt) = {
val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W))
val bce = manager.anySupportAcquireB && client.anySupportProbe
val (a_first, a_last, _) = firstlast(x.a)
val (b_first, b_last, _) = firstlast(x.b)
val (c_first, c_last, _) = firstlast(x.c)
val (d_first, d_last, _) = firstlast(x.d)
val (e_first, e_last, _) = firstlast(x.e)
val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits))
val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits))
val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits))
val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits))
val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits))
val a_inc = x.a.fire && a_first && a_request
val b_inc = x.b.fire && b_first && b_request
val c_inc = x.c.fire && c_first && c_request
val d_inc = x.d.fire && d_first && d_request
val e_inc = x.e.fire && e_first && e_request
val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil))
val a_dec = x.a.fire && a_last && a_response
val b_dec = x.b.fire && b_last && b_response
val c_dec = x.c.fire && c_last && c_response
val d_dec = x.d.fire && d_last && d_response
val e_dec = x.e.fire && e_last && e_response
val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil))
val next_flight = flight + PopCount(inc) - PopCount(dec)
flight := next_flight
(flight, next_flight)
}
def prettySourceMapping(context: String): String = {
s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n"
}
}
class TLEdgeOut(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
// Transfers
def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquireBlock
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquirePerm
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.Release
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ReleaseData
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) =
Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B)
def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAck
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions, data)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAckData
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B)
def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink)
def GrantAck(toSink: UInt): TLBundleE = {
val e = Wire(new TLBundleE(bundle))
e.sink := toSink
e
}
// Accesses
def Get(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsGetFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Get
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutFullFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutFullData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, mask, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutPartialFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutPartialData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = {
require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsArithmeticFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.ArithmeticData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsLogicalFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.LogicalData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = {
require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsHintFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Hint
a.param := param
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data)
def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAckData
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size)
def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.HintAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
}
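// --- Editorial sketch (not part of the original Edges.scala) --------------------
// Minimal illustration of how the TLEdgeOut constructors above are typically used
// from a client: build the A-channel bits with edge.Get and gate valid on the
// returned legality bit. The node name, address, and lgSize are assumptions chosen
// purely for illustration. Parameters is already in scope from the file header;
// LazyModule/LazyModuleImp come from freechips.rocketchip.diplomacy.
import freechips.rocketchip.diplomacy._

class ExampleGetClient(implicit p: Parameters) extends LazyModule
{
  val node = TLClientNode(Seq(TLMasterPortParameters.v1(
    Seq(TLMasterParameters.v1(name = "example-get-client")))))
  lazy val module = new LazyModuleImp(this) {
    val (out, edge) = node.out(0)                      // port bundle plus its TLEdgeOut
    val (legal, get) = edge.Get(fromSource = 0.U, toAddress = 0x1000.U, lgSize = 3.U)
    out.a.valid := legal                               // only issue requests the edge supports
    out.a.bits  := get
    out.d.ready := true.B                              // always sink the AccessAckData beats
  }
}
// ---------------------------------------------------------------------------------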
class TLEdgeIn(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = {
val todo = x.filter(!_.isEmpty)
val heads = todo.map(_.head)
val tails = todo.map(_.tail)
if (todo.isEmpty) Nil else { heads +: myTranspose(tails) }
}
// Transfers
def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = {
require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}")
val legal = client.supportsProbe(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Probe
b.param := capPermissions
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.Grant
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.GrantData
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B)
def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.ReleaseAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
// Accesses
def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = {
require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsGet(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Get
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsPutFull(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutFullData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, mask, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsPutPartial(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutPartialData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsArithmetic(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.ArithmeticData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsLogical(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.LogicalData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = {
require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsHint(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Hint
b.param := param
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size)
def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied)
def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data)
def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAckData
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B)
def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied)
def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B)
def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.HintAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
}
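// --- Editorial sketch (not part of the original Edges.scala) --------------------
// The manager-side mirror image: answer each Get beat with an AccessAckData built
// by the TLEdgeIn AccessAck constructor above. The address range, beatBytes, and
// the constant response data are illustrative assumptions only (this reuses the
// diplomacy import from the client sketch earlier in this listing).
class ExampleGetManager(implicit p: Parameters) extends LazyModule
{
  val node = TLManagerNode(Seq(TLSlavePortParameters.v1(
    Seq(TLSlaveParameters.v1(
      address     = Seq(AddressSet(0x1000, 0xfff)),
      supportsGet = TransferSizes(1, 8))),
    beatBytes = 8)))
  lazy val module = new LazyModuleImp(this) {
    val (in, edge) = node.in(0)                        // port bundle plus its TLEdgeIn
    in.a.ready := in.d.ready                           // one D beat per accepted A beat
    in.d.valid := in.a.valid
    in.d.bits  := edge.AccessAck(in.a.bits, 0.U)       // AccessAckData carrying dummy data
  }
}
// ---------------------------------------------------------------------------------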
| module TLMonitor_47( // @[Monitor.scala:36:7]
input clock, // @[Monitor.scala:36:7]
input reset, // @[Monitor.scala:36:7]
input io_in_a_ready, // @[Monitor.scala:20:14]
input io_in_a_valid, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_opcode, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_param, // @[Monitor.scala:20:14]
input [1:0] io_in_a_bits_size, // @[Monitor.scala:20:14]
input [10:0] io_in_a_bits_source, // @[Monitor.scala:20:14]
input [27:0] io_in_a_bits_address, // @[Monitor.scala:20:14]
input [7:0] io_in_a_bits_mask, // @[Monitor.scala:20:14]
input [63:0] io_in_a_bits_data, // @[Monitor.scala:20:14]
input io_in_a_bits_corrupt, // @[Monitor.scala:20:14]
input io_in_d_ready, // @[Monitor.scala:20:14]
input io_in_d_valid, // @[Monitor.scala:20:14]
input [2:0] io_in_d_bits_opcode, // @[Monitor.scala:20:14]
input [1:0] io_in_d_bits_size, // @[Monitor.scala:20:14]
input [10:0] io_in_d_bits_source, // @[Monitor.scala:20:14]
input [63:0] io_in_d_bits_data // @[Monitor.scala:20:14]
);
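  // Editorial note: this is an assertion-only TileLink monitor elaborated from
  // Monitor.scala. It has no outputs; it snoops the A and D channels, tracks
  // in-flight transactions per source ID, and raises simulation-time assertions
  // on protocol violations (illegal opcodes/params, misaligned addresses, and
  // responses that do not match an outstanding request).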
wire [31:0] _plusarg_reader_1_out; // @[PlusArg.scala:80:11]
wire [31:0] _plusarg_reader_out; // @[PlusArg.scala:80:11]
wire io_in_a_ready_0 = io_in_a_ready; // @[Monitor.scala:36:7]
wire io_in_a_valid_0 = io_in_a_valid; // @[Monitor.scala:36:7]
wire [2:0] io_in_a_bits_opcode_0 = io_in_a_bits_opcode; // @[Monitor.scala:36:7]
wire [2:0] io_in_a_bits_param_0 = io_in_a_bits_param; // @[Monitor.scala:36:7]
wire [1:0] io_in_a_bits_size_0 = io_in_a_bits_size; // @[Monitor.scala:36:7]
wire [10:0] io_in_a_bits_source_0 = io_in_a_bits_source; // @[Monitor.scala:36:7]
wire [27:0] io_in_a_bits_address_0 = io_in_a_bits_address; // @[Monitor.scala:36:7]
wire [7:0] io_in_a_bits_mask_0 = io_in_a_bits_mask; // @[Monitor.scala:36:7]
wire [63:0] io_in_a_bits_data_0 = io_in_a_bits_data; // @[Monitor.scala:36:7]
wire io_in_a_bits_corrupt_0 = io_in_a_bits_corrupt; // @[Monitor.scala:36:7]
wire io_in_d_ready_0 = io_in_d_ready; // @[Monitor.scala:36:7]
wire io_in_d_valid_0 = io_in_d_valid; // @[Monitor.scala:36:7]
wire [2:0] io_in_d_bits_opcode_0 = io_in_d_bits_opcode; // @[Monitor.scala:36:7]
wire [1:0] io_in_d_bits_size_0 = io_in_d_bits_size; // @[Monitor.scala:36:7]
wire [10:0] io_in_d_bits_source_0 = io_in_d_bits_source; // @[Monitor.scala:36:7]
wire [63:0] io_in_d_bits_data_0 = io_in_d_bits_data; // @[Monitor.scala:36:7]
wire io_in_d_bits_sink = 1'h0; // @[Monitor.scala:36:7]
wire io_in_d_bits_denied = 1'h0; // @[Monitor.scala:36:7]
wire io_in_d_bits_corrupt = 1'h0; // @[Monitor.scala:36:7]
wire _source_ok_T = 1'h0; // @[Parameters.scala:54:10]
wire _source_ok_T_6 = 1'h0; // @[Parameters.scala:54:10]
wire sink_ok = 1'h0; // @[Monitor.scala:309:31]
wire a_first_beats1_decode = 1'h0; // @[Edges.scala:220:59]
wire a_first_beats1 = 1'h0; // @[Edges.scala:221:14]
wire a_first_count = 1'h0; // @[Edges.scala:234:25]
wire d_first_beats1_decode = 1'h0; // @[Edges.scala:220:59]
wire d_first_beats1 = 1'h0; // @[Edges.scala:221:14]
wire d_first_count = 1'h0; // @[Edges.scala:234:25]
wire a_first_beats1_decode_1 = 1'h0; // @[Edges.scala:220:59]
wire a_first_beats1_1 = 1'h0; // @[Edges.scala:221:14]
wire a_first_count_1 = 1'h0; // @[Edges.scala:234:25]
wire d_first_beats1_decode_1 = 1'h0; // @[Edges.scala:220:59]
wire d_first_beats1_1 = 1'h0; // @[Edges.scala:221:14]
wire d_first_count_1 = 1'h0; // @[Edges.scala:234:25]
wire _c_first_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_T = 1'h0; // @[Decoupled.scala:51:35]
wire c_first_beats1_decode = 1'h0; // @[Edges.scala:220:59]
wire c_first_beats1_opdata = 1'h0; // @[Edges.scala:102:36]
wire c_first_beats1 = 1'h0; // @[Edges.scala:221:14]
wire _c_first_last_T = 1'h0; // @[Edges.scala:232:25]
wire c_first_done = 1'h0; // @[Edges.scala:233:22]
wire _c_first_count_T = 1'h0; // @[Edges.scala:234:27]
wire c_first_count = 1'h0; // @[Edges.scala:234:25]
wire _c_first_counter_T = 1'h0; // @[Edges.scala:236:21]
wire d_first_beats1_decode_2 = 1'h0; // @[Edges.scala:220:59]
wire d_first_beats1_2 = 1'h0; // @[Edges.scala:221:14]
wire d_first_count_2 = 1'h0; // @[Edges.scala:234:25]
wire _c_set_wo_ready_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_wo_ready_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_wo_ready_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_wo_ready_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_wo_ready_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_wo_ready_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_interm_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_interm_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_interm_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_interm_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_interm_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_interm_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_interm_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_interm_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_interm_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_interm_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_interm_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_interm_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_T = 1'h0; // @[Monitor.scala:772:47]
wire _c_probe_ack_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_T_1 = 1'h0; // @[Monitor.scala:772:95]
wire c_probe_ack = 1'h0; // @[Monitor.scala:772:71]
wire _same_cycle_resp_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_T_3 = 1'h0; // @[Monitor.scala:795:44]
wire _same_cycle_resp_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_T_4 = 1'h0; // @[Edges.scala:68:36]
wire _same_cycle_resp_T_5 = 1'h0; // @[Edges.scala:68:51]
wire _same_cycle_resp_T_6 = 1'h0; // @[Edges.scala:68:40]
wire _same_cycle_resp_T_7 = 1'h0; // @[Monitor.scala:795:55]
wire _same_cycle_resp_WIRE_4_ready = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_4_valid = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_4_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_5_ready = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_5_valid = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_5_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire same_cycle_resp_1 = 1'h0; // @[Monitor.scala:795:88]
wire _source_ok_T_1 = 1'h1; // @[Parameters.scala:54:32]
wire _source_ok_T_2 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_3 = 1'h1; // @[Parameters.scala:54:67]
wire _source_ok_T_7 = 1'h1; // @[Parameters.scala:54:32]
wire _source_ok_T_8 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_9 = 1'h1; // @[Parameters.scala:54:67]
wire _a_first_last_T_1 = 1'h1; // @[Edges.scala:232:43]
wire a_first_last = 1'h1; // @[Edges.scala:232:33]
wire _d_first_last_T_1 = 1'h1; // @[Edges.scala:232:43]
wire d_first_last = 1'h1; // @[Edges.scala:232:33]
wire _a_first_last_T_3 = 1'h1; // @[Edges.scala:232:43]
wire a_first_last_1 = 1'h1; // @[Edges.scala:232:33]
wire _d_first_last_T_3 = 1'h1; // @[Edges.scala:232:43]
wire d_first_last_1 = 1'h1; // @[Edges.scala:232:33]
wire c_first_counter1 = 1'h1; // @[Edges.scala:230:28]
wire c_first = 1'h1; // @[Edges.scala:231:25]
wire _c_first_last_T_1 = 1'h1; // @[Edges.scala:232:43]
wire c_first_last = 1'h1; // @[Edges.scala:232:33]
wire _d_first_last_T_5 = 1'h1; // @[Edges.scala:232:43]
wire d_first_last_2 = 1'h1; // @[Edges.scala:232:33]
wire [1:0] _c_first_counter1_T = 2'h3; // @[Edges.scala:230:28]
wire [1:0] io_in_d_bits_param = 2'h0; // @[Monitor.scala:36:7]
wire [1:0] _c_first_WIRE_bits_size = 2'h0; // @[Bundles.scala:265:74]
wire [1:0] _c_first_WIRE_1_bits_size = 2'h0; // @[Bundles.scala:265:61]
wire [1:0] _c_first_WIRE_2_bits_size = 2'h0; // @[Bundles.scala:265:74]
wire [1:0] _c_first_WIRE_3_bits_size = 2'h0; // @[Bundles.scala:265:61]
wire [1:0] _c_set_wo_ready_WIRE_bits_size = 2'h0; // @[Bundles.scala:265:74]
wire [1:0] _c_set_wo_ready_WIRE_1_bits_size = 2'h0; // @[Bundles.scala:265:61]
wire [1:0] _c_set_WIRE_bits_size = 2'h0; // @[Bundles.scala:265:74]
wire [1:0] _c_set_WIRE_1_bits_size = 2'h0; // @[Bundles.scala:265:61]
wire [1:0] _c_opcodes_set_interm_WIRE_bits_size = 2'h0; // @[Bundles.scala:265:74]
wire [1:0] _c_opcodes_set_interm_WIRE_1_bits_size = 2'h0; // @[Bundles.scala:265:61]
wire [1:0] _c_sizes_set_interm_WIRE_bits_size = 2'h0; // @[Bundles.scala:265:74]
wire [1:0] _c_sizes_set_interm_WIRE_1_bits_size = 2'h0; // @[Bundles.scala:265:61]
wire [1:0] _c_opcodes_set_WIRE_bits_size = 2'h0; // @[Bundles.scala:265:74]
wire [1:0] _c_opcodes_set_WIRE_1_bits_size = 2'h0; // @[Bundles.scala:265:61]
wire [1:0] _c_sizes_set_WIRE_bits_size = 2'h0; // @[Bundles.scala:265:74]
wire [1:0] _c_sizes_set_WIRE_1_bits_size = 2'h0; // @[Bundles.scala:265:61]
wire [1:0] _c_probe_ack_WIRE_bits_size = 2'h0; // @[Bundles.scala:265:74]
wire [1:0] _c_probe_ack_WIRE_1_bits_size = 2'h0; // @[Bundles.scala:265:61]
wire [1:0] _c_probe_ack_WIRE_2_bits_size = 2'h0; // @[Bundles.scala:265:74]
wire [1:0] _c_probe_ack_WIRE_3_bits_size = 2'h0; // @[Bundles.scala:265:61]
wire [1:0] _same_cycle_resp_WIRE_bits_size = 2'h0; // @[Bundles.scala:265:74]
wire [1:0] _same_cycle_resp_WIRE_1_bits_size = 2'h0; // @[Bundles.scala:265:61]
wire [1:0] _same_cycle_resp_WIRE_2_bits_size = 2'h0; // @[Bundles.scala:265:74]
wire [1:0] _same_cycle_resp_WIRE_3_bits_size = 2'h0; // @[Bundles.scala:265:61]
wire [1:0] _same_cycle_resp_WIRE_4_bits_size = 2'h0; // @[Bundles.scala:265:74]
wire [1:0] _same_cycle_resp_WIRE_5_bits_size = 2'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_first_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_first_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_first_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_first_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_set_wo_ready_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_set_wo_ready_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_opcodes_set_interm_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_opcodes_set_interm_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_sizes_set_interm_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_sizes_set_interm_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_opcodes_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_opcodes_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_sizes_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_sizes_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_probe_ack_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_probe_ack_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_probe_ack_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_probe_ack_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _same_cycle_resp_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _same_cycle_resp_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _same_cycle_resp_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _same_cycle_resp_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _same_cycle_resp_WIRE_4_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _same_cycle_resp_WIRE_5_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [27:0] _c_first_WIRE_bits_address = 28'h0; // @[Bundles.scala:265:74]
wire [27:0] _c_first_WIRE_1_bits_address = 28'h0; // @[Bundles.scala:265:61]
wire [27:0] _c_first_WIRE_2_bits_address = 28'h0; // @[Bundles.scala:265:74]
wire [27:0] _c_first_WIRE_3_bits_address = 28'h0; // @[Bundles.scala:265:61]
wire [27:0] _c_set_wo_ready_WIRE_bits_address = 28'h0; // @[Bundles.scala:265:74]
wire [27:0] _c_set_wo_ready_WIRE_1_bits_address = 28'h0; // @[Bundles.scala:265:61]
wire [27:0] _c_set_WIRE_bits_address = 28'h0; // @[Bundles.scala:265:74]
wire [27:0] _c_set_WIRE_1_bits_address = 28'h0; // @[Bundles.scala:265:61]
wire [27:0] _c_opcodes_set_interm_WIRE_bits_address = 28'h0; // @[Bundles.scala:265:74]
wire [27:0] _c_opcodes_set_interm_WIRE_1_bits_address = 28'h0; // @[Bundles.scala:265:61]
wire [27:0] _c_sizes_set_interm_WIRE_bits_address = 28'h0; // @[Bundles.scala:265:74]
wire [27:0] _c_sizes_set_interm_WIRE_1_bits_address = 28'h0; // @[Bundles.scala:265:61]
wire [27:0] _c_opcodes_set_WIRE_bits_address = 28'h0; // @[Bundles.scala:265:74]
wire [27:0] _c_opcodes_set_WIRE_1_bits_address = 28'h0; // @[Bundles.scala:265:61]
wire [27:0] _c_sizes_set_WIRE_bits_address = 28'h0; // @[Bundles.scala:265:74]
wire [27:0] _c_sizes_set_WIRE_1_bits_address = 28'h0; // @[Bundles.scala:265:61]
wire [27:0] _c_probe_ack_WIRE_bits_address = 28'h0; // @[Bundles.scala:265:74]
wire [27:0] _c_probe_ack_WIRE_1_bits_address = 28'h0; // @[Bundles.scala:265:61]
wire [27:0] _c_probe_ack_WIRE_2_bits_address = 28'h0; // @[Bundles.scala:265:74]
wire [27:0] _c_probe_ack_WIRE_3_bits_address = 28'h0; // @[Bundles.scala:265:61]
wire [27:0] _same_cycle_resp_WIRE_bits_address = 28'h0; // @[Bundles.scala:265:74]
wire [27:0] _same_cycle_resp_WIRE_1_bits_address = 28'h0; // @[Bundles.scala:265:61]
wire [27:0] _same_cycle_resp_WIRE_2_bits_address = 28'h0; // @[Bundles.scala:265:74]
wire [27:0] _same_cycle_resp_WIRE_3_bits_address = 28'h0; // @[Bundles.scala:265:61]
wire [27:0] _same_cycle_resp_WIRE_4_bits_address = 28'h0; // @[Bundles.scala:265:74]
wire [27:0] _same_cycle_resp_WIRE_5_bits_address = 28'h0; // @[Bundles.scala:265:61]
wire [10:0] _c_first_WIRE_bits_source = 11'h0; // @[Bundles.scala:265:74]
wire [10:0] _c_first_WIRE_1_bits_source = 11'h0; // @[Bundles.scala:265:61]
wire [10:0] _c_first_WIRE_2_bits_source = 11'h0; // @[Bundles.scala:265:74]
wire [10:0] _c_first_WIRE_3_bits_source = 11'h0; // @[Bundles.scala:265:61]
wire [10:0] _c_set_wo_ready_WIRE_bits_source = 11'h0; // @[Bundles.scala:265:74]
wire [10:0] _c_set_wo_ready_WIRE_1_bits_source = 11'h0; // @[Bundles.scala:265:61]
wire [10:0] _c_set_WIRE_bits_source = 11'h0; // @[Bundles.scala:265:74]
wire [10:0] _c_set_WIRE_1_bits_source = 11'h0; // @[Bundles.scala:265:61]
wire [10:0] _c_opcodes_set_interm_WIRE_bits_source = 11'h0; // @[Bundles.scala:265:74]
wire [10:0] _c_opcodes_set_interm_WIRE_1_bits_source = 11'h0; // @[Bundles.scala:265:61]
wire [10:0] _c_sizes_set_interm_WIRE_bits_source = 11'h0; // @[Bundles.scala:265:74]
wire [10:0] _c_sizes_set_interm_WIRE_1_bits_source = 11'h0; // @[Bundles.scala:265:61]
wire [10:0] _c_opcodes_set_WIRE_bits_source = 11'h0; // @[Bundles.scala:265:74]
wire [10:0] _c_opcodes_set_WIRE_1_bits_source = 11'h0; // @[Bundles.scala:265:61]
wire [10:0] _c_sizes_set_WIRE_bits_source = 11'h0; // @[Bundles.scala:265:74]
wire [10:0] _c_sizes_set_WIRE_1_bits_source = 11'h0; // @[Bundles.scala:265:61]
wire [10:0] _c_probe_ack_WIRE_bits_source = 11'h0; // @[Bundles.scala:265:74]
wire [10:0] _c_probe_ack_WIRE_1_bits_source = 11'h0; // @[Bundles.scala:265:61]
wire [10:0] _c_probe_ack_WIRE_2_bits_source = 11'h0; // @[Bundles.scala:265:74]
wire [10:0] _c_probe_ack_WIRE_3_bits_source = 11'h0; // @[Bundles.scala:265:61]
wire [10:0] _same_cycle_resp_WIRE_bits_source = 11'h0; // @[Bundles.scala:265:74]
wire [10:0] _same_cycle_resp_WIRE_1_bits_source = 11'h0; // @[Bundles.scala:265:61]
wire [10:0] _same_cycle_resp_WIRE_2_bits_source = 11'h0; // @[Bundles.scala:265:74]
wire [10:0] _same_cycle_resp_WIRE_3_bits_source = 11'h0; // @[Bundles.scala:265:61]
wire [10:0] _same_cycle_resp_WIRE_4_bits_source = 11'h0; // @[Bundles.scala:265:74]
wire [10:0] _same_cycle_resp_WIRE_5_bits_source = 11'h0; // @[Bundles.scala:265:61]
wire [2:0] responseMap_0 = 3'h0; // @[Monitor.scala:643:42]
wire [2:0] responseMap_1 = 3'h0; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_0 = 3'h0; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_1 = 3'h0; // @[Monitor.scala:644:42]
wire [2:0] _c_first_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_beats1_decode_T_2 = 3'h0; // @[package.scala:243:46]
wire [2:0] c_sizes_set_interm = 3'h0; // @[Monitor.scala:755:40]
wire [2:0] _c_set_wo_ready_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_wo_ready_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_wo_ready_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_wo_ready_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_interm_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_interm_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_interm_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_interm_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_interm_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_interm_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_interm_T = 3'h0; // @[Monitor.scala:766:51]
wire [2:0] _c_opcodes_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_4_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_4_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_5_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_5_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [15:0] _a_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _a_size_lookup_T_5 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _d_opcodes_clr_T_3 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _d_sizes_clr_T_3 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _c_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:724:57]
wire [15:0] _c_size_lookup_T_5 = 16'hF; // @[Monitor.scala:724:57]
wire [15:0] _d_opcodes_clr_T_9 = 16'hF; // @[Monitor.scala:724:57]
wire [15:0] _d_sizes_clr_T_9 = 16'hF; // @[Monitor.scala:724:57]
wire [16:0] _a_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _a_size_lookup_T_4 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _d_opcodes_clr_T_2 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _d_sizes_clr_T_2 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _c_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:724:57]
wire [16:0] _c_size_lookup_T_4 = 17'hF; // @[Monitor.scala:724:57]
wire [16:0] _d_opcodes_clr_T_8 = 17'hF; // @[Monitor.scala:724:57]
wire [16:0] _d_sizes_clr_T_8 = 17'hF; // @[Monitor.scala:724:57]
wire [15:0] _a_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _a_size_lookup_T_3 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _d_opcodes_clr_T_1 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _d_sizes_clr_T_1 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _c_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:724:51]
wire [15:0] _c_size_lookup_T_3 = 16'h10; // @[Monitor.scala:724:51]
wire [15:0] _d_opcodes_clr_T_7 = 16'h10; // @[Monitor.scala:724:51]
wire [15:0] _d_sizes_clr_T_7 = 16'h10; // @[Monitor.scala:724:51]
wire [16385:0] _c_sizes_set_T_1 = 16386'h0; // @[Monitor.scala:768:52]
wire [13:0] _c_opcodes_set_T = 14'h0; // @[Monitor.scala:767:79]
wire [13:0] _c_sizes_set_T = 14'h0; // @[Monitor.scala:768:77]
wire [16386:0] _c_opcodes_set_T_1 = 16387'h0; // @[Monitor.scala:767:54]
wire [2:0] responseMap_2 = 3'h1; // @[Monitor.scala:643:42]
wire [2:0] responseMap_3 = 3'h1; // @[Monitor.scala:643:42]
wire [2:0] responseMap_4 = 3'h1; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_2 = 3'h1; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_3 = 3'h1; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_4 = 3'h1; // @[Monitor.scala:644:42]
wire [2:0] _c_sizes_set_interm_T_1 = 3'h1; // @[Monitor.scala:766:59]
wire [3:0] _c_opcodes_set_interm_T_1 = 4'h1; // @[Monitor.scala:765:61]
wire [3:0] c_opcodes_set_interm = 4'h0; // @[Monitor.scala:754:40]
wire [3:0] _c_opcodes_set_interm_T = 4'h0; // @[Monitor.scala:765:53]
wire [2047:0] _c_set_wo_ready_T = 2048'h1; // @[OneHot.scala:58:35]
wire [2047:0] _c_set_T = 2048'h1; // @[OneHot.scala:58:35]
wire [4159:0] c_opcodes_set = 4160'h0; // @[Monitor.scala:740:34]
wire [4159:0] c_sizes_set = 4160'h0; // @[Monitor.scala:741:34]
wire [1039:0] c_set = 1040'h0; // @[Monitor.scala:738:34]
wire [1039:0] c_set_wo_ready = 1040'h0; // @[Monitor.scala:739:34]
wire [2:0] _c_first_beats1_decode_T_1 = 3'h7; // @[package.scala:243:76]
wire [5:0] _c_first_beats1_decode_T = 6'h7; // @[package.scala:243:71]
wire [2:0] responseMap_6 = 3'h4; // @[Monitor.scala:643:42]
wire [2:0] responseMap_7 = 3'h4; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_7 = 3'h4; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_6 = 3'h5; // @[Monitor.scala:644:42]
wire [2:0] responseMap_5 = 3'h2; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_5 = 3'h2; // @[Monitor.scala:644:42]
wire [3:0] _a_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:637:123]
wire [3:0] _a_size_lookup_T_2 = 4'h4; // @[Monitor.scala:641:117]
wire [3:0] _d_opcodes_clr_T = 4'h4; // @[Monitor.scala:680:48]
wire [3:0] _d_sizes_clr_T = 4'h4; // @[Monitor.scala:681:48]
wire [3:0] _c_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:749:123]
wire [3:0] _c_size_lookup_T_2 = 4'h4; // @[Monitor.scala:750:119]
wire [3:0] _d_opcodes_clr_T_6 = 4'h4; // @[Monitor.scala:790:48]
wire [3:0] _d_sizes_clr_T_6 = 4'h4; // @[Monitor.scala:791:48]
wire [10:0] _source_ok_uncommonBits_T = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [10:0] _uncommonBits_T = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [10:0] _uncommonBits_T_1 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [10:0] _uncommonBits_T_2 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [10:0] _uncommonBits_T_3 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [10:0] _uncommonBits_T_4 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [10:0] _uncommonBits_T_5 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [10:0] _uncommonBits_T_6 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [10:0] _uncommonBits_T_7 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [10:0] _uncommonBits_T_8 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [10:0] _source_ok_uncommonBits_T_1 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire [10:0] source_ok_uncommonBits = _source_ok_uncommonBits_T; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_4 = source_ok_uncommonBits < 11'h410; // @[Parameters.scala:52:56, :57:20]
wire _source_ok_T_5 = _source_ok_T_4; // @[Parameters.scala:56:48, :57:20]
wire _source_ok_WIRE_0 = _source_ok_T_5; // @[Parameters.scala:1138:31]
wire [5:0] _GEN = 6'h7 << io_in_a_bits_size_0; // @[package.scala:243:71]
wire [5:0] _is_aligned_mask_T; // @[package.scala:243:71]
assign _is_aligned_mask_T = _GEN; // @[package.scala:243:71]
wire [5:0] _a_first_beats1_decode_T; // @[package.scala:243:71]
assign _a_first_beats1_decode_T = _GEN; // @[package.scala:243:71]
wire [5:0] _a_first_beats1_decode_T_3; // @[package.scala:243:71]
assign _a_first_beats1_decode_T_3 = _GEN; // @[package.scala:243:71]
wire [2:0] _is_aligned_mask_T_1 = _is_aligned_mask_T[2:0]; // @[package.scala:243:{71,76}]
wire [2:0] is_aligned_mask = ~_is_aligned_mask_T_1; // @[package.scala:243:{46,76}]
wire [27:0] _is_aligned_T = {25'h0, io_in_a_bits_address_0[2:0] & is_aligned_mask}; // @[package.scala:243:46]
wire is_aligned = _is_aligned_T == 28'h0; // @[Edges.scala:21:{16,24}]
wire [2:0] _mask_sizeOH_T = {1'h0, io_in_a_bits_size_0}; // @[Misc.scala:202:34]
wire [1:0] mask_sizeOH_shiftAmount = _mask_sizeOH_T[1:0]; // @[OneHot.scala:64:49]
wire [3:0] _mask_sizeOH_T_1 = 4'h1 << mask_sizeOH_shiftAmount; // @[OneHot.scala:64:49, :65:12]
wire [2:0] _mask_sizeOH_T_2 = _mask_sizeOH_T_1[2:0]; // @[OneHot.scala:65:{12,27}]
wire [2:0] mask_sizeOH = {_mask_sizeOH_T_2[2:1], 1'h1}; // @[OneHot.scala:65:27]
wire mask_sub_sub_sub_0_1 = &io_in_a_bits_size_0; // @[Misc.scala:206:21]
wire mask_sub_sub_size = mask_sizeOH[2]; // @[Misc.scala:202:81, :209:26]
wire mask_sub_sub_bit = io_in_a_bits_address_0[2]; // @[Misc.scala:210:26]
wire mask_sub_sub_1_2 = mask_sub_sub_bit; // @[Misc.scala:210:26, :214:27]
wire mask_sub_sub_nbit = ~mask_sub_sub_bit; // @[Misc.scala:210:26, :211:20]
wire mask_sub_sub_0_2 = mask_sub_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_sub_sub_acc_T = mask_sub_sub_size & mask_sub_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_sub_0_1 = mask_sub_sub_sub_0_1 | _mask_sub_sub_acc_T; // @[Misc.scala:206:21, :215:{29,38}]
wire _mask_sub_sub_acc_T_1 = mask_sub_sub_size & mask_sub_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_sub_1_1 = mask_sub_sub_sub_0_1 | _mask_sub_sub_acc_T_1; // @[Misc.scala:206:21, :215:{29,38}]
wire mask_sub_size = mask_sizeOH[1]; // @[Misc.scala:202:81, :209:26]
wire mask_sub_bit = io_in_a_bits_address_0[1]; // @[Misc.scala:210:26]
wire mask_sub_nbit = ~mask_sub_bit; // @[Misc.scala:210:26, :211:20]
wire mask_sub_0_2 = mask_sub_sub_0_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_sub_acc_T = mask_sub_size & mask_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_0_1 = mask_sub_sub_0_1 | _mask_sub_acc_T; // @[Misc.scala:215:{29,38}]
wire mask_sub_1_2 = mask_sub_sub_0_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_sub_acc_T_1 = mask_sub_size & mask_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_1_1 = mask_sub_sub_0_1 | _mask_sub_acc_T_1; // @[Misc.scala:215:{29,38}]
wire mask_sub_2_2 = mask_sub_sub_1_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_sub_acc_T_2 = mask_sub_size & mask_sub_2_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_2_1 = mask_sub_sub_1_1 | _mask_sub_acc_T_2; // @[Misc.scala:215:{29,38}]
wire mask_sub_3_2 = mask_sub_sub_1_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_sub_acc_T_3 = mask_sub_size & mask_sub_3_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_3_1 = mask_sub_sub_1_1 | _mask_sub_acc_T_3; // @[Misc.scala:215:{29,38}]
wire mask_size = mask_sizeOH[0]; // @[Misc.scala:202:81, :209:26]
wire mask_bit = io_in_a_bits_address_0[0]; // @[Misc.scala:210:26]
wire mask_nbit = ~mask_bit; // @[Misc.scala:210:26, :211:20]
wire mask_eq = mask_sub_0_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T = mask_size & mask_eq; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc = mask_sub_0_1 | _mask_acc_T; // @[Misc.scala:215:{29,38}]
wire mask_eq_1 = mask_sub_0_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_1 = mask_size & mask_eq_1; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_1 = mask_sub_0_1 | _mask_acc_T_1; // @[Misc.scala:215:{29,38}]
wire mask_eq_2 = mask_sub_1_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T_2 = mask_size & mask_eq_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_2 = mask_sub_1_1 | _mask_acc_T_2; // @[Misc.scala:215:{29,38}]
wire mask_eq_3 = mask_sub_1_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_3 = mask_size & mask_eq_3; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_3 = mask_sub_1_1 | _mask_acc_T_3; // @[Misc.scala:215:{29,38}]
wire mask_eq_4 = mask_sub_2_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T_4 = mask_size & mask_eq_4; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_4 = mask_sub_2_1 | _mask_acc_T_4; // @[Misc.scala:215:{29,38}]
wire mask_eq_5 = mask_sub_2_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_5 = mask_size & mask_eq_5; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_5 = mask_sub_2_1 | _mask_acc_T_5; // @[Misc.scala:215:{29,38}]
wire mask_eq_6 = mask_sub_3_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T_6 = mask_size & mask_eq_6; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_6 = mask_sub_3_1 | _mask_acc_T_6; // @[Misc.scala:215:{29,38}]
wire mask_eq_7 = mask_sub_3_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_7 = mask_size & mask_eq_7; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_7 = mask_sub_3_1 | _mask_acc_T_7; // @[Misc.scala:215:{29,38}]
wire [1:0] mask_lo_lo = {mask_acc_1, mask_acc}; // @[Misc.scala:215:29, :222:10]
wire [1:0] mask_lo_hi = {mask_acc_3, mask_acc_2}; // @[Misc.scala:215:29, :222:10]
wire [3:0] mask_lo = {mask_lo_hi, mask_lo_lo}; // @[Misc.scala:222:10]
wire [1:0] mask_hi_lo = {mask_acc_5, mask_acc_4}; // @[Misc.scala:215:29, :222:10]
wire [1:0] mask_hi_hi = {mask_acc_7, mask_acc_6}; // @[Misc.scala:215:29, :222:10]
wire [3:0] mask_hi = {mask_hi_hi, mask_hi_lo}; // @[Misc.scala:222:10]
wire [7:0] mask = {mask_hi, mask_lo}; // @[Misc.scala:222:10]
wire [10:0] uncommonBits = _uncommonBits_T; // @[Parameters.scala:52:{29,56}]
wire [10:0] uncommonBits_1 = _uncommonBits_T_1; // @[Parameters.scala:52:{29,56}]
wire [10:0] uncommonBits_2 = _uncommonBits_T_2; // @[Parameters.scala:52:{29,56}]
wire [10:0] uncommonBits_3 = _uncommonBits_T_3; // @[Parameters.scala:52:{29,56}]
wire [10:0] uncommonBits_4 = _uncommonBits_T_4; // @[Parameters.scala:52:{29,56}]
wire [10:0] uncommonBits_5 = _uncommonBits_T_5; // @[Parameters.scala:52:{29,56}]
wire [10:0] uncommonBits_6 = _uncommonBits_T_6; // @[Parameters.scala:52:{29,56}]
wire [10:0] uncommonBits_7 = _uncommonBits_T_7; // @[Parameters.scala:52:{29,56}]
wire [10:0] uncommonBits_8 = _uncommonBits_T_8; // @[Parameters.scala:52:{29,56}]
wire [10:0] source_ok_uncommonBits_1 = _source_ok_uncommonBits_T_1; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_10 = source_ok_uncommonBits_1 < 11'h410; // @[Parameters.scala:52:56, :57:20]
wire _source_ok_T_11 = _source_ok_T_10; // @[Parameters.scala:56:48, :57:20]
wire _source_ok_WIRE_1_0 = _source_ok_T_11; // @[Parameters.scala:1138:31]
wire _T_672 = io_in_a_ready_0 & io_in_a_valid_0; // @[Decoupled.scala:51:35]
wire _a_first_T; // @[Decoupled.scala:51:35]
assign _a_first_T = _T_672; // @[Decoupled.scala:51:35]
wire _a_first_T_1; // @[Decoupled.scala:51:35]
assign _a_first_T_1 = _T_672; // @[Decoupled.scala:51:35]
wire a_first_done = _a_first_T; // @[Decoupled.scala:51:35]
wire [2:0] _a_first_beats1_decode_T_1 = _a_first_beats1_decode_T[2:0]; // @[package.scala:243:{71,76}]
wire [2:0] _a_first_beats1_decode_T_2 = ~_a_first_beats1_decode_T_1; // @[package.scala:243:{46,76}]
wire _a_first_beats1_opdata_T = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7]
wire _a_first_beats1_opdata_T_1 = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7]
wire a_first_beats1_opdata = ~_a_first_beats1_opdata_T; // @[Edges.scala:92:{28,37}]
reg a_first_counter; // @[Edges.scala:229:27]
wire _a_first_last_T = a_first_counter; // @[Edges.scala:229:27, :232:25]
wire [1:0] _a_first_counter1_T = {1'h0, a_first_counter} - 2'h1; // @[Edges.scala:229:27, :230:28]
wire a_first_counter1 = _a_first_counter1_T[0]; // @[Edges.scala:230:28]
wire a_first = ~a_first_counter; // @[Edges.scala:229:27, :231:25]
wire _a_first_count_T = ~a_first_counter1; // @[Edges.scala:230:28, :234:27]
wire _a_first_counter_T = ~a_first & a_first_counter1; // @[Edges.scala:230:28, :231:25, :236:21]
reg [2:0] opcode; // @[Monitor.scala:387:22]
reg [2:0] param; // @[Monitor.scala:388:22]
reg [1:0] size; // @[Monitor.scala:389:22]
reg [10:0] source; // @[Monitor.scala:390:22]
reg [27:0] address; // @[Monitor.scala:391:22]
wire _T_745 = io_in_d_ready_0 & io_in_d_valid_0; // @[Decoupled.scala:51:35]
wire _d_first_T; // @[Decoupled.scala:51:35]
assign _d_first_T = _T_745; // @[Decoupled.scala:51:35]
wire _d_first_T_1; // @[Decoupled.scala:51:35]
assign _d_first_T_1 = _T_745; // @[Decoupled.scala:51:35]
wire _d_first_T_2; // @[Decoupled.scala:51:35]
assign _d_first_T_2 = _T_745; // @[Decoupled.scala:51:35]
wire d_first_done = _d_first_T; // @[Decoupled.scala:51:35]
wire [5:0] _GEN_0 = 6'h7 << io_in_d_bits_size_0; // @[package.scala:243:71]
wire [5:0] _d_first_beats1_decode_T; // @[package.scala:243:71]
assign _d_first_beats1_decode_T = _GEN_0; // @[package.scala:243:71]
wire [5:0] _d_first_beats1_decode_T_3; // @[package.scala:243:71]
assign _d_first_beats1_decode_T_3 = _GEN_0; // @[package.scala:243:71]
wire [5:0] _d_first_beats1_decode_T_6; // @[package.scala:243:71]
assign _d_first_beats1_decode_T_6 = _GEN_0; // @[package.scala:243:71]
wire [2:0] _d_first_beats1_decode_T_1 = _d_first_beats1_decode_T[2:0]; // @[package.scala:243:{71,76}]
wire [2:0] _d_first_beats1_decode_T_2 = ~_d_first_beats1_decode_T_1; // @[package.scala:243:{46,76}]
wire d_first_beats1_opdata = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7]
wire d_first_beats1_opdata_1 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7]
wire d_first_beats1_opdata_2 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7]
reg d_first_counter; // @[Edges.scala:229:27]
wire _d_first_last_T = d_first_counter; // @[Edges.scala:229:27, :232:25]
wire [1:0] _d_first_counter1_T = {1'h0, d_first_counter} - 2'h1; // @[Edges.scala:229:27, :230:28]
wire d_first_counter1 = _d_first_counter1_T[0]; // @[Edges.scala:230:28]
wire d_first = ~d_first_counter; // @[Edges.scala:229:27, :231:25]
wire _d_first_count_T = ~d_first_counter1; // @[Edges.scala:230:28, :234:27]
wire _d_first_counter_T = ~d_first & d_first_counter1; // @[Edges.scala:230:28, :231:25, :236:21]
reg [2:0] opcode_1; // @[Monitor.scala:538:22]
reg [1:0] size_1; // @[Monitor.scala:540:22]
reg [10:0] source_1; // @[Monitor.scala:541:22]
reg [1039:0] inflight; // @[Monitor.scala:614:27]
reg [4159:0] inflight_opcodes; // @[Monitor.scala:616:35]
reg [4159:0] inflight_sizes; // @[Monitor.scala:618:33]
wire a_first_done_1 = _a_first_T_1; // @[Decoupled.scala:51:35]
wire [2:0] _a_first_beats1_decode_T_4 = _a_first_beats1_decode_T_3[2:0]; // @[package.scala:243:{71,76}]
wire [2:0] _a_first_beats1_decode_T_5 = ~_a_first_beats1_decode_T_4; // @[package.scala:243:{46,76}]
wire a_first_beats1_opdata_1 = ~_a_first_beats1_opdata_T_1; // @[Edges.scala:92:{28,37}]
reg a_first_counter_1; // @[Edges.scala:229:27]
wire _a_first_last_T_2 = a_first_counter_1; // @[Edges.scala:229:27, :232:25]
wire [1:0] _a_first_counter1_T_1 = {1'h0, a_first_counter_1} - 2'h1; // @[Edges.scala:229:27, :230:28]
wire a_first_counter1_1 = _a_first_counter1_T_1[0]; // @[Edges.scala:230:28]
wire a_first_1 = ~a_first_counter_1; // @[Edges.scala:229:27, :231:25]
wire _a_first_count_T_1 = ~a_first_counter1_1; // @[Edges.scala:230:28, :234:27]
wire _a_first_counter_T_1 = ~a_first_1 & a_first_counter1_1; // @[Edges.scala:230:28, :231:25, :236:21]
wire d_first_done_1 = _d_first_T_1; // @[Decoupled.scala:51:35]
wire [2:0] _d_first_beats1_decode_T_4 = _d_first_beats1_decode_T_3[2:0]; // @[package.scala:243:{71,76}]
wire [2:0] _d_first_beats1_decode_T_5 = ~_d_first_beats1_decode_T_4; // @[package.scala:243:{46,76}]
reg d_first_counter_1; // @[Edges.scala:229:27]
wire _d_first_last_T_2 = d_first_counter_1; // @[Edges.scala:229:27, :232:25]
wire [1:0] _d_first_counter1_T_1 = {1'h0, d_first_counter_1} - 2'h1; // @[Edges.scala:229:27, :230:28]
wire d_first_counter1_1 = _d_first_counter1_T_1[0]; // @[Edges.scala:230:28]
wire d_first_1 = ~d_first_counter_1; // @[Edges.scala:229:27, :231:25]
wire _d_first_count_T_1 = ~d_first_counter1_1; // @[Edges.scala:230:28, :234:27]
wire _d_first_counter_T_1 = ~d_first_1 & d_first_counter1_1; // @[Edges.scala:230:28, :231:25, :236:21]
wire [1039:0] a_set; // @[Monitor.scala:626:34]
wire [1039:0] a_set_wo_ready; // @[Monitor.scala:627:34]
wire [4159:0] a_opcodes_set; // @[Monitor.scala:630:33]
wire [4159:0] a_sizes_set; // @[Monitor.scala:632:31]
wire [2:0] a_opcode_lookup; // @[Monitor.scala:635:35]
wire [13:0] _GEN_1 = {1'h0, io_in_d_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :637:69]
wire [13:0] _a_opcode_lookup_T; // @[Monitor.scala:637:69]
assign _a_opcode_lookup_T = _GEN_1; // @[Monitor.scala:637:69]
wire [13:0] _a_size_lookup_T; // @[Monitor.scala:641:65]
assign _a_size_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :641:65]
wire [13:0] _d_opcodes_clr_T_4; // @[Monitor.scala:680:101]
assign _d_opcodes_clr_T_4 = _GEN_1; // @[Monitor.scala:637:69, :680:101]
wire [13:0] _d_sizes_clr_T_4; // @[Monitor.scala:681:99]
assign _d_sizes_clr_T_4 = _GEN_1; // @[Monitor.scala:637:69, :681:99]
wire [13:0] _c_opcode_lookup_T; // @[Monitor.scala:749:69]
assign _c_opcode_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :749:69]
wire [13:0] _c_size_lookup_T; // @[Monitor.scala:750:67]
assign _c_size_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :750:67]
wire [13:0] _d_opcodes_clr_T_10; // @[Monitor.scala:790:101]
assign _d_opcodes_clr_T_10 = _GEN_1; // @[Monitor.scala:637:69, :790:101]
wire [13:0] _d_sizes_clr_T_10; // @[Monitor.scala:791:99]
assign _d_sizes_clr_T_10 = _GEN_1; // @[Monitor.scala:637:69, :791:99]
wire [4159:0] _a_opcode_lookup_T_1 = inflight_opcodes >> _a_opcode_lookup_T; // @[Monitor.scala:616:35, :637:{44,69}]
wire [4159:0] _a_opcode_lookup_T_6 = {4156'h0, _a_opcode_lookup_T_1[3:0]}; // @[Monitor.scala:637:{44,97}]
wire [4159:0] _a_opcode_lookup_T_7 = {1'h0, _a_opcode_lookup_T_6[4159:1]}; // @[Monitor.scala:637:{97,152}]
assign a_opcode_lookup = _a_opcode_lookup_T_7[2:0]; // @[Monitor.scala:635:35, :637:{21,152}]
wire [3:0] a_size_lookup; // @[Monitor.scala:639:33]
wire [4159:0] _a_size_lookup_T_1 = inflight_sizes >> _a_size_lookup_T; // @[Monitor.scala:618:33, :641:{40,65}]
wire [4159:0] _a_size_lookup_T_6 = {4156'h0, _a_size_lookup_T_1[3:0]}; // @[Monitor.scala:641:{40,91}]
wire [4159:0] _a_size_lookup_T_7 = {1'h0, _a_size_lookup_T_6[4159:1]}; // @[Monitor.scala:641:{91,144}]
assign a_size_lookup = _a_size_lookup_T_7[3:0]; // @[Monitor.scala:639:33, :641:{19,144}]
wire [3:0] a_opcodes_set_interm; // @[Monitor.scala:646:40]
wire [2:0] a_sizes_set_interm; // @[Monitor.scala:648:38]
wire _same_cycle_resp_T = io_in_a_valid_0 & a_first_1; // @[Monitor.scala:36:7, :651:26, :684:44]
wire [2047:0] _GEN_2 = 2048'h1 << io_in_a_bits_source_0; // @[OneHot.scala:58:35]
wire [2047:0] _a_set_wo_ready_T; // @[OneHot.scala:58:35]
assign _a_set_wo_ready_T = _GEN_2; // @[OneHot.scala:58:35]
wire [2047:0] _a_set_T; // @[OneHot.scala:58:35]
assign _a_set_T = _GEN_2; // @[OneHot.scala:58:35]
assign a_set_wo_ready = _same_cycle_resp_T ? _a_set_wo_ready_T[1039:0] : 1040'h0; // @[OneHot.scala:58:35]
wire _T_598 = _T_672 & a_first_1; // @[Decoupled.scala:51:35]
assign a_set = _T_598 ? _a_set_T[1039:0] : 1040'h0; // @[OneHot.scala:58:35]
wire [3:0] _a_opcodes_set_interm_T = {io_in_a_bits_opcode_0, 1'h0}; // @[Monitor.scala:36:7, :657:53]
wire [3:0] _a_opcodes_set_interm_T_1 = {_a_opcodes_set_interm_T[3:1], 1'h1}; // @[Monitor.scala:657:{53,61}]
assign a_opcodes_set_interm = _T_598 ? _a_opcodes_set_interm_T_1 : 4'h0; // @[Monitor.scala:646:40, :655:{25,70}, :657:{28,61}]
wire [2:0] _a_sizes_set_interm_T = {io_in_a_bits_size_0, 1'h0}; // @[Monitor.scala:36:7, :658:51]
wire [2:0] _a_sizes_set_interm_T_1 = {_a_sizes_set_interm_T[2:1], 1'h1}; // @[Monitor.scala:658:{51,59}]
assign a_sizes_set_interm = _T_598 ? _a_sizes_set_interm_T_1 : 3'h0; // @[Monitor.scala:648:38, :655:{25,70}, :658:{28,59}]
wire [13:0] _GEN_3 = {1'h0, io_in_a_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :659:79]
wire [13:0] _a_opcodes_set_T; // @[Monitor.scala:659:79]
assign _a_opcodes_set_T = _GEN_3; // @[Monitor.scala:659:79]
wire [13:0] _a_sizes_set_T; // @[Monitor.scala:660:77]
assign _a_sizes_set_T = _GEN_3; // @[Monitor.scala:659:79, :660:77]
wire [16386:0] _a_opcodes_set_T_1 = {16383'h0, a_opcodes_set_interm} << _a_opcodes_set_T; // @[Monitor.scala:646:40, :659:{54,79}]
assign a_opcodes_set = _T_598 ? _a_opcodes_set_T_1[4159:0] : 4160'h0; // @[Monitor.scala:630:33, :655:{25,70}, :659:{28,54}]
wire [16385:0] _a_sizes_set_T_1 = {16383'h0, a_sizes_set_interm} << _a_sizes_set_T; // @[Monitor.scala:648:38, :659:54, :660:{52,77}]
assign a_sizes_set = _T_598 ? _a_sizes_set_T_1[4159:0] : 4160'h0; // @[Monitor.scala:632:31, :655:{25,70}, :660:{28,52}]
wire [1039:0] d_clr; // @[Monitor.scala:664:34]
wire [1039:0] d_clr_wo_ready; // @[Monitor.scala:665:34]
wire [4159:0] d_opcodes_clr; // @[Monitor.scala:668:33]
wire [4159:0] d_sizes_clr; // @[Monitor.scala:670:31]
wire _GEN_4 = io_in_d_bits_opcode_0 == 3'h6; // @[Monitor.scala:36:7, :673:46]
wire d_release_ack; // @[Monitor.scala:673:46]
assign d_release_ack = _GEN_4; // @[Monitor.scala:673:46]
wire d_release_ack_1; // @[Monitor.scala:783:46]
assign d_release_ack_1 = _GEN_4; // @[Monitor.scala:673:46, :783:46]
wire _T_644 = io_in_d_valid_0 & d_first_1; // @[Monitor.scala:36:7, :674:26]
wire [2047:0] _GEN_5 = 2048'h1 << io_in_d_bits_source_0; // @[OneHot.scala:58:35]
wire [2047:0] _d_clr_wo_ready_T; // @[OneHot.scala:58:35]
assign _d_clr_wo_ready_T = _GEN_5; // @[OneHot.scala:58:35]
wire [2047:0] _d_clr_T; // @[OneHot.scala:58:35]
assign _d_clr_T = _GEN_5; // @[OneHot.scala:58:35]
wire [2047:0] _d_clr_wo_ready_T_1; // @[OneHot.scala:58:35]
assign _d_clr_wo_ready_T_1 = _GEN_5; // @[OneHot.scala:58:35]
wire [2047:0] _d_clr_T_1; // @[OneHot.scala:58:35]
assign _d_clr_T_1 = _GEN_5; // @[OneHot.scala:58:35]
assign d_clr_wo_ready = _T_644 & ~d_release_ack ? _d_clr_wo_ready_T[1039:0] : 1040'h0; // @[OneHot.scala:58:35]
wire _T_613 = _T_745 & d_first_1 & ~d_release_ack; // @[Decoupled.scala:51:35]
assign d_clr = _T_613 ? _d_clr_T[1039:0] : 1040'h0; // @[OneHot.scala:58:35]
wire [16398:0] _d_opcodes_clr_T_5 = 16399'hF << _d_opcodes_clr_T_4; // @[Monitor.scala:680:{76,101}]
assign d_opcodes_clr = _T_613 ? _d_opcodes_clr_T_5[4159:0] : 4160'h0; // @[Monitor.scala:668:33, :678:{25,70,89}, :680:{21,76}]
wire [16398:0] _d_sizes_clr_T_5 = 16399'hF << _d_sizes_clr_T_4; // @[Monitor.scala:681:{74,99}]
assign d_sizes_clr = _T_613 ? _d_sizes_clr_T_5[4159:0] : 4160'h0; // @[Monitor.scala:670:31, :678:{25,70,89}, :681:{21,74}]
wire _same_cycle_resp_T_1 = _same_cycle_resp_T; // @[Monitor.scala:684:{44,55}]
wire _same_cycle_resp_T_2 = io_in_a_bits_source_0 == io_in_d_bits_source_0; // @[Monitor.scala:36:7, :684:113]
wire same_cycle_resp = _same_cycle_resp_T_1 & _same_cycle_resp_T_2; // @[Monitor.scala:684:{55,88,113}]
wire [1039:0] _inflight_T = inflight | a_set; // @[Monitor.scala:614:27, :626:34, :705:27]
wire [1039:0] _inflight_T_1 = ~d_clr; // @[Monitor.scala:664:34, :705:38]
wire [1039:0] _inflight_T_2 = _inflight_T & _inflight_T_1; // @[Monitor.scala:705:{27,36,38}]
wire [4159:0] _inflight_opcodes_T = inflight_opcodes | a_opcodes_set; // @[Monitor.scala:616:35, :630:33, :706:43]
wire [4159:0] _inflight_opcodes_T_1 = ~d_opcodes_clr; // @[Monitor.scala:668:33, :706:62]
wire [4159:0] _inflight_opcodes_T_2 = _inflight_opcodes_T & _inflight_opcodes_T_1; // @[Monitor.scala:706:{43,60,62}]
wire [4159:0] _inflight_sizes_T = inflight_sizes | a_sizes_set; // @[Monitor.scala:618:33, :632:31, :707:39]
wire [4159:0] _inflight_sizes_T_1 = ~d_sizes_clr; // @[Monitor.scala:670:31, :707:56]
wire [4159:0] _inflight_sizes_T_2 = _inflight_sizes_T & _inflight_sizes_T_1; // @[Monitor.scala:707:{39,54,56}]
reg [31:0] watchdog; // @[Monitor.scala:709:27]
wire [32:0] _watchdog_T = {1'h0, watchdog} + 33'h1; // @[Monitor.scala:709:27, :714:26]
wire [31:0] _watchdog_T_1 = _watchdog_T[31:0]; // @[Monitor.scala:714:26]
reg [1039:0] inflight_1; // @[Monitor.scala:726:35]
wire [1039:0] _inflight_T_3 = inflight_1; // @[Monitor.scala:726:35, :814:35]
reg [4159:0] inflight_opcodes_1; // @[Monitor.scala:727:35]
wire [4159:0] _inflight_opcodes_T_3 = inflight_opcodes_1; // @[Monitor.scala:727:35, :815:43]
reg [4159:0] inflight_sizes_1; // @[Monitor.scala:728:35]
wire [4159:0] _inflight_sizes_T_3 = inflight_sizes_1; // @[Monitor.scala:728:35, :816:41]
wire d_first_done_2 = _d_first_T_2; // @[Decoupled.scala:51:35]
wire [2:0] _d_first_beats1_decode_T_7 = _d_first_beats1_decode_T_6[2:0]; // @[package.scala:243:{71,76}]
wire [2:0] _d_first_beats1_decode_T_8 = ~_d_first_beats1_decode_T_7; // @[package.scala:243:{46,76}]
reg d_first_counter_2; // @[Edges.scala:229:27]
wire _d_first_last_T_4 = d_first_counter_2; // @[Edges.scala:229:27, :232:25]
wire [1:0] _d_first_counter1_T_2 = {1'h0, d_first_counter_2} - 2'h1; // @[Edges.scala:229:27, :230:28]
wire d_first_counter1_2 = _d_first_counter1_T_2[0]; // @[Edges.scala:230:28]
wire d_first_2 = ~d_first_counter_2; // @[Edges.scala:229:27, :231:25]
wire _d_first_count_T_2 = ~d_first_counter1_2; // @[Edges.scala:230:28, :234:27]
wire _d_first_counter_T_2 = ~d_first_2 & d_first_counter1_2; // @[Edges.scala:230:28, :231:25, :236:21]
wire [3:0] c_opcode_lookup; // @[Monitor.scala:747:35]
wire [3:0] c_size_lookup; // @[Monitor.scala:748:35]
wire [4159:0] _c_opcode_lookup_T_1 = inflight_opcodes_1 >> _c_opcode_lookup_T; // @[Monitor.scala:727:35, :749:{44,69}]
wire [4159:0] _c_opcode_lookup_T_6 = {4156'h0, _c_opcode_lookup_T_1[3:0]}; // @[Monitor.scala:749:{44,97}]
wire [4159:0] _c_opcode_lookup_T_7 = {1'h0, _c_opcode_lookup_T_6[4159:1]}; // @[Monitor.scala:749:{97,152}]
assign c_opcode_lookup = _c_opcode_lookup_T_7[3:0]; // @[Monitor.scala:747:35, :749:{21,152}]
wire [4159:0] _c_size_lookup_T_1 = inflight_sizes_1 >> _c_size_lookup_T; // @[Monitor.scala:728:35, :750:{42,67}]
wire [4159:0] _c_size_lookup_T_6 = {4156'h0, _c_size_lookup_T_1[3:0]}; // @[Monitor.scala:750:{42,93}]
wire [4159:0] _c_size_lookup_T_7 = {1'h0, _c_size_lookup_T_6[4159:1]}; // @[Monitor.scala:750:{93,146}]
assign c_size_lookup = _c_size_lookup_T_7[3:0]; // @[Monitor.scala:748:35, :750:{21,146}]
wire [1039:0] d_clr_1; // @[Monitor.scala:774:34]
wire [1039:0] d_clr_wo_ready_1; // @[Monitor.scala:775:34]
wire [4159:0] d_opcodes_clr_1; // @[Monitor.scala:776:34]
wire [4159:0] d_sizes_clr_1; // @[Monitor.scala:777:34]
wire _T_716 = io_in_d_valid_0 & d_first_2; // @[Monitor.scala:36:7, :784:26]
assign d_clr_wo_ready_1 = _T_716 & d_release_ack_1 ? _d_clr_wo_ready_T_1[1039:0] : 1040'h0; // @[OneHot.scala:58:35]
wire _T_698 = _T_745 & d_first_2 & d_release_ack_1; // @[Decoupled.scala:51:35]
assign d_clr_1 = _T_698 ? _d_clr_T_1[1039:0] : 1040'h0; // @[OneHot.scala:58:35]
wire [16398:0] _d_opcodes_clr_T_11 = 16399'hF << _d_opcodes_clr_T_10; // @[Monitor.scala:790:{76,101}]
assign d_opcodes_clr_1 = _T_698 ? _d_opcodes_clr_T_11[4159:0] : 4160'h0; // @[Monitor.scala:776:34, :788:{25,70,88}, :790:{21,76}]
wire [16398:0] _d_sizes_clr_T_11 = 16399'hF << _d_sizes_clr_T_10; // @[Monitor.scala:791:{74,99}]
assign d_sizes_clr_1 = _T_698 ? _d_sizes_clr_T_11[4159:0] : 4160'h0; // @[Monitor.scala:777:34, :788:{25,70,88}, :791:{21,74}]
wire _same_cycle_resp_T_8 = io_in_d_bits_source_0 == 11'h0; // @[Monitor.scala:36:7, :795:113]
wire [1039:0] _inflight_T_4 = ~d_clr_1; // @[Monitor.scala:774:34, :814:46]
wire [1039:0] _inflight_T_5 = _inflight_T_3 & _inflight_T_4; // @[Monitor.scala:814:{35,44,46}]
wire [4159:0] _inflight_opcodes_T_4 = ~d_opcodes_clr_1; // @[Monitor.scala:776:34, :815:62]
wire [4159:0] _inflight_opcodes_T_5 = _inflight_opcodes_T_3 & _inflight_opcodes_T_4; // @[Monitor.scala:815:{43,60,62}]
wire [4159:0] _inflight_sizes_T_4 = ~d_sizes_clr_1; // @[Monitor.scala:777:34, :816:58]
wire [4159:0] _inflight_sizes_T_5 = _inflight_sizes_T_3 & _inflight_sizes_T_4; // @[Monitor.scala:816:{41,56,58}]
reg [31:0] watchdog_1; // @[Monitor.scala:818:27] |
Generate the Verilog code corresponding to the following Chisel files.
File ShiftReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
// Similar to the Chisel ShiftRegister but allows the user to suggest a
// name to the registers that get instantiated, and
// to provide a reset value.
object ShiftRegInit {
def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T =
(0 until n).foldRight(in) {
case (i, next) => {
val r = RegNext(next, init)
name.foreach { na => r.suggestName(s"${na}_${i}") }
r
}
}
}
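// Illustrative usage (placeholder names, not part of the original file): delay `busy`
// by three cycles through registers reset to false.B and named busy_pipe_0..busy_pipe_2:
//   val busy_delayed = ShiftRegInit(busy, n = 3, init = false.B, name = Some("busy_pipe"))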
/** These wrap behavioral
* shift registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
* The different types vary in their reset behavior:
* AsyncResetShiftReg -- Asynchronously reset register array
* A W(width) x D(depth) sized array is constructed from D instantiations of a
 * W-wide register vector. Functionally identical to AsyncResetSynchronizerShiftReg,
* but only used for timing applications
*/
abstract class AbstractPipelineReg(w: Int = 1) extends Module {
val io = IO(new Bundle {
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
}
)
}
object AbstractPipelineReg {
def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = {
val chain = Module(gen)
name.foreach{ chain.suggestName(_) }
chain.io.d := in.asUInt
chain.io.q.asTypeOf(in)
}
}
class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) {
require(depth > 0, "Depth must be greater than 0.")
override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}"
val chain = List.tabulate(depth) { i =>
Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}")
}
chain.last.io.d := io.d
chain.last.io.en := true.B
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink.io.d := source.io.q
sink.io.en := true.B
}
io.q := chain.head.io.q
}
object AsyncResetShiftReg {
def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name)
def apply [T <: Data](in: T, depth: Int, name: Option[String]): T =
apply(in, depth, 0, name)
def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T =
apply(in, depth, init.litValue.toInt, name)
def apply [T <: Data](in: T, depth: Int, init: T): T =
apply (in, depth, init.litValue.toInt, None)
}
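// Illustrative usage (placeholder names): pipeline a status word through two asynchronously
// reset stages for timing, per the note above (not for CDC synchronization):
//   val status_piped = AsyncResetShiftReg(status, depth = 2, init = 0, name = Some("status_pipe"))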
File SynchronizerReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util.{RegEnable, Cat}
/** These wrap behavioral
* shift and next registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
*
* These are built up of *ResetSynchronizerPrimitiveShiftReg,
* intended to be replaced by the integrator's metastable flops chains or replaced
* at this level if they have a multi-bit wide synchronizer primitive.
* The different types vary in their reset behavior:
* NonSyncResetSynchronizerShiftReg -- Register array which does not have a reset pin
* AsyncResetSynchronizerShiftReg -- Asynchronously reset register array, constructed from W instantiations of D deep
* 1-bit-wide shift registers.
* SyncResetSynchronizerShiftReg -- Synchronously reset register array, constructed similarly to AsyncResetSynchronizerShiftReg
*
* [Inferred]ResetSynchronizerShiftReg -- TBD reset type by chisel3 reset inference.
*
* ClockCrossingReg -- Not made up of SynchronizerPrimitiveShiftReg. This is for single-deep flops which cross
* Clock Domains.
*/
object SynchronizerResetType extends Enumeration {
val NonSync, Inferred, Sync, Async = Value
}
// Note: this should not be used directly.
// Use the companion object to generate this with the correct reset type mixin.
private class SynchronizerPrimitiveShiftReg(
sync: Int,
init: Boolean,
resetType: SynchronizerResetType.Value)
extends AbstractPipelineReg(1) {
val initInt = if (init) 1 else 0
val initPostfix = resetType match {
case SynchronizerResetType.NonSync => ""
case _ => s"_i${initInt}"
}
override def desiredName = s"${resetType.toString}ResetSynchronizerPrimitiveShiftReg_d${sync}${initPostfix}"
val chain = List.tabulate(sync) { i =>
val reg = if (resetType == SynchronizerResetType.NonSync) Reg(Bool()) else RegInit(init.B)
reg.suggestName(s"sync_$i")
}
chain.last := io.d.asBool
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink := source
}
io.q := chain.head.asUInt
}
private object SynchronizerPrimitiveShiftReg {
def apply (in: Bool, sync: Int, init: Boolean, resetType: SynchronizerResetType.Value): Bool = {
val gen: () => SynchronizerPrimitiveShiftReg = resetType match {
case SynchronizerResetType.NonSync =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType)
case SynchronizerResetType.Async =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireAsyncReset
case SynchronizerResetType.Sync =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireSyncReset
case SynchronizerResetType.Inferred =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType)
}
AbstractPipelineReg(gen(), in)
}
}
// Note: This module may end up with a non-AsyncReset type reset.
// But the Primitives within will always have AsyncReset type.
class AsyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int)
extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"AsyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
withReset(reset.asAsyncReset){
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Async)
}
}
io.q := Cat(output.reverse)
}
object AsyncResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
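// Illustrative usage (placeholder names): bring an asynchronous level signal into the local
// clock domain through a 3-deep, asynchronously reset synchronizer chain:
//   val irq_sync = AsyncResetSynchronizerShiftReg(irq_async, sync = 3, init = 0)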
// Note: This module may end up with a non-Bool type reset.
// But the Primitives within will always have Bool reset type.
@deprecated("SyncResetSynchronizerShiftReg is unnecessary with Chisel3 inferred resets. Use ResetSynchronizerShiftReg which will use the inferred reset type.", "rocket-chip 1.2")
class SyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"SyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
withReset(reset.asBool){
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Sync)
}
}
io.q := Cat(output.reverse)
}
object SyncResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
if (sync == 0) in else AbstractPipelineReg(new SyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
class ResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"ResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Inferred)
}
io.q := Cat(output.reverse)
}
object ResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
AbstractPipelineReg(new ResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
class SynchronizerShiftReg(w: Int = 1, sync: Int = 3) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"SynchronizerShiftReg_w${w}_d${sync}"
val output = Seq.tabulate(w) { i =>
SynchronizerPrimitiveShiftReg(io.d(i), sync, false, SynchronizerResetType.NonSync)
}
io.q := Cat(output.reverse)
}
object SynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, name: Option[String] = None): T =
if (sync == 0) in else AbstractPipelineReg(new SynchronizerShiftReg(in.getWidth, sync), in, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, None)
def apply [T <: Data](in: T): T =
apply (in, 3, None)
}
class ClockCrossingReg(w: Int = 1, doInit: Boolean) extends Module {
override def desiredName = s"ClockCrossingReg_w${w}"
val io = IO(new Bundle{
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
val en = Input(Bool())
})
val cdc_reg = if (doInit) RegEnable(io.d, 0.U(w.W), io.en) else RegEnable(io.d, io.en)
io.q := cdc_reg
}
object ClockCrossingReg {
def apply [T <: Data](in: T, en: Bool, doInit: Boolean, name: Option[String] = None): T = {
val cdc_reg = Module(new ClockCrossingReg(in.getWidth, doInit))
name.foreach{ cdc_reg.suggestName(_) }
cdc_reg.io.d := in.asUInt
cdc_reg.io.en := en
cdc_reg.io.q.asTypeOf(in)
}
}
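// Illustrative usage (placeholder names): capture quasi-static data crossing a clock domain,
// where `bus_en` is asserted only while `bus_data` is known to be stable:
//   val bus_data_q = ClockCrossingReg(bus_data, en = bus_en, doInit = false)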
| module AsyncResetSynchronizerShiftReg_w1_d3_i0_36( // @[SynchronizerReg.scala:80:7]
input clock, // @[SynchronizerReg.scala:80:7]
input reset, // @[SynchronizerReg.scala:80:7]
input io_d, // @[ShiftReg.scala:36:14]
output io_q // @[ShiftReg.scala:36:14]
);
wire io_d_0 = io_d; // @[SynchronizerReg.scala:80:7]
wire _output_T = reset; // @[SynchronizerReg.scala:86:21]
wire _output_T_1 = io_d_0; // @[SynchronizerReg.scala:80:7, :87:41]
wire output_0; // @[ShiftReg.scala:48:24]
wire io_q_0; // @[SynchronizerReg.scala:80:7]
assign io_q_0 = output_0; // @[SynchronizerReg.scala:80:7]
AsyncResetSynchronizerPrimitiveShiftReg_d3_i0_40 output_chain ( // @[ShiftReg.scala:45:23]
.clock (clock),
.reset (_output_T), // @[SynchronizerReg.scala:86:21]
.io_d (_output_T_1), // @[SynchronizerReg.scala:87:41]
.io_q (output_0)
); // @[ShiftReg.scala:45:23]
assign io_q = io_q_0; // @[SynchronizerReg.scala:80:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File FPDiv.scala:
package saturn.exu
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config._
import freechips.rocketchip.rocket._
import freechips.rocketchip.util._
import freechips.rocketchip.tile._
import chisel3.util.experimental.decode._
import saturn.common._
import saturn.insns._
class VFREC7(implicit p: Parameters) extends FPUModule()(p) {
val io = IO(new Bundle {
val rvs2_input = Input(UInt(64.W))
val eew = Input(UInt(2.W))
val frm = Input(UInt(3.W))
val out = Output(UInt(64.W))
val exc = Output(UInt(5.W))
})
val table = Seq(
127, 125, 123, 121, 119, 117, 116, 114, 112, 110, // 0-9
109, 107, 105, 104, 102, 100, 99, 97, 96, 94,
93, 91, 90, 88, 87, 85, 84, 83, 81, 80,
79, 77, 76, 75, 74, 72, 71, 70, 69, 68,
66, 65, 64, 63, 62, 61, 60, 59, 58, 57,
56, 55, 54, 53, 52, 51, 50, 49, 48, 47,
46, 45, 44, 43, 42, 41, 40, 40, 39, 38,
37, 36, 35, 35, 34, 33, 32, 31, 31, 30,
29, 28, 28, 27, 26, 25, 25, 24, 23, 23,
22, 21, 21, 20, 19, 19, 18, 17, 17, 16,
15, 15, 14, 14, 13, 12, 12, 11, 11, 10,
9, 9, 8, 8, 7, 7, 6, 5, 5, 4,
4, 3, 3, 2, 2, 1, 1, 0)
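  // Descriptive note: 128-entry significand-estimate table for the vfrec7 reciprocal
  // approximation. Index i is the top 7 bits of the normalized significand; each entry
  // appears to be the 7-bit estimate of the reciprocal at the interval midpoint
  // 1 + (2i+1)/256. E.g. for i = 0, 1/(1 + 1/256) = 0.9961 ~= (1 + 127/128) * 2^-1,
  // hence table(0) = 127.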
def count_leading_zeros(in: UInt): UInt = {
PriorityEncoder(Reverse(in))
}
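  // Example: for a 4-bit input "b0001".U, Reverse yields "b1000".U and PriorityEncoder
  // returns 3, i.e. three leading zeros.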
val rvs2_bits = io.rvs2_input
val fTypes = Seq(FType.H, FType.S, FType.D)
val eew_sel = (1 to 3).map(_.U === io.eew)
val classify = Mux1H(eew_sel, fTypes.map(f => f.classify(f.recode(rvs2_bits(f.ieeeWidth-1,0)))))
val dz = WireInit(false.B)
val nv = WireInit(false.B)
val of = WireInit(false.B)
val nx = WireInit(false.B)
val ret = Wire(UInt(64.W))
ret := 0.U // it should not be possible to fall into this case
when (classify(0)) { // -inf
ret := Mux1H(eew_sel, fTypes.map(f => 1.U ## 0.U((f.ieeeWidth-1).W)))
} .elsewhen (classify(7)) { // +inf
ret := 0.U
} .elsewhen (classify(3)) { // -0
ret := Mux1H(eew_sel, fTypes.map(f => 1.U(1.W) ## ~(0.U((f.exp).W)) ## 0.U((f.sig-1).W)))
dz := true.B
} .elsewhen (classify(4)) { // +0
ret := Mux1H(eew_sel, fTypes.map(f => 0.U(1.W) ## ~(0.U((f.exp).W)) ## 0.U((f.sig-1).W)))
dz := true.B
} .elsewhen (classify(8)) { // sNaN
ret := Mux1H(eew_sel, fTypes.map(f => f.ieeeQNaN))
nv := true.B
} .elsewhen (classify(9)) { // qNaN
ret := Mux1H(eew_sel, fTypes.map(f => f.ieeeQNaN))
} .otherwise {
val sub = classify(2) || classify(5)
val exp = Mux1H(eew_sel, fTypes.map(f => (rvs2_bits >> (f.sig - 1))(f.exp-1,0)))
val sig = Mux1H(eew_sel, fTypes.map(f => rvs2_bits(f.sig-2,0)))
val sign = Mux1H(eew_sel, fTypes.map(f => rvs2_bits(f.ieeeWidth-1)))
val norm_exp = WireInit(exp)
val norm_sig = WireInit(sig)
val round_abnormal = WireInit(false.B)
when (sub) {
val leading_zeros = Mux1H(eew_sel, fTypes.map(f => count_leading_zeros(sig(f.sig-2,0))))
val exp_new = exp - leading_zeros
val sig_new = (sig << (leading_zeros +& 1.U)) & Mux1H(eew_sel, fTypes.map(f => ~(0.U((f.sig-1).W))))
norm_exp := exp_new
norm_sig := sig_new
when (exp_new =/= 0.U && ~exp_new =/= 0.U) {
round_abnormal := true.B
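        // The reciprocal of this subnormal overflows the destination format: round to the
        // largest finite magnitude when the rounding mode forbids rounding to infinity
        // (RTZ, or RDN for positive / RUP for negative results), otherwise return +/-infinity.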
when (io.frm === 1.U ||
(io.frm === 2.U && !sign) ||
(io.frm === 3.U && sign)) {
ret := Mux1H(eew_sel, fTypes.map(f => (sign << (f.sig + f.exp - 1)) | (~(0.U(f.exp.W)) << (f.sig - 1)))) - 1.U
} .otherwise {
ret := Mux1H(eew_sel, fTypes.map(f => (sign << (f.sig + f.exp - 1)) | (~(0.U(f.exp.W)) << (f.sig - 1))))
}
}
}
when (!round_abnormal) {
val idx = Mux1H(eew_sel, fTypes.map(f => norm_sig >> (f.sig - 1 - 7)))(6,0)
val lookup = VecInit(table.map(_.U(7.W)))(idx)
val default_out_sig = Mux1H(eew_sel, fTypes.map(f => lookup << (f.sig - 1 - 7)))
val biases = fTypes.map(f => (1 << (f.exp - 1)) - 1)
val default_out_exp = Mux1H(eew_sel, fTypes.zip(biases).map { case (f, b) =>
(2 * b).U + ~norm_exp
})
val out_sig = WireInit(default_out_sig)
val out_exp = WireInit(default_out_exp)
when (default_out_exp === 0.U || (~default_out_exp === 0.U)) {
out_sig := (default_out_sig >> 1) | Mux1H(eew_sel, fTypes.map(f => 1.U << (f.sig - 1 - 1)))
when (~default_out_exp === 0.U) {
out_sig := default_out_sig >> 1;
out_exp := 0.U
}
}
ret := Mux1H(eew_sel, fTypes.map(f => sign ## out_exp(f.exp-1,0) ## out_sig(f.sig-2,0)))
}
when (round_abnormal) {
of := true.B
nx := true.B
}
}
io.out := Mux1H(eew_sel, fTypes.map(f => Fill(64 / f.ieeeWidth, ret(f.ieeeWidth-1,0))))
io.exc := nv ## dz ## of ## false.B ## nx
}
class VFRSQRT7(implicit p: Parameters) extends FPUModule()(p) {
val io = IO(new Bundle {
val rvs2_input = Input(UInt(64.W))
val eew = Input(UInt(2.W))
val out = Output(UInt(64.W))
val exc = Output(UInt(5.W))
})
val table = Seq(
52, 51, 50, 48, 47, 46, 44, 43,
42, 41, 40, 39, 38, 36, 35, 34,
33, 32, 31, 30, 30, 29, 28, 27,
26, 25, 24, 23, 23, 22, 21, 20,
19, 19, 18, 17, 16, 16, 15, 14,
14, 13, 12, 12, 11, 10, 10, 9,
9, 8, 7, 7, 6, 6, 5, 4,
4, 3, 3, 2, 2, 1, 1, 0,
127, 125, 123, 121, 119, 118, 116, 114,
113, 111, 109, 108, 106, 105, 103, 102,
100, 99, 97, 96, 95, 93, 92, 91,
90, 88, 87, 86, 85, 84, 83, 82,
80, 79, 78, 77, 76, 75, 74, 73,
72, 71, 70, 70, 69, 68, 67, 66,
65, 64, 63, 63, 62, 61, 60, 59,
59, 58, 57, 56, 56, 55, 54, 53
)
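  // Descriptive note: 128-entry significand-estimate table for the vfrsqrt7 reciprocal
  // square-root approximation. The index formed below is {exponent LSB, top 6 significand
  // bits}, so exponent parity selects the upper or lower half of the table; each entry is
  // the 7-bit output significand estimate of 1/sqrt(x) over that interval.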
def count_leading_zeros(in: UInt): UInt = {
PriorityEncoder(Reverse(in))
}
val rvs2_bits = io.rvs2_input
val fTypes = Seq(FType.H, FType.S, FType.D)
val eew_sel = (1 to 3).map(_.U === io.eew)
val classify = Mux1H(eew_sel, fTypes.map(f => f.classify(f.recode(rvs2_bits(f.ieeeWidth-1,0)))))
val dz = WireInit(false.B)
val nv = WireInit(false.B)
val of = WireInit(false.B)
val nx = WireInit(false.B)
val ret = Wire(UInt(64.W))
ret := 0.U // it should not be possible to fall into this case
when (classify(0) || classify(1) || classify(2) || classify(8)) { // -inf, -normal, -subnormal, sNaN
nv := true.B
ret := Mux1H(eew_sel, fTypes.map(f => f.ieeeQNaN))
} .elsewhen (classify(9)) { // qNaN
ret := Mux1H(eew_sel, fTypes.map(f => f.ieeeQNaN))
} .elsewhen (classify(3)) { // -0
ret := Mux1H(eew_sel, fTypes.map(f => 1.U(1.W) ## ~(0.U((f.exp).W)) ## 0.U((f.sig-1).W)))
dz := true.B
} .elsewhen (classify(4)) { // +0
ret := Mux1H(eew_sel, fTypes.map(f => 0.U(1.W) ## ~(0.U((f.exp).W)) ## 0.U((f.sig-1).W)))
dz := true.B
} .elsewhen (classify(7)) { // +inf
ret := 0.U
} .otherwise {
val sub = classify(5)
val exp = Mux1H(eew_sel, fTypes.map(f => (rvs2_bits >> (f.sig - 1))(f.exp-1,0)))
val sig = Mux1H(eew_sel, fTypes.map(f => rvs2_bits(f.sig-2,0)))
val sign = Mux1H(eew_sel, fTypes.map(f => rvs2_bits(f.ieeeWidth-1)))
val norm_exp = Wire(UInt((1+fTypes.map(_.exp).max).W))
norm_exp := exp
val norm_sig = WireInit(sig)
when (sub) {
val leading_zeros = Mux1H(eew_sel, fTypes.map(f => count_leading_zeros(sig(f.sig-2,0))))
val exp_new = (0.U(1.W) ## exp) - leading_zeros
val sig_new = (sig << (leading_zeros +& 1.U)) & Mux1H(eew_sel, fTypes.map(f => ~(0.U((f.sig-1).W))))
norm_exp := exp_new
norm_sig := sig_new
}
val idx = ((norm_exp(0) << 6) | Mux1H(eew_sel, fTypes.map(f => norm_sig >> (f.sig - 1 - 7 + 1))))(6,0)
val lookup = VecInit(table.map(_.U(7.W)))(idx)
val out_sig = Mux1H(eew_sel, fTypes.map(f => lookup << (f.sig - 1 - 7)))
val biases = fTypes.map(f => (1 << (f.exp - 1)) - 1)
val out_exp = Mux1H(eew_sel, fTypes.zip(biases).map { case (f, b) =>
val bias3 = ((3 * b).S((f.exp + 2).W) - norm_exp.asSInt - 1.S).asUInt
bias3 >> 1
})
ret := Mux1H(eew_sel, fTypes.map(f => sign ## out_exp(f.exp-1,0) ## out_sig(f.sig-2,0)))
}
io.out := Mux1H(eew_sel, fTypes.map(f => Fill(64 / f.ieeeWidth, ret(f.ieeeWidth-1,0))))
io.exc := nv ## dz ## of ## false.B ## nx
}
case object FPDivSqrtFactory extends FunctionalUnitFactory {
def insns = Seq(
FDIV.VV, FDIV.VF,
FRDIV.VF,
FSQRT_V,
FRSQRT7_V,
FREC7_V,
FCLASS_V
).map(_.elementWise)
def generate(implicit p: Parameters) = new FPDivSqrt()(p)
}
class FPDivSqrt(implicit p: Parameters) extends IterativeFunctionalUnit()(p) with HasFPUParameters {
val supported_insns = FPDivSqrtFactory.insns
io.set_vxsat := false.B
val divSqrt = Module(new hardfloat.DivSqrtRecF64)
val divSqrt16 = Module(new hardfloat.DivSqrtRecFN_small(FType.H.exp, FType.H.sig, 0))
val accept_inst = new VectorDecoder(
io.iss.op.funct3, io.iss.op.funct6, io.iss.op.rs1, io.iss.op.rs2,
supported_insns,
Seq(FPSwapVdV2))
val ctrl = new VectorDecoder(
op.funct3, op.funct6, op.rs1, op.rs2,
supported_insns,
Seq(FPSwapVdV2))
val ctrl_isDiv = io.iss.op.opff6.isOneOf(OPFFunct6.fdiv, OPFFunct6.frdiv)
val divSqrt_ready = (ctrl_isDiv && divSqrt.io.inReady_div) || (!ctrl_isDiv && divSqrt.io.inReady_sqrt)
val divSqrt16_ready = divSqrt16.io.inReady
val div_op = op.opff6.isOneOf(OPFFunct6.fdiv, OPFFunct6.frdiv)
val rvs2_bits = op.rvs2_elem
val rvs1_bits = op.rvs1_elem
divSqrt.io.detectTininess := hardfloat.consts.tininess_afterRounding
divSqrt.io.roundingMode := op.frm
divSqrt16.io.detectTininess := hardfloat.consts.tininess_afterRounding
divSqrt16.io.roundingMode := op.frm
val iss_fire_pipe = Reg(Bool())
iss_fire_pipe := io.iss.valid && io.iss.ready
divSqrt.io.inValid := iss_fire_pipe && !(op.rvd_eew === 1.U) && (div_op || (op.opff6 === OPFFunct6.funary1 && op.rs1 === 0.U))
divSqrt.io.sqrtOp := !div_op
divSqrt16.io.inValid := iss_fire_pipe && (op.rvd_eew === 1.U) && (div_op || (op.opff6 === OPFFunct6.funary1 && op.rs1 === 0.U))
divSqrt16.io.sqrtOp := !div_op
io.hazard.valid := valid
io.hazard.bits.eg := op.wvd_eg
when (op.rvs1_eew === 3.U) {
divSqrt.io.a := Mux(ctrl.bool(FPSwapVdV2) && div_op, FType.D.recode(rvs1_bits), FType.D.recode(rvs2_bits))
divSqrt.io.b := Mux(ctrl.bool(FPSwapVdV2) || !div_op, FType.D.recode(rvs2_bits), FType.D.recode(rvs1_bits))
} .otherwise {
val narrow_rvs2_bits = rvs2_bits(31,0)
val narrow_rvs1_bits = rvs1_bits(31,0)
val widen = Seq(FType.S.recode(narrow_rvs2_bits), FType.S.recode(narrow_rvs1_bits)).zip(
Seq.fill(2)(Module(new hardfloat.RecFNToRecFN(8, 24, 11, 53)))).map { case(input, upconvert) =>
upconvert.io.in := input
upconvert.io.roundingMode := op.frm
upconvert.io.detectTininess := hardfloat.consts.tininess_afterRounding
upconvert
}
divSqrt.io.a := Mux(ctrl.bool(FPSwapVdV2) && div_op, widen(1).io.out, widen(0).io.out)
divSqrt.io.b := Mux(ctrl.bool(FPSwapVdV2) || !div_op, widen(0).io.out, widen(1).io.out)
}
divSqrt16.io.a := Mux(ctrl.bool(FPSwapVdV2) && div_op, FType.H.recode(rvs1_bits), FType.H.recode(rvs2_bits))
divSqrt16.io.b := Mux(ctrl.bool(FPSwapVdV2) || !div_op, FType.H.recode(rvs2_bits), FType.H.recode(rvs1_bits))
val divSqrt_out_valid = divSqrt.io.outValid_div || divSqrt.io.outValid_sqrt
val divSqrt16_out_valid = divSqrt16.io.outValid_div || divSqrt16.io.outValid_sqrt
val narrow = Module(new hardfloat.RecFNToRecFN(11, 53, 8, 24))
narrow.io.roundingMode := op.frm
narrow.io.detectTininess := hardfloat.consts.tininess_afterRounding
narrow.io.in := divSqrt.io.out
val divSqrt_out = Mux(op.vd_eew === 3.U, FType.D.ieee(divSqrt.io.out), Fill(2, FType.S.ieee(narrow.io.out)))
val out_buffer = RegEnable(divSqrt_out, divSqrt_out_valid)
val out_toWrite = RegInit(false.B)
val divSqrt_write = Mux(out_toWrite, out_buffer, divSqrt_out)
val divSqrt16_out = FType.H.ieee(divSqrt16.io.out)
val out16_buffer = RegEnable(divSqrt16_out, divSqrt16_out_valid)
val out16_toWrite = RegInit(false.B)
val divSqrt16_write = Mux(out16_toWrite, out16_buffer, divSqrt16_out)
// vfclass instruction
val gen_vfclass = Seq(FType.H, FType.S, FType.D).zipWithIndex.map { case(fType, i) =>
Fill(2, Cat(0.U((fType.ieeeWidth-10).W), fType.classify(fType.recode(rvs2_bits(fType.ieeeWidth-1,0)))))
}
val vfclass_inst = op.opff6.isOneOf(OPFFunct6.funary1) && op.rs1 === 16.U
val vfrsqrt7_inst = op.opff6.isOneOf(OPFFunct6.funary1) && op.rs1 === 4.U
val vfrec7_inst = op.opff6.isOneOf(OPFFunct6.funary1) && op.rs1 === 5.U
// Reciprocal Sqrt Approximation
val recSqrt7 = Module(new VFRSQRT7)
recSqrt7.io.rvs2_input := Mux(valid && vfrsqrt7_inst, rvs2_bits, 0.U)
recSqrt7.io.eew := op.rvs2_eew
// Reciprocal Approximation
val rec7 = Module(new VFREC7)
rec7.io.rvs2_input := Mux(valid && vfrec7_inst, rvs2_bits, 0.U)
rec7.io.eew := op.rvs2_eew
rec7.io.frm := op.frm
// Capture result in case of write port backpressure
when (io.write.fire) {
out_toWrite := false.B
out16_toWrite := false.B
} .elsewhen (divSqrt_out_valid) {
out_toWrite := true.B
out16_toWrite := true.B
}
val out = Mux1H(
Seq(vfclass_inst, vfrsqrt7_inst, vfrec7_inst, out_toWrite || divSqrt_out_valid || divSqrt16_out_valid),
Seq(Mux1H(Seq(op.rvs2_eew === 3.U, op.rvs2_eew === 2.U, op.rvs2_eew === 1.U), Seq(gen_vfclass(2), gen_vfclass(1), gen_vfclass(0))), recSqrt7.io.out, rec7.io.out, divSqrt_write)
)(63,0)
io.write.valid := ((vfclass_inst || vfrsqrt7_inst || vfrec7_inst) && valid) || out_toWrite || divSqrt_out_valid
io.write.bits.eg := op.wvd_eg
io.write.bits.mask := FillInterleaved(8, op.wmask)
io.write.bits.data := Fill(dLenB >> 3, out)
io.iss.ready := accept_inst.matched && ((divSqrt_ready && io.iss.op.vd_eew >= 2.U) || (divSqrt16_ready && io.iss.op.vd_eew === 1.U)) && (!valid || last)
last := io.write.fire
io.set_fflags.valid := divSqrt_out_valid || divSqrt16_out_valid || (vfrsqrt7_inst && io.write.fire) || (vfrec7_inst && io.write.fire)
io.set_fflags.bits := (divSqrt.io.exceptionFlags & Fill(5, divSqrt_out_valid)) | divSqrt16.io.exceptionFlags & Fill(5, divSqrt_out_valid) | (recSqrt7.io.exc & Fill(5, vfrsqrt7_inst)) | (rec7.io.exc & Fill(5, vfrec7_inst))
io.scalar_write.valid := false.B
io.scalar_write.bits := DontCare
io.acc := false.B
io.tail := false.B
}
| module VFREC7( // @[FPDiv.scala:13:7]
input [63:0] io_rvs2_input, // @[FPDiv.scala:14:14]
input [1:0] io_eew, // @[FPDiv.scala:14:14]
input [2:0] io_frm, // @[FPDiv.scala:14:14]
output [63:0] io_out, // @[FPDiv.scala:14:14]
output [4:0] io_exc // @[FPDiv.scala:14:14]
);
wire [127:0][6:0] _GEN = '{7'h0, 7'h1, 7'h1, 7'h2, 7'h2, 7'h3, 7'h3, 7'h4, 7'h4, 7'h5, 7'h5, 7'h6, 7'h7, 7'h7, 7'h8, 7'h8, 7'h9, 7'h9, 7'hA, 7'hB, 7'hB, 7'hC, 7'hC, 7'hD, 7'hE, 7'hE, 7'hF, 7'hF, 7'h10, 7'h11, 7'h11, 7'h12, 7'h13, 7'h13, 7'h14, 7'h15, 7'h15, 7'h16, 7'h17, 7'h17, 7'h18, 7'h19, 7'h19, 7'h1A, 7'h1B, 7'h1C, 7'h1C, 7'h1D, 7'h1E, 7'h1F, 7'h1F, 7'h20, 7'h21, 7'h22, 7'h23, 7'h23, 7'h24, 7'h25, 7'h26, 7'h27, 7'h28, 7'h28, 7'h29, 7'h2A, 7'h2B, 7'h2C, 7'h2D, 7'h2E, 7'h2F, 7'h30, 7'h31, 7'h32, 7'h33, 7'h34, 7'h35, 7'h36, 7'h37, 7'h38, 7'h39, 7'h3A, 7'h3B, 7'h3C, 7'h3D, 7'h3E, 7'h3F, 7'h40, 7'h41, 7'h42, 7'h44, 7'h45, 7'h46, 7'h47, 7'h48, 7'h4A, 7'h4B, 7'h4C, 7'h4D, 7'h4F, 7'h50, 7'h51, 7'h53, 7'h54, 7'h55, 7'h57, 7'h58, 7'h5A, 7'h5B, 7'h5D, 7'h5E, 7'h60, 7'h61, 7'h63, 7'h64, 7'h66, 7'h68, 7'h69, 7'h6B, 7'h6D, 7'h6E, 7'h70, 7'h72, 7'h74, 7'h75, 7'h77, 7'h79, 7'h7B, 7'h7D, 7'h7F};
wire eew_sel_0 = io_eew == 2'h1; // @[FPDiv.scala:44:34]
wire eew_sel_1 = io_eew == 2'h2; // @[FPDiv.scala:44:34]
wire classify_rawIn_isZeroExpIn = io_rvs2_input[14:10] == 5'h0; // @[FPDiv.scala:45:78]
wire [3:0] classify_rawIn_normDist = io_rvs2_input[9] ? 4'h0 : io_rvs2_input[8] ? 4'h1 : io_rvs2_input[7] ? 4'h2 : io_rvs2_input[6] ? 4'h3 : io_rvs2_input[5] ? 4'h4 : io_rvs2_input[4] ? 4'h5 : io_rvs2_input[3] ? 4'h6 : io_rvs2_input[2] ? 4'h7 : {3'h4, ~(io_rvs2_input[1])}; // @[Mux.scala:50:70]
wire [24:0] _classify_rawIn_subnormFract_T = {15'h0, io_rvs2_input[9:0]} << classify_rawIn_normDist; // @[Mux.scala:50:70]
wire [5:0] _classify_rawIn_adjustedExp_T_4 = (classify_rawIn_isZeroExpIn ? {2'h3, ~classify_rawIn_normDist} : {1'h0, io_rvs2_input[14:10]}) + {4'h4, classify_rawIn_isZeroExpIn ? 2'h2 : 2'h1}; // @[Mux.scala:50:70]
wire _classify_rawIn_out_sig_T_2 = classify_rawIn_isZeroExpIn ? _classify_rawIn_subnormFract_T[8] : io_rvs2_input[9]; // @[FPDiv.scala:45:78]
wire [2:0] _classify_T_2 = classify_rawIn_isZeroExpIn & ~(|(io_rvs2_input[9:0])) ? 3'h0 : _classify_rawIn_adjustedExp_T_4[5:3]; // @[FPDiv.scala:45:78]
wire _classify_isInf_T = _classify_T_2[0] | (&(_classify_rawIn_adjustedExp_T_4[5:4])) & (|(io_rvs2_input[9:0])); // @[FPDiv.scala:45:78]
wire [2:0] classify_code = {_classify_T_2[2:1], _classify_isInf_T}; // @[FPU.scala:254:17]
wire _classify_isNormal_T = _classify_T_2[2:1] == 2'h1; // @[FPU.scala:259:46]
wire classify_isSubnormal = classify_code == 3'h1 | _classify_isNormal_T & {_classify_isInf_T, _classify_rawIn_adjustedExp_T_4[2:0]} < 4'h2; // @[FPU.scala:254:17, :258:{30,55}, :259:{28,36,46,54}]
wire classify_isNormal = _classify_isNormal_T & (|{_classify_isInf_T, _classify_rawIn_adjustedExp_T_4[2:1]}) | _classify_T_2[2:1] == 2'h2; // @[FPU.scala:258:55, :259:46, :260:{35,38,57,67}]
wire classify_isZero = classify_code == 3'h0; // @[FPU.scala:254:17, :261:23]
wire classify_isInf = (&(_classify_T_2[2:1])) & ~_classify_isInf_T; // @[FPU.scala:256:28, :262:{27,30}]
wire classify_rawIn_isZeroExpIn_1 = io_rvs2_input[30:23] == 8'h0; // @[FPDiv.scala:45:78]
wire [4:0] classify_rawIn_normDist_1 = io_rvs2_input[22] ? 5'h0 : io_rvs2_input[21] ? 5'h1 : io_rvs2_input[20] ? 5'h2 : io_rvs2_input[19] ? 5'h3 : io_rvs2_input[18] ? 5'h4 : io_rvs2_input[17] ? 5'h5 : io_rvs2_input[16] ? 5'h6 : io_rvs2_input[15] ? 5'h7 : io_rvs2_input[14] ? 5'h8 : io_rvs2_input[13] ? 5'h9 : io_rvs2_input[12] ? 5'hA : io_rvs2_input[11] ? 5'hB : io_rvs2_input[10] ? 5'hC : io_rvs2_input[9] ? 5'hD : io_rvs2_input[8] ? 5'hE : io_rvs2_input[7] ? 5'hF : io_rvs2_input[6] ? 5'h10 : io_rvs2_input[5] ? 5'h11 : io_rvs2_input[4] ? 5'h12 : io_rvs2_input[3] ? 5'h13 : io_rvs2_input[2] ? 5'h14 : io_rvs2_input[1] ? 5'h15 : 5'h16; // @[Mux.scala:50:70]
wire [53:0] _classify_rawIn_subnormFract_T_2 = {31'h0, io_rvs2_input[22:0]} << classify_rawIn_normDist_1; // @[Mux.scala:50:70]
wire [8:0] _classify_rawIn_adjustedExp_T_9 = (classify_rawIn_isZeroExpIn_1 ? {4'hF, ~classify_rawIn_normDist_1} : {1'h0, io_rvs2_input[30:23]}) + {7'h20, classify_rawIn_isZeroExpIn_1 ? 2'h2 : 2'h1}; // @[Mux.scala:50:70]
wire _classify_rawIn_out_sig_T_6 = classify_rawIn_isZeroExpIn_1 ? _classify_rawIn_subnormFract_T_2[21] : io_rvs2_input[22]; // @[FPDiv.scala:45:78]
wire [2:0] _classify_T_25 = classify_rawIn_isZeroExpIn_1 & ~(|(io_rvs2_input[22:0])) ? 3'h0 : _classify_rawIn_adjustedExp_T_9[8:6]; // @[FPDiv.scala:45:78]
wire _classify_isInf_T_2 = _classify_T_25[0] | (&(_classify_rawIn_adjustedExp_T_9[8:7])) & (|(io_rvs2_input[22:0])); // @[FPDiv.scala:45:78]
wire [2:0] classify_code_1 = {_classify_T_25[2:1], _classify_isInf_T_2}; // @[FPU.scala:254:17]
wire _classify_isNormal_T_4 = _classify_T_25[2:1] == 2'h1; // @[FPU.scala:259:46]
wire classify_isSubnormal_1 = classify_code_1 == 3'h1 | _classify_isNormal_T_4 & {_classify_isInf_T_2, _classify_rawIn_adjustedExp_T_9[5:0]} < 7'h2; // @[FPU.scala:254:17, :258:{30,55}, :259:{28,36,46,54}]
wire classify_isNormal_1 = _classify_isNormal_T_4 & (|{_classify_isInf_T_2, _classify_rawIn_adjustedExp_T_9[5:1]}) | _classify_T_25[2:1] == 2'h2; // @[FPU.scala:258:55, :259:46, :260:{35,38,57,67}]
wire classify_isZero_1 = classify_code_1 == 3'h0; // @[FPU.scala:254:17, :261:23]
wire classify_isInf_1 = (&(_classify_T_25[2:1])) & ~_classify_isInf_T_2; // @[FPU.scala:256:28, :262:{27,30}]
wire classify_rawIn_isZeroExpIn_2 = io_rvs2_input[62:52] == 11'h0; // @[rawFloatFromFN.scala:45:19, :48:30]
wire [5:0] classify_rawIn_normDist_2 = io_rvs2_input[51] ? 6'h0 : io_rvs2_input[50] ? 6'h1 : io_rvs2_input[49] ? 6'h2 : io_rvs2_input[48] ? 6'h3 : io_rvs2_input[47] ? 6'h4 : io_rvs2_input[46] ? 6'h5 : io_rvs2_input[45] ? 6'h6 : io_rvs2_input[44] ? 6'h7 : io_rvs2_input[43] ? 6'h8 : io_rvs2_input[42] ? 6'h9 : io_rvs2_input[41] ? 6'hA : io_rvs2_input[40] ? 6'hB : io_rvs2_input[39] ? 6'hC : io_rvs2_input[38] ? 6'hD : io_rvs2_input[37] ? 6'hE : io_rvs2_input[36] ? 6'hF : io_rvs2_input[35] ? 6'h10 : io_rvs2_input[34] ? 6'h11 : io_rvs2_input[33] ? 6'h12 : io_rvs2_input[32] ? 6'h13 : io_rvs2_input[31] ? 6'h14 : io_rvs2_input[30] ? 6'h15 : io_rvs2_input[29] ? 6'h16 : io_rvs2_input[28] ? 6'h17 : io_rvs2_input[27] ? 6'h18 : io_rvs2_input[26] ? 6'h19 : io_rvs2_input[25] ? 6'h1A : io_rvs2_input[24] ? 6'h1B : io_rvs2_input[23] ? 6'h1C : io_rvs2_input[22] ? 6'h1D : io_rvs2_input[21] ? 6'h1E : io_rvs2_input[20] ? 6'h1F : io_rvs2_input[19] ? 6'h20 : io_rvs2_input[18] ? 6'h21 : io_rvs2_input[17] ? 6'h22 : io_rvs2_input[16] ? 6'h23 : io_rvs2_input[15] ? 6'h24 : io_rvs2_input[14] ? 6'h25 : io_rvs2_input[13] ? 6'h26 : io_rvs2_input[12] ? 6'h27 : io_rvs2_input[11] ? 6'h28 : io_rvs2_input[10] ? 6'h29 : io_rvs2_input[9] ? 6'h2A : io_rvs2_input[8] ? 6'h2B : io_rvs2_input[7] ? 6'h2C : io_rvs2_input[6] ? 6'h2D : io_rvs2_input[5] ? 6'h2E : io_rvs2_input[4] ? 6'h2F : io_rvs2_input[3] ? 6'h30 : io_rvs2_input[2] ? 6'h31 : {5'h19, ~(io_rvs2_input[1])}; // @[Mux.scala:50:70]
wire [114:0] _classify_rawIn_subnormFract_T_4 = {63'h0, io_rvs2_input[51:0]} << classify_rawIn_normDist_2; // @[Mux.scala:50:70]
wire [11:0] _classify_rawIn_adjustedExp_T_14 = (classify_rawIn_isZeroExpIn_2 ? {6'h3F, ~classify_rawIn_normDist_2} : {1'h0, io_rvs2_input[62:52]}) + {10'h100, classify_rawIn_isZeroExpIn_2 ? 2'h2 : 2'h1}; // @[Mux.scala:50:70]
wire _classify_rawIn_out_sig_T_10 = classify_rawIn_isZeroExpIn_2 ? _classify_rawIn_subnormFract_T_4[50] : io_rvs2_input[51]; // @[rawFloatFromFN.scala:46:21, :48:30, :52:{33,64}, :70:33]
wire [2:0] _classify_T_48 = classify_rawIn_isZeroExpIn_2 & ~(|(io_rvs2_input[51:0])) ? 3'h0 : _classify_rawIn_adjustedExp_T_14[11:9]; // @[recFNFromFN.scala:48:{15,50}]
wire _classify_isInf_T_4 = _classify_T_48[0] | (&(_classify_rawIn_adjustedExp_T_14[11:10])) & (|(io_rvs2_input[51:0])); // @[recFNFromFN.scala:48:{15,76}]
wire [2:0] classify_code_2 = {_classify_T_48[2:1], _classify_isInf_T_4}; // @[FPU.scala:254:17]
wire _classify_isNormal_T_8 = _classify_T_48[2:1] == 2'h1; // @[FPU.scala:259:46]
wire classify_isSubnormal_2 = classify_code_2 == 3'h1 | _classify_isNormal_T_8 & {_classify_isInf_T_4, _classify_rawIn_adjustedExp_T_14[8:0]} < 10'h2; // @[FPU.scala:254:17, :258:{30,55}, :259:{28,36,46,54}]
wire classify_isNormal_2 = _classify_isNormal_T_8 & (|{_classify_isInf_T_4, _classify_rawIn_adjustedExp_T_14[8:1]}) | _classify_T_48[2:1] == 2'h2; // @[FPU.scala:258:55, :259:46, :260:{35,38,57,67}]
wire classify_isZero_2 = classify_code_2 == 3'h0; // @[FPU.scala:254:17, :261:23]
wire classify_isInf_2 = (&(_classify_T_48[2:1])) & ~_classify_isInf_T_4; // @[FPU.scala:256:28, :262:{27,30}]
wire [9:0] classify = (eew_sel_0 ? {(&classify_code) & _classify_rawIn_out_sig_T_2, (&classify_code) & ~_classify_rawIn_out_sig_T_2, classify_isInf & ~(io_rvs2_input[15]), classify_isNormal & ~(io_rvs2_input[15]), classify_isSubnormal & ~(io_rvs2_input[15]), classify_isZero & ~(io_rvs2_input[15]), classify_isZero & io_rvs2_input[15], classify_isSubnormal & io_rvs2_input[15], classify_isNormal & io_rvs2_input[15], classify_isInf & io_rvs2_input[15]} : 10'h0) | (eew_sel_1 ? {(&classify_code_1) & _classify_rawIn_out_sig_T_6, (&classify_code_1) & ~_classify_rawIn_out_sig_T_6, classify_isInf_1 & ~(io_rvs2_input[31]), classify_isNormal_1 & ~(io_rvs2_input[31]), classify_isSubnormal_1 & ~(io_rvs2_input[31]), classify_isZero_1 & ~(io_rvs2_input[31]), classify_isZero_1 & io_rvs2_input[31], classify_isSubnormal_1 & io_rvs2_input[31], classify_isNormal_1 & io_rvs2_input[31], classify_isInf_1 & io_rvs2_input[31]} : 10'h0) | ((&io_eew) ? {(&classify_code_2) & _classify_rawIn_out_sig_T_10, (&classify_code_2) & ~_classify_rawIn_out_sig_T_10, classify_isInf_2 & ~(io_rvs2_input[63]), classify_isNormal_2 & ~(io_rvs2_input[63]), classify_isSubnormal_2 & ~(io_rvs2_input[63]), classify_isZero_2 & ~(io_rvs2_input[63]), classify_isZero_2 & io_rvs2_input[63], classify_isSubnormal_2 & io_rvs2_input[63], classify_isNormal_2 & io_rvs2_input[63], classify_isInf_2 & io_rvs2_input[63]} : 10'h0); // @[Mux.scala:30:73]
wire sub = classify[2] | classify[5]; // @[Mux.scala:30:73]
wire [10:0] exp = {3'h0, {3'h0, eew_sel_0 ? io_rvs2_input[14:10] : 5'h0} | (eew_sel_1 ? io_rvs2_input[30:23] : 8'h0)} | ((&io_eew) ? io_rvs2_input[62:52] : 11'h0); // @[Mux.scala:30:73]
wire [51:0] sig = {29'h0, {13'h0, eew_sel_0 ? io_rvs2_input[9:0] : 10'h0} | (eew_sel_1 ? io_rvs2_input[22:0] : 23'h0)} | ((&io_eew) ? io_rvs2_input[51:0] : 52'h0); // @[Mux.scala:30:73]
wire sign = eew_sel_0 & io_rvs2_input[15] | eew_sel_1 & io_rvs2_input[31] | (&io_eew) & io_rvs2_input[63]; // @[Mux.scala:30:73]
wire [18:0] _GEN_0 = {sig[5:4], sig[7:6], sig[9:8], sig[11:10], sig[13:12], sig[15:14], sig[17:16], sig[19:18], sig[21:20], sig[23]} & 19'h55555; // @[Mux.scala:30:73]
wire [3:0] _GEN_1 = _GEN_0[18:15] | {sig[7:6], sig[9:8]} & 4'h5; // @[Mux.scala:30:73]
wire [3:0] _GEN_2 = _GEN_0[10:7] | {sig[15:14], sig[17:16]} & 4'h5; // @[Mux.scala:30:73]
wire [3:0] _GEN_3 = {_GEN_0[2:0], 1'h0} | {sig[23:22], sig[25:24]} & 4'h5; // @[Mux.scala:30:73]
wire [5:0] leading_zeros = {1'h0, {1'h0, ~eew_sel_0 | sig[9] ? 4'h0 : sig[8] ? 4'h1 : sig[7] ? 4'h2 : sig[6] ? 4'h3 : sig[5] ? 4'h4 : sig[4] ? 4'h5 : sig[3] ? 4'h6 : sig[2] ? 4'h7 : {3'h4, ~(sig[1])}} | (~eew_sel_1 | sig[22] ? 5'h0 : sig[21] ? 5'h1 : sig[20] ? 5'h2 : sig[19] ? 5'h3 : sig[18] ? 5'h4 : sig[17] ? 5'h5 : sig[16] ? 5'h6 : sig[15] ? 5'h7 : sig[14] ? 5'h8 : sig[13] ? 5'h9 : sig[12] ? 5'hA : sig[11] ? 5'hB : sig[10] ? 5'hC : sig[9] ? 5'hD : sig[8] ? 5'hE : sig[7] ? 5'hF : sig[6] ? 5'h10 : sig[5] ? 5'h11 : sig[4] ? 5'h12 : sig[3] ? 5'h13 : sig[2] ? 5'h14 : sig[1] ? 5'h15 : 5'h16)} | (~(&io_eew) | sig[51] ? 6'h0 : sig[50] ? 6'h1 : sig[49] ? 6'h2 : sig[48] ? 6'h3 : sig[47] ? 6'h4 : sig[46] ? 6'h5 : sig[45] ? 6'h6 : sig[44] ? 6'h7 : sig[43] ? 6'h8 : sig[42] ? 6'h9 : sig[41] ? 6'hA : sig[40] ? 6'hB : sig[39] ? 6'hC : sig[38] ? 6'hD : sig[37] ? 6'hE : sig[36] ? 6'hF : sig[35] ? 6'h10 : sig[34] ? 6'h11 : sig[33] ? 6'h12 : sig[32] ? 6'h13 : sig[31] ? 6'h14 : sig[30] ? 6'h15 : sig[29] ? 6'h16 : sig[28] ? 6'h17 : sig[27] ? 6'h18 : sig[26] ? 6'h19 : sig[25] ? 6'h1A : _GEN_3[0] ? 6'h1B : _GEN_3[1] ? 6'h1C : _GEN_3[2] ? 6'h1D : _GEN_3[3] ? 6'h1E : sig[20] ? 6'h1F : sig[19] ? 6'h20 : _GEN_0[5] | sig[18] ? 6'h21 : sig[17] ? 6'h22 : _GEN_2[0] ? 6'h23 : _GEN_2[1] ? 6'h24 : _GEN_2[2] ? 6'h25 : _GEN_2[3] ? 6'h26 : sig[12] ? 6'h27 : sig[11] ? 6'h28 : _GEN_0[13] | sig[10] ? 6'h29 : sig[9] ? 6'h2A : _GEN_1[0] ? 6'h2B : _GEN_1[1] ? 6'h2C : _GEN_1[2] ? 6'h2D : _GEN_1[3] ? 6'h2E : sig[4] ? 6'h2F : sig[3] ? 6'h30 : sig[2] ? 6'h31 : {5'h19, ~(sig[1])}); // @[OneHot.scala:48:45]
wire [10:0] _exp_new_T = exp - {5'h0, leading_zeros}; // @[Mux.scala:30:73]
wire [178:0] _sig_new_T_1 = {127'h0, sig} << {1'h0, leading_zeros} + 7'h1; // @[Mux.scala:30:73]
wire [10:0] norm_exp = sub ? _exp_new_T : exp; // @[Mux.scala:30:73]
wire [48:0] _GEN_4 = sub ? _sig_new_T_1[51:3] & ({29'h0, {13'h0, {7{eew_sel_0}}} | {20{eew_sel_1}}} | {49{&io_eew}}) : sig[51:3]; // @[Mux.scala:30:73]
wire _GEN_5 = (|_exp_new_T) & _exp_new_T != 11'h7FF; // @[FPDiv.scala:82:25, :87:{21,29,41}]
wire round_abnormal = sub & _GEN_5; // @[FPDiv.scala:70:27, :77:34, :79:16, :87:{29,50}]
wire [6:0] _GEN_6 = _GEN[(eew_sel_0 ? _GEN_4[6:0] : 7'h0) | (eew_sel_1 ? _GEN_4[19:13] : 7'h0) | ((&io_eew) ? _GEN_4[48:42] : 7'h0)]; // @[Mux.scala:30:73]
wire [51:0] default_out_sig = {29'h0, {13'h0, eew_sel_0 ? {_GEN_6, 3'h0} : 10'h0} | (eew_sel_1 ? {_GEN_6, 16'h0} : 23'h0)} | ((&io_eew) ? {_GEN_6, 45'h0} : 52'h0); // @[Mux.scala:30:73]
wire [10:0] default_out_exp = (eew_sel_0 ? ~norm_exp + 11'h1E : 11'h0) | (eew_sel_1 ? ~norm_exp + 11'hFE : 11'h0) | ((&io_eew) ? ~norm_exp - 11'h2 : 11'h0); // @[Mux.scala:30:73]
wire _GEN_7 = default_out_exp == 11'h0 | (&default_out_exp); // @[Mux.scala:30:73]
wire [51:0] out_sig = _GEN_7 ? ((&default_out_exp) ? {1'h0, default_out_sig[51:1]} : {&io_eew, default_out_sig[51:24], default_out_sig[23] | eew_sel_1, default_out_sig[22:11], default_out_sig[10] | eew_sel_0, default_out_sig[9:1]}) : default_out_sig; // @[Mux.scala:30:73]
wire [10:0] out_exp = _GEN_7 & (&default_out_exp) ? 11'h0 : default_out_exp; // @[Mux.scala:30:73]
wire [63:0] ret = classify[0] ? {&io_eew, 31'h0, eew_sel_1, 15'h0, eew_sel_0, 15'h0} : classify[7] ? 64'h0 : classify[3] ? {32'h0, {16'h0, eew_sel_0 ? 16'hFC00 : 16'h0} | (eew_sel_1 ? 32'hFF800000 : 32'h0)} | ((&io_eew) ? 64'hFFF0000000000000 : 64'h0) : classify[4] ? {32'h0, {16'h0, eew_sel_0 ? 16'h7C00 : 16'h0} | (eew_sel_1 ? 32'h7F800000 : 32'h0)} | ((&io_eew) ? 64'h7FF0000000000000 : 64'h0) : classify[8] ? {32'h0, {16'h0, eew_sel_0 ? 16'h7E00 : 16'h0} | (eew_sel_1 ? 32'h7FC00000 : 32'h0)} | ((&io_eew) ? 64'h7FF8000000000000 : 64'h0) : classify[9] ? {32'h0, {16'h0, eew_sel_0 ? 16'h7E00 : 16'h0} | (eew_sel_1 ? 32'h7FC00000 : 32'h0)} | ((&io_eew) ? 64'h7FF8000000000000 : 64'h0) : round_abnormal ? (sub & _GEN_5 ? (io_frm == 3'h1 | io_frm == 3'h2 & ~sign | io_frm == 3'h3 & sign ? ({32'h0, {16'h0, eew_sel_0 ? {sign, 15'h7C00} : 16'h0} | (eew_sel_1 ? {sign, 31'h7F800000} : 32'h0)} | ((&io_eew) ? {sign, 63'h7FF0000000000000} : 64'h0)) - 64'h1 : {32'h0, {16'h0, eew_sel_0 ? {sign, 15'h7C00} : 16'h0} | (eew_sel_1 ? {sign, 31'h7F800000} : 32'h0)} | ((&io_eew) ? {sign, 63'h7FF0000000000000} : 64'h0)) : 64'h0) : {32'h0, {16'h0, eew_sel_0 ? {sign, out_exp[4:0], out_sig[9:0]} : 16'h0} | (eew_sel_1 ? {sign, out_exp[7:0], out_sig[22:0]} : 32'h0)} | ((&io_eew) ? {sign, out_exp, out_sig} : 64'h0); // @[Mux.scala:30:73]
wire nx = ~(classify[0] | classify[7] | classify[3] | classify[4] | classify[8] | classify[9]) & round_abnormal; // @[Mux.scala:30:73]
assign io_out = (eew_sel_0 ? {2{{2{ret[15:0]}}}} : 64'h0) | (eew_sel_1 ? {2{ret[31:0]}} : 64'h0) | ((&io_eew) ? ret : 64'h0); // @[Mux.scala:30:73]
assign io_exc = {~(classify[0] | classify[7] | classify[3] | classify[4]) & classify[8], ~(classify[0] | classify[7]) & (classify[3] | classify[4]), nx, 1'h0, nx}; // @[Mux.scala:30:73]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File LazyModuleImp.scala:
package org.chipsalliance.diplomacy.lazymodule
import chisel3.{withClockAndReset, Module, RawModule, Reset, _}
import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo}
import firrtl.passes.InlineAnnotation
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.nodes.Dangle
import scala.collection.immutable.SortedMap
/** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]].
*
* This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy.
*/
sealed trait LazyModuleImpLike extends RawModule {
/** [[LazyModule]] that contains this instance. */
val wrapper: LazyModule
/** IOs that will be automatically "punched" for this instance. */
val auto: AutoBundle
/** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */
protected[diplomacy] val dangles: Seq[Dangle]
// [[wrapper.module]] had better not be accessed while LazyModules are still being built!
require(
LazyModule.scope.isEmpty,
s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}"
)
/** Set module name. Defaults to the containing LazyModule's desiredName. */
override def desiredName: String = wrapper.desiredName
suggestName(wrapper.suggestedName)
/** [[Parameters]] for chisel [[Module]]s. */
implicit val p: Parameters = wrapper.p
/** instantiate this [[LazyModule]], return [[AutoBundle]] and a unconnected [[Dangle]]s from this module and
* submodules.
*/
protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = {
// 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]],
// 2. return [[Dangle]]s from each module.
val childDangles = wrapper.children.reverse.flatMap { c =>
implicit val sourceInfo: SourceInfo = c.info
c.cloneProto.map { cp =>
// If the child is a clone, then recursively set cloneProto of its children as well
def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = {
require(bases.size == clones.size)
(bases.zip(clones)).map { case (l, r) =>
require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}")
l.cloneProto = Some(r)
assignCloneProtos(l.children, r.children)
}
}
assignCloneProtos(c.children, cp.children)
// Clone the child module as a record, and get its [[AutoBundle]]
val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName)
val clonedAuto = clone("auto").asInstanceOf[AutoBundle]
// Get the empty [[Dangle]]'s of the cloned child
val rawDangles = c.cloneDangles()
require(rawDangles.size == clonedAuto.elements.size)
// Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s
val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) }
dangles
}.getOrElse {
// For non-clones, instantiate the child module
val mod = try {
Module(c.module)
} catch {
case e: ChiselException => {
println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}")
throw e
}
}
mod.dangles
}
}
// Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]].
// This will result in a sequence of [[Dangle]] from these [[BaseNode]]s.
val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate())
// Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]]
val allDangles = nodeDangles ++ childDangles
// Group [[allDangles]] by their [[source]].
val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*)
// For each [[source]] set of [[Dangle]]s of size 2, ensure that these
// can be connected as a source-sink pair (have opposite flipped value).
// Make the connection and mark them as [[done]].
val done = Set() ++ pairing.values.filter(_.size == 2).map {
case Seq(a, b) =>
require(a.flipped != b.flipped)
// @todo <> in chisel3 makes directionless connection.
if (a.flipped) {
a.data <> b.data
} else {
b.data <> a.data
}
a.source
case _ => None
}
// Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module.
val forward = allDangles.filter(d => !done(d.source))
// Generate [[AutoBundle]] IO from [[forward]].
val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*))
// Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]]
val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) =>
if (d.flipped) {
d.data <> io
} else {
io <> d.data
}
d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name)
}
// Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]].
wrapper.inModuleBody.reverse.foreach {
_()
}
if (wrapper.shouldBeInlined) {
chisel3.experimental.annotate(new ChiselAnnotation {
def toFirrtl = InlineAnnotation(toNamed)
})
}
// Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]].
(auto, dangles)
}
}
/** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike {
/** Instantiate hardware of this `Module`. */
val (auto, dangles) = instantiate()
}
/** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike {
// These wires are the default clock+reset for all LazyModule children.
// It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the
// [[LazyRawModuleImp]] children.
// Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly.
/** drive clock explicitly. */
val childClock: Clock = Wire(Clock())
/** drive reset explicitly. */
val childReset: Reset = Wire(Reset())
// the default is that these are disabled
childClock := false.B.asClock
childReset := chisel3.DontCare
def provideImplicitClockToLazyChildren: Boolean = false
val (auto, dangles) =
if (provideImplicitClockToLazyChildren) {
withClockAndReset(childClock, childReset) { instantiate() }
} else {
instantiate()
}
}
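/** Illustrative sketch (placeholder names, not from the original file): a clock-domain
  * island's implementation typically extends [[LazyRawModuleImp]] and forwards externally
  * supplied clock/reset to its lazy children via [[childClock]]/[[childReset]].
  * {{{
  *   class MyIslandImp(outer: MyIsland) extends LazyRawModuleImp(outer) {
  *     val io = IO(new Bundle { val clk = Input(Clock()); val rst = Input(AsyncReset()) })
  *     childClock := io.clk
  *     childReset := io.rst
  *   }
  * }}}
  */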
File SBA.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.devices.debug.systembusaccess
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy.lazymodule._
import freechips.rocketchip.amba.{AMBAProt, AMBAProtField}
import freechips.rocketchip.devices.debug.{DebugModuleKey, RWNotify, SBCSFields, WNotifyVal}
import freechips.rocketchip.diplomacy.TransferSizes
import freechips.rocketchip.regmapper.{RegField, RegFieldDesc, RegFieldGroup, RegFieldWrType}
import freechips.rocketchip.tilelink.{TLClientNode, TLMasterParameters, TLMasterPortParameters}
import freechips.rocketchip.util.property
object SystemBusAccessState extends scala.Enumeration {
type SystemBusAccessState = Value
val Idle, SBReadRequest, SBWriteRequest, SBReadResponse, SBWriteResponse = Value
}
object SBErrorCode extends scala.Enumeration {
type SBErrorCode = Value
val NoError = Value(0)
val Timeout = Value(1)
val BadAddr = Value(2)
val AlgnError = Value(3)
val BadAccess = Value(4)
val OtherError = Value(7)
}
object SystemBusAccessModule
{
def apply(sb2tl: SBToTL, dmactive: Bool, dmAuthenticated: Bool)(implicit p: Parameters):
(Seq[RegField], Seq[Seq[RegField]], Seq[Seq[RegField]]) =
{
import SBErrorCode._
val cfg = p(DebugModuleKey).get
val anyAddressWrEn = WireInit(false.B).suggestName("anyAddressWrEn")
val anyDataRdEn = WireInit(false.B).suggestName("anyDataRdEn")
val anyDataWrEn = WireInit(false.B).suggestName("anyDataWrEn")
// --- SBCS Status Register ---
val SBCSFieldsReg = Reg(new SBCSFields()).suggestName("SBCSFieldsReg")
val SBCSFieldsRegReset = WireInit(0.U.asTypeOf(new SBCSFields()))
SBCSFieldsRegReset.sbversion := 1.U(1.W) // This code implements a version of the spec after January 1, 2018
SBCSFieldsRegReset.sbbusy := (sb2tl.module.io.sbStateOut =/= SystemBusAccessState.Idle.id.U)
SBCSFieldsRegReset.sbaccess := 2.U
SBCSFieldsRegReset.sbasize := sb2tl.module.edge.bundle.addressBits.U
SBCSFieldsRegReset.sbaccess128 := (cfg.maxSupportedSBAccess == 128).B
SBCSFieldsRegReset.sbaccess64 := (cfg.maxSupportedSBAccess >= 64).B
SBCSFieldsRegReset.sbaccess32 := (cfg.maxSupportedSBAccess >= 32).B
SBCSFieldsRegReset.sbaccess16 := (cfg.maxSupportedSBAccess >= 16).B
SBCSFieldsRegReset.sbaccess8 := (cfg.maxSupportedSBAccess >= 8).B
val SBCSRdData = WireInit(0.U.asTypeOf(new SBCSFields())).suggestName("SBCSRdData")
val SBCSWrDataVal = WireInit(0.U(32.W))
val SBCSWrData = WireInit(SBCSWrDataVal.asTypeOf(new SBCSFields()))
val sberrorWrEn = WireInit(false.B)
val sbreadondataWrEn = WireInit(false.B)
val sbautoincrementWrEn= WireInit(false.B)
val sbaccessWrEn = WireInit(false.B)
val sbreadonaddrWrEn = WireInit(false.B)
val sbbusyerrorWrEn = WireInit(false.B)
val sbcsfields = RegFieldGroup("sbcs", Some("system bus access control and status"), Seq(
RegField.r(1, SBCSRdData.sbaccess8, RegFieldDesc("sbaccess8", "8-bit accesses supported", reset=Some(if (cfg.maxSupportedSBAccess >= 8) 1 else 0))),
RegField.r(1, SBCSRdData.sbaccess16, RegFieldDesc("sbaccess16", "16-bit accesses supported", reset=Some(if (cfg.maxSupportedSBAccess >= 16) 1 else 0))),
RegField.r(1, SBCSRdData.sbaccess32, RegFieldDesc("sbaccess32", "32-bit accesses supported", reset=Some(if (cfg.maxSupportedSBAccess >= 32) 1 else 0))),
RegField.r(1, SBCSRdData.sbaccess64, RegFieldDesc("sbaccess64", "64-bit accesses supported", reset=Some(if (cfg.maxSupportedSBAccess >= 64) 1 else 0))),
RegField.r(1, SBCSRdData.sbaccess128, RegFieldDesc("sbaccess128", "128-bit accesses supported", reset=Some(if (cfg.maxSupportedSBAccess == 128) 1 else 0))),
RegField.r(7, SBCSRdData.sbasize, RegFieldDesc("sbasize", "bits in address", reset=Some(sb2tl.module.edge.bundle.addressBits))),
WNotifyVal(3, SBCSRdData.sberror, SBCSWrData.sberror, sberrorWrEn,
RegFieldDesc("sberror", "system bus error", reset=Some(0), wrType=Some(RegFieldWrType.ONE_TO_CLEAR))),
WNotifyVal(1, SBCSRdData.sbreadondata, SBCSWrData.sbreadondata, sbreadondataWrEn,
RegFieldDesc("sbreadondata", "system bus read on data", reset=Some(0))),
WNotifyVal(1, SBCSRdData.sbautoincrement, SBCSWrData.sbautoincrement, sbautoincrementWrEn,
RegFieldDesc("sbautoincrement", "system bus auto-increment address", reset=Some(0))),
WNotifyVal(3, SBCSRdData.sbaccess, SBCSWrData.sbaccess, sbaccessWrEn,
RegFieldDesc("sbaccess", "system bus access size", reset=Some(2))),
WNotifyVal(1, SBCSRdData.sbreadonaddr, SBCSWrData.sbreadonaddr, sbreadonaddrWrEn,
RegFieldDesc("sbreadonaddr", "system bus read on data", reset=Some(0))),
RegField.r(1, SBCSRdData.sbbusy, RegFieldDesc("sbbusy", "system bus access is busy", reset=Some(0))),
WNotifyVal(1, SBCSRdData.sbbusyerror, SBCSWrData.sbbusyerror, sbbusyerrorWrEn,
RegFieldDesc("sbbusyerror", "system bus busy error", reset=Some(0), wrType=Some(RegFieldWrType.ONE_TO_CLEAR))),
RegField(6),
RegField.r(3, SBCSRdData.sbversion, RegFieldDesc("sbversion", "system bus access version", reset=Some(1))),
))
// --- System Bus Address Registers ---
// ADDR0 Register is required
// Instantiate ADDR1-3 registers as needed depending on system bus address width
val hasSBAddr1 = (sb2tl.module.edge.bundle.addressBits >= 33)
val hasSBAddr2 = (sb2tl.module.edge.bundle.addressBits >= 65)
val hasSBAddr3 = (sb2tl.module.edge.bundle.addressBits >= 97)
val hasAddr = Seq(true, hasSBAddr1, hasSBAddr2, hasSBAddr3)
val SBADDRESSFieldsReg = Reg(Vec(4, UInt(32.W)))
SBADDRESSFieldsReg.zipWithIndex.foreach { case(a,i) => a.suggestName("SBADDRESS"+i+"FieldsReg")}
val SBADDRESSWrData = WireInit(VecInit(Seq.fill(4) {0.U(32.W)} ))
val SBADDRESSRdEn = WireInit(VecInit(Seq.fill(4) {false.B} ))
val SBADDRESSWrEn = WireInit(VecInit(Seq.fill(4) {false.B} ))
val autoIncrementedAddr = WireInit(0.U(128.W))
autoIncrementedAddr := Cat(SBADDRESSFieldsReg.reverse) + (1.U << SBCSFieldsReg.sbaccess)
autoIncrementedAddr.suggestName("autoIncrementedAddr")
val sbaddrfields: Seq[Seq[RegField]] = SBADDRESSFieldsReg.zipWithIndex.map { case(a,i) =>
if(hasAddr(i)) {
when (~dmactive || ~dmAuthenticated) {
a := 0.U(32.W)
}.otherwise {
a := Mux(SBADDRESSWrEn(i) && !SBCSRdData.sberror && !SBCSFieldsReg.sbbusy && !SBCSFieldsReg.sbbusyerror, SBADDRESSWrData(i),
Mux((sb2tl.module.io.rdDone || sb2tl.module.io.wrDone) && SBCSFieldsReg.sbautoincrement, autoIncrementedAddr(32*i+31,32*i), a))
}
RegFieldGroup("dmi_sbaddr"+i, Some("SBA Address Register"), Seq(RWNotify(32, a, SBADDRESSWrData(i), SBADDRESSRdEn(i), SBADDRESSWrEn(i),
Some(RegFieldDesc("dmi_sbaddr"+i, "SBA address register", reset=Some(0), volatile=true)))))
} else {
a := DontCare
Seq.empty[RegField]
}
}
sb2tl.module.io.addrIn := Mux(SBADDRESSWrEn(0),
Cat(Cat(SBADDRESSFieldsReg.drop(1).reverse), SBADDRESSWrData(0)),
Cat(SBADDRESSFieldsReg.reverse))
anyAddressWrEn := SBADDRESSWrEn.reduce(_ || _)
// --- System Bus Data Registers ---
// DATA0 Register is required
// DATA1-3 Registers may not be needed depending on implementation
val hasSBData1 = (cfg.maxSupportedSBAccess > 32)
val hasSBData2And3 = (cfg.maxSupportedSBAccess == 128)
val hasData = Seq(true, hasSBData1, hasSBData2And3, hasSBData2And3)
val SBDATAFieldsReg = Reg(Vec(4, Vec(4, UInt(8.W))))
SBDATAFieldsReg.zipWithIndex.foreach { case(d,i) => d.zipWithIndex.foreach { case(d,j) => d.suggestName("SBDATA"+i+"BYTE"+j) }}
val SBDATARdData = WireInit(VecInit(Seq.fill(4) {0.U(32.W)} ))
SBDATARdData.zipWithIndex.foreach { case(d,i) => d.suggestName("SBDATARdData"+i) }
val SBDATAWrData = WireInit(VecInit(Seq.fill(4) {0.U(32.W)} ))
SBDATAWrData.zipWithIndex.foreach { case(d,i) => d.suggestName("SBDATAWrData"+i) }
val SBDATARdEn = WireInit(VecInit(Seq.fill(4) {false.B} ))
val SBDATAWrEn = WireInit(VecInit(Seq.fill(4) {false.B} ))
SBDATAWrEn.zipWithIndex.foreach { case(d,i) => d.suggestName("SBDATAWrEn"+i) }
val sbdatafields: Seq[Seq[RegField]] = SBDATAFieldsReg.zipWithIndex.map { case(d,i) =>
if(hasData(i)) {
// For data registers, load enable per-byte
for (j <- 0 to 3) {
when (~dmactive || ~dmAuthenticated) {
d(j) := 0.U(8.W)
}.otherwise {
d(j) := Mux(SBDATAWrEn(i) && !SBCSFieldsReg.sbbusy && !SBCSFieldsReg.sbbusyerror && !SBCSRdData.sberror, SBDATAWrData(i)(8*j+7,8*j),
Mux(sb2tl.module.io.rdLoad(4*i+j), sb2tl.module.io.dataOut, d(j)))
}
}
SBDATARdData(i) := Cat(d.reverse)
RegFieldGroup("dmi_sbdata"+i, Some("SBA Data Register"), Seq(RWNotify(32, SBDATARdData(i), SBDATAWrData(i), SBDATARdEn(i), SBDATAWrEn(i),
Some(RegFieldDesc("dmi_sbdata"+i, "SBA data register", reset=Some(0), volatile=true)))))
} else {
for (j <- 0 to 3) { d(j) := DontCare }
Seq.empty[RegField]
}
}
sb2tl.module.io.dataIn := Mux(sb2tl.module.io.wrEn,Cat(SBDATAWrData.reverse),Cat(SBDATAFieldsReg.flatten.reverse))
anyDataRdEn := SBDATARdEn.reduce(_ || _)
anyDataWrEn := SBDATAWrEn.reduce(_ || _)
val tryWrEn = SBDATAWrEn(0)
val tryRdEn = (SBADDRESSWrEn(0) && SBCSFieldsReg.sbreadonaddr) || (SBDATARdEn(0) && SBCSFieldsReg.sbreadondata)
val sbAccessError = (SBCSFieldsReg.sbaccess === 0.U) && (SBCSFieldsReg.sbaccess8 =/= 1.U) ||
(SBCSFieldsReg.sbaccess === 1.U) && (SBCSFieldsReg.sbaccess16 =/= 1.U) ||
(SBCSFieldsReg.sbaccess === 2.U) && (SBCSFieldsReg.sbaccess32 =/= 1.U) ||
(SBCSFieldsReg.sbaccess === 3.U) && (SBCSFieldsReg.sbaccess64 =/= 1.U) ||
(SBCSFieldsReg.sbaccess === 4.U) && (SBCSFieldsReg.sbaccess128 =/= 1.U) || (SBCSFieldsReg.sbaccess > 4.U)
    val compareAddr = Wire(UInt(32.W)) // Need to use the written or latched address to detect the error case, depending on how the transaction is initiated
compareAddr := Mux(SBADDRESSWrEn(0),SBADDRESSWrData(0),SBADDRESSFieldsReg(0))
val sbAlignmentError = (SBCSFieldsReg.sbaccess === 1.U) && (compareAddr(0) =/= 0.U) ||
(SBCSFieldsReg.sbaccess === 2.U) && (compareAddr(1,0) =/= 0.U) ||
(SBCSFieldsReg.sbaccess === 3.U) && (compareAddr(2,0) =/= 0.U) ||
(SBCSFieldsReg.sbaccess === 4.U) && (compareAddr(3,0) =/= 0.U)
sbAccessError.suggestName("sbAccessError")
sbAlignmentError.suggestName("sbAlignmentError")
sb2tl.module.io.wrEn := dmAuthenticated && tryWrEn && !SBCSFieldsReg.sbbusy && !SBCSFieldsReg.sbbusyerror && !SBCSRdData.sberror && !sbAccessError && !sbAlignmentError
sb2tl.module.io.rdEn := dmAuthenticated && tryRdEn && !SBCSFieldsReg.sbbusy && !SBCSFieldsReg.sbbusyerror && !SBCSRdData.sberror && !sbAccessError && !sbAlignmentError
sb2tl.module.io.sizeIn := SBCSFieldsReg.sbaccess
val sbBusy = (sb2tl.module.io.sbStateOut =/= SystemBusAccessState.Idle.id.U)
when (~dmactive || ~dmAuthenticated) {
SBCSFieldsReg := SBCSFieldsRegReset
}.otherwise {
SBCSFieldsReg.sbbusyerror := Mux(sbbusyerrorWrEn && SBCSWrData.sbbusyerror, false.B, // W1C
Mux(anyAddressWrEn && sbBusy, true.B, // Set if a write to SBADDRESS occurs while busy
Mux((anyDataRdEn || anyDataWrEn) && sbBusy, true.B, SBCSFieldsReg.sbbusyerror))) // Set if any access to SBDATA occurs while busy
SBCSFieldsReg.sbreadonaddr := Mux(sbreadonaddrWrEn, SBCSWrData.sbreadonaddr , SBCSFieldsReg.sbreadonaddr)
SBCSFieldsReg.sbautoincrement := Mux(sbautoincrementWrEn, SBCSWrData.sbautoincrement, SBCSFieldsReg.sbautoincrement)
SBCSFieldsReg.sbreadondata := Mux(sbreadondataWrEn, SBCSWrData.sbreadondata , SBCSFieldsReg.sbreadondata)
SBCSFieldsReg.sbaccess := Mux(sbaccessWrEn, SBCSWrData.sbaccess, SBCSFieldsReg.sbaccess)
SBCSFieldsReg.sbversion := 1.U(1.W) // This code implements a version of the spec after January 1, 2018
}
// sbErrorReg has a per-bit load enable since each bit can be individually cleared by writing a 1 to it
val sbErrorReg = Reg(Vec(4, UInt(1.W)))
when(~dmactive || ~dmAuthenticated) {
for (i <- 0 until 3)
sbErrorReg(i) := 0.U
}.otherwise {
for (i <- 0 until 3)
sbErrorReg(i) := Mux(sberrorWrEn && SBCSWrData.sberror(i) === 1.U, NoError.id.U.extract(i), // W1C
Mux((sb2tl.module.io.wrEn && !sb2tl.module.io.wrLegal) || (sb2tl.module.io.rdEn && !sb2tl.module.io.rdLegal), BadAddr.id.U.extract(i), // Bad address accessed
Mux((tryWrEn || tryRdEn) && sbAlignmentError, AlgnError.id.U.extract(i), // Address alignment error
Mux((tryWrEn || tryRdEn) && sbAccessError, BadAccess.id.U.extract(i), // Access size error
Mux((sb2tl.module.io.rdDone || sb2tl.module.io.wrDone) && sb2tl.module.io.respError, OtherError.id.U.extract(i), sbErrorReg(i)))))) // Response error from TL
}
SBCSRdData := SBCSFieldsReg
SBCSRdData.sbasize := sb2tl.module.edge.bundle.addressBits.U
SBCSRdData.sbaccess128 := (cfg.maxSupportedSBAccess == 128).B
SBCSRdData.sbaccess64 := (cfg.maxSupportedSBAccess >= 64).B
SBCSRdData.sbaccess32 := (cfg.maxSupportedSBAccess >= 32).B
SBCSRdData.sbaccess16 := (cfg.maxSupportedSBAccess >= 16).B
SBCSRdData.sbaccess8 := (cfg.maxSupportedSBAccess >= 8).B
SBCSRdData.sbbusy := sbBusy
SBCSRdData.sberror := sbErrorReg.asUInt
when (~dmAuthenticated) { // Read value must be 0 if not authenticated
SBCSRdData := 0.U.asTypeOf(new SBCSFields())
}
property.cover(SBCSFieldsReg.sbbusyerror, "SBCS Cover", "sberror set")
property.cover(SBCSFieldsReg.sbbusy === 3.U, "SBCS Cover", "sbbusyerror alignment error")
property.cover((sb2tl.module.io.wrEn || sb2tl.module.io.rdEn) && SBCSFieldsReg.sbaccess === 0.U && !sbAccessError && !sbAlignmentError, "SBCS Cover", "8-bit access")
property.cover((sb2tl.module.io.wrEn || sb2tl.module.io.rdEn) && SBCSFieldsReg.sbaccess === 1.U && !sbAccessError && !sbAlignmentError, "SBCS Cover", "16-bit access")
property.cover((sb2tl.module.io.wrEn || sb2tl.module.io.rdEn) && SBCSFieldsReg.sbaccess === 2.U && !sbAccessError && !sbAlignmentError, "SBCS Cover", "32-bit access")
property.cover((sb2tl.module.io.wrEn || sb2tl.module.io.rdEn) && SBCSFieldsReg.sbaccess === 3.U && !sbAccessError && !sbAlignmentError, "SBCS Cover", "64-bit access")
property.cover((sb2tl.module.io.wrEn || sb2tl.module.io.rdEn) && SBCSFieldsReg.sbaccess === 4.U && !sbAccessError && !sbAlignmentError, "SBCS Cover", "128-bit access")
property.cover(SBCSFieldsReg.sbautoincrement && SBCSFieldsReg.sbbusy, "SBCS Cover", "Access with autoincrement set")
property.cover(!SBCSFieldsReg.sbautoincrement && SBCSFieldsReg.sbbusy, "SBCS Cover", "Access without autoincrement set")
property.cover((sb2tl.module.io.wrEn || sb2tl.module.io.rdEn) && SBCSFieldsReg.sbaccess > 4.U, "SBCS Cover", "Invalid sbaccess value")
(sbcsfields, sbaddrfields, sbdatafields)
}
}
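// Illustrative sketch (an assumption about the caller, not part of this file):
// the three RegField sequences returned by SystemBusAccessModule.apply are
// mapped into the debug module's DMI register space by whatever instantiates
// it, roughly like this (sbcsOffset/sbAddr0Offset/sbData0Offset and regmap are
// placeholders standing in for the caller's register-map plumbing):
//
//   val (sbcsFields, sbAddrFields, sbDataFields) =
//     SystemBusAccessModule(sb2tl, dmactive, dmAuthenticated)
//   regmap(
//     sbcsOffset    -> sbcsFields,
//     sbAddr0Offset -> sbAddrFields(0),
//     sbData0Offset -> sbDataFields(0))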
class SBToTL(implicit p: Parameters) extends LazyModule {
val cfg = p(DebugModuleKey).get
val node = TLClientNode(Seq(TLMasterPortParameters.v1(
clients = Seq(TLMasterParameters.v1("debug")),
requestFields = Seq(AMBAProtField()))))
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
val io = IO(new Bundle {
val rdEn = Input(Bool())
val wrEn = Input(Bool())
val addrIn = Input(UInt(128.W)) // TODO: Parameterize these widths
val dataIn = Input(UInt(128.W))
val sizeIn = Input(UInt(3.W))
val rdLegal = Output(Bool())
val wrLegal = Output(Bool())
val rdDone = Output(Bool())
val wrDone = Output(Bool())
val respError = Output(Bool())
val dataOut = Output(UInt(8.W))
val rdLoad = Output(Vec(cfg.maxSupportedSBAccess/8, Bool()))
val sbStateOut = Output(UInt(log2Ceil(SystemBusAccessState.maxId).W))
})
val rf_reset = IO(Input(Reset()))
import SystemBusAccessState._
val (tl, edge) = node.out(0)
val sbState = RegInit(0.U)
// --- Drive payloads on bus to TileLink ---
val d = Queue(tl.d, 2) // Add a small buffer since response could arrive on same cycle as request
d.ready := (sbState === SBReadResponse.id.U) || (sbState === SBWriteResponse.id.U)
val muxedData = WireInit(0.U(8.W))
val requestValid = tl.a.valid
val requestReady = tl.a.ready
val responseValid = d.valid
val responseReady = d.ready
val counter = RegInit(0.U((log2Ceil(cfg.maxSupportedSBAccess/8)+1).W))
val vecData = Wire(Vec(cfg.maxSupportedSBAccess/8, UInt(8.W)))
vecData.zipWithIndex.map { case (vd, i) => vd := io.dataIn(8*i+7,8*i) }
muxedData := vecData(counter(log2Ceil(vecData.size)-1,0))
// Need an additional check to determine if address is safe for Get/Put
val rdLegal_addr = edge.manager.supportsGetSafe(io.addrIn, io.sizeIn, Some(TransferSizes(1,cfg.maxSupportedSBAccess/8)))
val wrLegal_addr = edge.manager.supportsPutFullSafe(io.addrIn, io.sizeIn, Some(TransferSizes(1,cfg.maxSupportedSBAccess/8)))
val (_, gbits) = edge.Get(0.U, io.addrIn, io.sizeIn)
val (_, pfbits) = edge.Put(0.U, io.addrIn, io.sizeIn, muxedData)
io.rdLegal := rdLegal_addr
io.wrLegal := wrLegal_addr
io.sbStateOut := sbState
when(sbState === SBReadRequest.id.U) { tl.a.bits := gbits }
.otherwise { tl.a.bits := pfbits }
tl.a.bits.user.lift(AMBAProt).foreach { x =>
x.bufferable := false.B
x.modifiable := false.B
x.readalloc := false.B
x.writealloc := false.B
x.privileged := true.B
x.secure := true.B
x.fetch := false.B
}
val respError = d.bits.denied || d.bits.corrupt
io.respError := respError
val wrTxValid = sbState === SBWriteRequest.id.U && requestValid && requestReady
val rdTxValid = sbState === SBReadResponse.id.U && responseValid && responseReady
val txLast = counter === ((1.U << io.sizeIn) - 1.U)
counter := Mux((wrTxValid || rdTxValid) && txLast, 0.U,
Mux((wrTxValid || rdTxValid) , counter+1.U, counter))
for (i <- 0 until (cfg.maxSupportedSBAccess/8)) {
io.rdLoad(i) := rdTxValid && (counter === i.U)
}
// --- State Machine to interface with TileLink ---
when (sbState === Idle.id.U){
sbState := Mux(io.rdEn && io.rdLegal, SBReadRequest.id.U,
Mux(io.wrEn && io.wrLegal, SBWriteRequest.id.U, sbState))
}.elsewhen (sbState === SBReadRequest.id.U){
sbState := Mux(requestValid && requestReady, SBReadResponse.id.U, sbState)
}.elsewhen (sbState === SBWriteRequest.id.U){
sbState := Mux(wrTxValid && txLast, SBWriteResponse.id.U, sbState)
}.elsewhen (sbState === SBReadResponse.id.U){
sbState := Mux(rdTxValid && txLast, Idle.id.U, sbState)
}.elsewhen (sbState === SBWriteResponse.id.U){
sbState := Mux(responseValid && responseReady, Idle.id.U, sbState)
}
io.rdDone := rdTxValid && txLast
io.wrDone := (sbState === SBWriteResponse.id.U) && responseValid && responseReady
io.dataOut := d.bits.data
tl.a.valid := (sbState === SBReadRequest.id.U) || (sbState === SBWriteRequest.id.U)
// Tie off unused channels
tl.b.ready := false.B
tl.c.valid := false.B
tl.e.valid := false.B
assert (sbState === Idle.id.U ||
sbState === SBReadRequest.id.U ||
sbState === SBWriteRequest.id.U ||
sbState === SBReadResponse.id.U ||
sbState === SBWriteResponse.id.U, "SBA state machine in undefined state")
property.cover (sbState === Idle.id.U, "SBA State Cover", "SBA Access Idle")
property.cover (sbState === SBReadRequest.id.U, "SBA State Cover", "SBA Access Read Req")
property.cover (sbState === SBWriteRequest.id.U, "SBA State Cover", "SBA Access Write Req")
property.cover (sbState === SBReadResponse.id.U, "SBA State Cover", "SBA Access Read Resp")
property.cover (sbState === SBWriteResponse.id.U, "SBA State Cover", "SBA Access Write Resp")
property.cover (io.rdEn && !io.rdLegal, "SB Legality Cover", "SBA Rd Address Illegal")
property.cover (io.wrEn && !io.wrLegal, "SB Legality Cover", "SBA Wr Address Illegal")
}
}
| module SBToTL( // @[SBA.scala:273:9]
input clock, // @[SBA.scala:273:9]
input reset, // @[SBA.scala:273:9]
input auto_out_a_ready, // @[LazyModuleImp.scala:107:25]
output auto_out_a_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_out_a_bits_size, // @[LazyModuleImp.scala:107:25]
output [31:0] auto_out_a_bits_address, // @[LazyModuleImp.scala:107:25]
output [7:0] auto_out_a_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_out_d_ready, // @[LazyModuleImp.scala:107:25]
input auto_out_d_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_out_d_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_out_d_bits_param, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_out_d_bits_size, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_out_d_bits_sink, // @[LazyModuleImp.scala:107:25]
input auto_out_d_bits_denied, // @[LazyModuleImp.scala:107:25]
input [7:0] auto_out_d_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_out_d_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input io_rdEn, // @[SBA.scala:274:16]
input io_wrEn, // @[SBA.scala:274:16]
input [127:0] io_addrIn, // @[SBA.scala:274:16]
input [127:0] io_dataIn, // @[SBA.scala:274:16]
input [2:0] io_sizeIn, // @[SBA.scala:274:16]
output io_rdLegal, // @[SBA.scala:274:16]
output io_wrLegal, // @[SBA.scala:274:16]
output io_rdDone, // @[SBA.scala:274:16]
output io_wrDone, // @[SBA.scala:274:16]
output io_respError, // @[SBA.scala:274:16]
output [7:0] io_dataOut, // @[SBA.scala:274:16]
output io_rdLoad_0, // @[SBA.scala:274:16]
output io_rdLoad_1, // @[SBA.scala:274:16]
output io_rdLoad_2, // @[SBA.scala:274:16]
output io_rdLoad_3, // @[SBA.scala:274:16]
output io_rdLoad_4, // @[SBA.scala:274:16]
output io_rdLoad_5, // @[SBA.scala:274:16]
output io_rdLoad_6, // @[SBA.scala:274:16]
output io_rdLoad_7, // @[SBA.scala:274:16]
output [2:0] io_sbStateOut // @[SBA.scala:274:16]
);
wire _d_q_io_deq_valid; // @[Decoupled.scala:362:21]
wire _d_q_io_deq_bits_denied; // @[Decoupled.scala:362:21]
wire _d_q_io_deq_bits_corrupt; // @[Decoupled.scala:362:21]
reg [2:0] sbState; // @[SBA.scala:295:26]
wire _rdTxValid_T = sbState == 3'h3; // @[SBA.scala:295:26, :299:25]
wire _io_wrDone_T = sbState == 3'h4; // @[SBA.scala:295:26, :299:62]
wire d_q_io_deq_ready = _rdTxValid_T | _io_wrDone_T; // @[SBA.scala:299:{25,50,62}]
reg [3:0] counter; // @[SBA.scala:307:26]
wire [7:0][7:0] _GEN = {{io_dataIn[63:56]}, {io_dataIn[55:48]}, {io_dataIn[47:40]}, {io_dataIn[39:32]}, {io_dataIn[31:24]}, {io_dataIn[23:16]}, {io_dataIn[15:8]}, {io_dataIn[7:0]}}; // @[SBA.scala:309:63, :310:15]
wire [118:0] _GEN_0 = {io_addrIn[127:14], io_addrIn[13:9] ^ 5'h11}; // @[Parameters.scala:137:{31,41,46}]
wire [115:0] _GEN_1 = {io_addrIn[127:14], ~(io_addrIn[13:12])}; // @[Parameters.scala:137:{31,41,46}]
wire [114:0] _GEN_2 = {io_addrIn[127:21], io_addrIn[20:17] ^ 4'h8, io_addrIn[15:12]}; // @[Parameters.scala:137:{31,41,46}]
wire [111:0] _GEN_3 = {io_addrIn[127:26], io_addrIn[25:16] ^ 10'h200}; // @[Parameters.scala:137:{31,41,46}]
wire [115:0] _GEN_4 = {io_addrIn[127:26], io_addrIn[25:12] ^ 14'h2010}; // @[Parameters.scala:137:{31,41,46}]
wire [111:0] _GEN_5 = {io_addrIn[127:28], io_addrIn[27:16] ^ 12'h800}; // @[Parameters.scala:137:{31,41,46}]
wire [101:0] _GEN_6 = {io_addrIn[127:28], ~(io_addrIn[27:26])}; // @[Parameters.scala:137:{31,41,46}]
wire [115:0] _GEN_7 = {io_addrIn[127:29], io_addrIn[28:12] ^ 17'h10020}; // @[Parameters.scala:137:{31,41,46}]
wire [99:0] _GEN_8 = {io_addrIn[127:32], io_addrIn[31:28] ^ 4'h8}; // @[Parameters.scala:137:{31,41,46}]
wire io_rdLegal_0 = ~(|(io_addrIn[127:13])) | ~(|_GEN_0) | ~(|_GEN_1) | {io_addrIn[127:17], ~(io_addrIn[16])} == 112'h0 | ~(|_GEN_2) | ~(|_GEN_3) | ~(|_GEN_4) | ~(|_GEN_5) | ~(|_GEN_6) | ~(|_GEN_7) | ~(|_GEN_8); // @[Parameters.scala:685:42]
wire io_wrLegal_0 = ~(|(io_addrIn[127:13])) | ~(|_GEN_0) | ~(|_GEN_1) | ~(|_GEN_2) | ~(|_GEN_3) | ~(|_GEN_4) | ~(|_GEN_5) | ~(|_GEN_6) | ~(|_GEN_7) | ~(|_GEN_8); // @[Parameters.scala:685:42]
wire _nodeOut_a_valid_T = sbState == 3'h1; // @[SBA.scala:295:26, :322:18]
wire _nodeOut_a_valid_T_1 = sbState == 3'h2; // @[SBA.scala:295:26, :338:29]
wire rdTxValid = _rdTxValid_T & _d_q_io_deq_valid & d_q_io_deq_ready; // @[Decoupled.scala:362:21]
wire txLast = {4'h0, counter} == (8'h1 << io_sizeIn) - 8'h1; // @[SBA.scala:307:26, :340:{29,39,53}]
wire _GEN_9 = sbState == 3'h0; // @[SBA.scala:295:26, :349:19]
wire nodeOut_a_valid = _nodeOut_a_valid_T | _nodeOut_a_valid_T_1; // @[SBA.scala:322:18, :338:29, :366:52] |
Generate the Verilog code corresponding to the following Chisel files.
File MemStreamerAccel.scala:
// See LICENSE for license details
package roccaccutils
import chisel3._
import org.chipsalliance.cde.config.{Parameters, Field}
import freechips.rocketchip.tile._
import freechips.rocketchip.rocket.{TLBConfig}
import freechips.rocketchip.diplomacy._
import freechips.rocketchip.rocket.constants.MemoryOpConstants
import freechips.rocketchip.tilelink._
import freechips.rocketchip.subsystem.{SystemBusKey}
import roccaccutils.logger._
abstract class MemStreamerAccel(opcodes: OpcodeSet)(implicit p: Parameters)
extends LazyRoCC(opcodes=opcodes, nPTWPorts=2)
with HasL2MemHelperParams {
// --------------------------
// MUST BE DEFINED BY CHILD
// --------------------------
val tlbConfig: TLBConfig
val xbarBetweenMem: Boolean
val logger: Logger
// --------------------------
implicit val hp: L2MemHelperParams = L2MemHelperParams(p(SystemBusKey).beatBytes * 8)
val roccTLNode = if (xbarBetweenMem) atlNode else tlNode
val l2_memloader = LazyModule(new L2MemHelper(tlbConfig, printInfo="[memloader]", numOutstandingReqs=32, logger=logger))
roccTLNode := TLWidthWidget(BUS_SZ_BYTES) := TLBuffer.chainNode(1) := l2_memloader.masterNode
val l2_memwriter = LazyModule(new L2MemHelper(tlbConfig, printInfo="[memwriter]", numOutstandingReqs=32, logger=logger))
roccTLNode := TLWidthWidget(BUS_SZ_BYTES) := TLBuffer.chainNode(1) := l2_memwriter.masterNode
}
abstract class MemStreamerAccelImp(outer: MemStreamerAccel)(implicit p: Parameters)
extends LazyRoCCModuleImp(outer) with MemoryOpConstants {
// --------------------------
// MUST BE DEFINED BY CHILD
// --------------------------
val queueDepth: Int
val cmd_router: StreamingCommandRouter
val streamer: MemStreamer
// --------------------------
implicit val hp: L2MemHelperParams = outer.hp
io.mem.req.valid := false.B
io.mem.s1_kill := false.B
io.mem.s2_kill := false.B
io.mem.keep_clock_enabled := true.B
io.interrupt := false.B
io.busy := false.B
val memloader = Module(new MemLoader(memLoaderQueDepth=queueDepth, logger=outer.logger))
outer.l2_memloader.module.io.userif <> memloader.io.l2helperUser
memloader.io.src_info <> cmd_router.io.src_info
val memwriter = Module(new MemWriter32(cmd_que_depth=queueDepth, logger=outer.logger))
outer.l2_memwriter.module.io.userif <> memwriter.io.l2io
outer.l2_memloader.module.io.sfence <> cmd_router.io.sfence_out
outer.l2_memloader.module.io.status.valid := cmd_router.io.dmem_status_out.valid
outer.l2_memloader.module.io.status.bits := cmd_router.io.dmem_status_out.bits.status
io.ptw(0) <> outer.l2_memloader.module.io.ptw
outer.l2_memwriter.module.io.sfence <> cmd_router.io.sfence_out
outer.l2_memwriter.module.io.status.valid := cmd_router.io.dmem_status_out.valid
outer.l2_memwriter.module.io.status.bits := cmd_router.io.dmem_status_out.bits.status
io.ptw(1) <> outer.l2_memwriter.module.io.ptw
cmd_router.io.rocc_in <> io.cmd
io.resp <> cmd_router.io.rocc_out
streamer.io.mem_stream <> memloader.io.consumer
memwriter.io.memwrites_in <> streamer.io.memwrites_in
memwriter.io.decompress_dest_info <> cmd_router.io.dest_info
cmd_router.io.bufs_completed := memwriter.io.bufs_completed
cmd_router.io.no_writes_inflight := memwriter.io.no_writes_inflight
}
File Buffer.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import org.chipsalliance.diplomacy.lazymodule._
import freechips.rocketchip.diplomacy.BufferParams
class TLBufferNode (
a: BufferParams,
b: BufferParams,
c: BufferParams,
d: BufferParams,
e: BufferParams)(implicit valName: ValName) extends TLAdapterNode(
clientFn = { p => p.v1copy(minLatency = p.minLatency + b.latency + c.latency) },
managerFn = { p => p.v1copy(minLatency = p.minLatency + a.latency + d.latency) }
) {
override lazy val nodedebugstring = s"a:${a.toString}, b:${b.toString}, c:${c.toString}, d:${d.toString}, e:${e.toString}"
override def circuitIdentity = List(a,b,c,d,e).forall(_ == BufferParams.none)
}
class TLBuffer(
a: BufferParams,
b: BufferParams,
c: BufferParams,
d: BufferParams,
e: BufferParams)(implicit p: Parameters) extends LazyModule
{
def this(ace: BufferParams, bd: BufferParams)(implicit p: Parameters) = this(ace, bd, ace, bd, ace)
def this(abcde: BufferParams)(implicit p: Parameters) = this(abcde, abcde)
def this()(implicit p: Parameters) = this(BufferParams.default)
val node = new TLBufferNode(a, b, c, d, e)
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
def headBundle = node.out.head._2.bundle
override def desiredName = (Seq("TLBuffer") ++ node.out.headOption.map(_._2.bundle.shortName)).mkString("_")
(node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
out.a <> a(in .a)
in .d <> d(out.d)
if (edgeOut.manager.anySupportAcquireB && edgeOut.client.anySupportProbe) {
in .b <> b(out.b)
out.c <> c(in .c)
out.e <> e(in .e)
} else {
in.b.valid := false.B
in.c.ready := true.B
in.e.ready := true.B
out.b.ready := true.B
out.c.valid := false.B
out.e.valid := false.B
}
}
}
}
object TLBuffer
{
def apply() (implicit p: Parameters): TLNode = apply(BufferParams.default)
def apply(abcde: BufferParams) (implicit p: Parameters): TLNode = apply(abcde, abcde)
def apply(ace: BufferParams, bd: BufferParams)(implicit p: Parameters): TLNode = apply(ace, bd, ace, bd, ace)
def apply(
a: BufferParams,
b: BufferParams,
c: BufferParams,
d: BufferParams,
e: BufferParams)(implicit p: Parameters): TLNode =
{
val buffer = LazyModule(new TLBuffer(a, b, c, d, e))
buffer.node
}
def chain(depth: Int, name: Option[String] = None)(implicit p: Parameters): Seq[TLNode] = {
val buffers = Seq.fill(depth) { LazyModule(new TLBuffer()) }
name.foreach { n => buffers.zipWithIndex.foreach { case (b, i) => b.suggestName(s"${n}_${i}") } }
buffers.map(_.node)
}
def chainNode(depth: Int, name: Option[String] = None)(implicit p: Parameters): TLNode = {
chain(depth, name)
.reduceLeftOption(_ :*=* _)
.getOrElse(TLNameNode("no_buffer"))
}
}
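// Illustrative sketch (assumption): chainNode is how MemStreamerAccel earlier
// in this document inserts buffering between an accelerator's DMA helpers and
// the bus, e.g.
//
//   roccTLNode := TLWidthWidget(BUS_SZ_BYTES) := TLBuffer.chainNode(1) := l2_memloader.masterNode
//
// A depth of 0 collapses to the pass-through TLNameNode("no_buffer").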
File Top.scala:
// See LICENSE for license details
package aes
import chisel3._
import org.chipsalliance.cde.config.{Parameters, Field}
import freechips.rocketchip.tile._
import freechips.rocketchip.rocket.{TLBConfig}
import freechips.rocketchip.diplomacy._
import freechips.rocketchip.subsystem.{SystemBusKey}
import freechips.rocketchip.rocket.constants.MemoryOpConstants
import freechips.rocketchip.tilelink._
import roccaccutils._
case object AES256AccelTLB extends Field[Option[TLBConfig]](None)
class AES256ECBAccel(opcodes: OpcodeSet)(implicit p: Parameters) extends MemStreamerAccel(
opcodes = opcodes) {
override lazy val module = new AES256ECBAccelImp(this)
require(p(SystemBusKey).beatBytes == 32, "Only tested on 32B SBUS width") // TODO: should work for 128b
lazy val tlbConfig = p(AES256AccelTLB).get
lazy val xbarBetweenMem = p(AES256ECBAccelInsertXbarBetweenMemory)
lazy val logger = AES256ECBLogger
}
class AES256ECBAccelImp(outer: AES256ECBAccel)(implicit p: Parameters)
extends MemStreamerAccelImp(outer) {
lazy val queueDepth = p(AES256ECBAccelCmdQueueDepth)
lazy val cmd_router = Module(new CommandRouter(queueDepth))
lazy val streamer = Module(new AES256ECB(outer.logger))
streamer.io.key <> cmd_router.io.key
streamer.io.mode <> cmd_router.io.mode
}
File LazyRoCC.scala:
// See LICENSE.Berkeley for license details.
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tile
import chisel3._
import chisel3.util._
import chisel3.experimental.IntParam
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy.lazymodule._
import freechips.rocketchip.rocket.{
MStatus, HellaCacheIO, TLBPTWIO, CanHavePTW, CanHavePTWModule,
SimpleHellaCacheIF, M_XRD, PTE, PRV, M_SZ
}
import freechips.rocketchip.tilelink.{
TLNode, TLIdentityNode, TLClientNode, TLMasterParameters, TLMasterPortParameters
}
import freechips.rocketchip.util.InOrderArbiter
case object BuildRoCC extends Field[Seq[Parameters => LazyRoCC]](Nil)
class RoCCInstruction extends Bundle {
val funct = Bits(7.W)
val rs2 = Bits(5.W)
val rs1 = Bits(5.W)
val xd = Bool()
val xs1 = Bool()
val xs2 = Bool()
val rd = Bits(5.W)
val opcode = Bits(7.W)
}
class RoCCCommand(implicit p: Parameters) extends CoreBundle()(p) {
val inst = new RoCCInstruction
val rs1 = Bits(xLen.W)
val rs2 = Bits(xLen.W)
val status = new MStatus
}
class RoCCResponse(implicit p: Parameters) extends CoreBundle()(p) {
val rd = Bits(5.W)
val data = Bits(xLen.W)
}
class RoCCCoreIO(val nRoCCCSRs: Int = 0)(implicit p: Parameters) extends CoreBundle()(p) {
val cmd = Flipped(Decoupled(new RoCCCommand))
val resp = Decoupled(new RoCCResponse)
val mem = new HellaCacheIO
val busy = Output(Bool())
val interrupt = Output(Bool())
val exception = Input(Bool())
val csrs = Flipped(Vec(nRoCCCSRs, new CustomCSRIO))
}
class RoCCIO(val nPTWPorts: Int, nRoCCCSRs: Int)(implicit p: Parameters) extends RoCCCoreIO(nRoCCCSRs)(p) {
val ptw = Vec(nPTWPorts, new TLBPTWIO)
val fpu_req = Decoupled(new FPInput)
val fpu_resp = Flipped(Decoupled(new FPResult))
}
/** Base classes for Diplomatic TL2 RoCC units **/
abstract class LazyRoCC(
val opcodes: OpcodeSet,
val nPTWPorts: Int = 0,
val usesFPU: Boolean = false,
val roccCSRs: Seq[CustomCSR] = Nil
)(implicit p: Parameters) extends LazyModule {
val module: LazyRoCCModuleImp
require(roccCSRs.map(_.id).toSet.size == roccCSRs.size)
val atlNode: TLNode = TLIdentityNode()
val tlNode: TLNode = TLIdentityNode()
val stlNode: TLNode = TLIdentityNode()
}
class LazyRoCCModuleImp(outer: LazyRoCC) extends LazyModuleImp(outer) {
val io = IO(new RoCCIO(outer.nPTWPorts, outer.roccCSRs.size))
io := DontCare
}
/** Mixins for including RoCC **/
trait HasLazyRoCC extends CanHavePTW { this: BaseTile =>
val roccs = p(BuildRoCC).map(_(p))
val roccCSRs = roccs.map(_.roccCSRs) // the set of custom CSRs requested by all roccs
require(roccCSRs.flatten.map(_.id).toSet.size == roccCSRs.flatten.size,
"LazyRoCC instantiations require overlapping CSRs")
roccs.map(_.atlNode).foreach { atl => tlMasterXbar.node :=* atl }
roccs.map(_.tlNode).foreach { tl => tlOtherMastersNode :=* tl }
roccs.map(_.stlNode).foreach { stl => stl :*= tlSlaveXbar.node }
nPTWPorts += roccs.map(_.nPTWPorts).sum
nDCachePorts += roccs.size
}
trait HasLazyRoCCModule extends CanHavePTWModule
with HasCoreParameters { this: RocketTileModuleImp =>
val (respArb, cmdRouter) = if(outer.roccs.nonEmpty) {
val respArb = Module(new RRArbiter(new RoCCResponse()(outer.p), outer.roccs.size))
val cmdRouter = Module(new RoccCommandRouter(outer.roccs.map(_.opcodes))(outer.p))
outer.roccs.zipWithIndex.foreach { case (rocc, i) =>
rocc.module.io.ptw ++=: ptwPorts
rocc.module.io.cmd <> cmdRouter.io.out(i)
val dcIF = Module(new SimpleHellaCacheIF()(outer.p))
dcIF.io.requestor <> rocc.module.io.mem
dcachePorts += dcIF.io.cache
respArb.io.in(i) <> Queue(rocc.module.io.resp)
}
(Some(respArb), Some(cmdRouter))
} else {
(None, None)
}
val roccCSRIOs = outer.roccs.map(_.module.io.csrs)
}
class AccumulatorExample(opcodes: OpcodeSet, val n: Int = 4)(implicit p: Parameters) extends LazyRoCC(opcodes) {
override lazy val module = new AccumulatorExampleModuleImp(this)
}
class AccumulatorExampleModuleImp(outer: AccumulatorExample)(implicit p: Parameters) extends LazyRoCCModuleImp(outer)
with HasCoreParameters {
val regfile = Mem(outer.n, UInt(xLen.W))
val busy = RegInit(VecInit(Seq.fill(outer.n){false.B}))
val cmd = Queue(io.cmd)
val funct = cmd.bits.inst.funct
val addr = cmd.bits.rs2(log2Up(outer.n)-1,0)
val doWrite = funct === 0.U
val doRead = funct === 1.U
val doLoad = funct === 2.U
val doAccum = funct === 3.U
val memRespTag = io.mem.resp.bits.tag(log2Up(outer.n)-1,0)
// datapath
val addend = cmd.bits.rs1
val accum = regfile(addr)
val wdata = Mux(doWrite, addend, accum + addend)
when (cmd.fire && (doWrite || doAccum)) {
regfile(addr) := wdata
}
when (io.mem.resp.valid) {
regfile(memRespTag) := io.mem.resp.bits.data
busy(memRespTag) := false.B
}
// control
when (io.mem.req.fire) {
busy(addr) := true.B
}
val doResp = cmd.bits.inst.xd
val stallReg = busy(addr)
val stallLoad = doLoad && !io.mem.req.ready
val stallResp = doResp && !io.resp.ready
cmd.ready := !stallReg && !stallLoad && !stallResp
// command resolved if no stalls AND not issuing a load that will need a request
// PROC RESPONSE INTERFACE
io.resp.valid := cmd.valid && doResp && !stallReg && !stallLoad
// valid response if valid command, need a response, and no stalls
io.resp.bits.rd := cmd.bits.inst.rd
// Must respond with the appropriate tag or undefined behavior
io.resp.bits.data := accum
  // The semantics are to always send out the prior accumulator register value
io.busy := cmd.valid || busy.reduce(_||_)
  // Be busy when we have pending memory requests or a committed possibility of pending requests
io.interrupt := false.B
// Set this true to trigger an interrupt on the processor (please refer to supervisor documentation)
// MEMORY REQUEST INTERFACE
io.mem.req.valid := cmd.valid && doLoad && !stallReg && !stallResp
io.mem.req.bits.addr := addend
io.mem.req.bits.tag := addr
io.mem.req.bits.cmd := M_XRD // perform a load (M_XWR for stores)
io.mem.req.bits.size := log2Ceil(8).U
io.mem.req.bits.signed := false.B
io.mem.req.bits.data := 0.U // we're not performing any stores...
io.mem.req.bits.phys := false.B
io.mem.req.bits.dprv := cmd.bits.status.dprv
io.mem.req.bits.dv := cmd.bits.status.dv
io.mem.req.bits.no_resp := false.B
}
class TranslatorExample(opcodes: OpcodeSet)(implicit p: Parameters) extends LazyRoCC(opcodes, nPTWPorts = 1) {
override lazy val module = new TranslatorExampleModuleImp(this)
}
class TranslatorExampleModuleImp(outer: TranslatorExample)(implicit p: Parameters) extends LazyRoCCModuleImp(outer)
with HasCoreParameters {
val req_addr = Reg(UInt(coreMaxAddrBits.W))
val req_rd = Reg(chiselTypeOf(io.resp.bits.rd))
val req_offset = req_addr(pgIdxBits - 1, 0)
val req_vpn = req_addr(coreMaxAddrBits - 1, pgIdxBits)
val pte = Reg(new PTE)
val s_idle :: s_ptw_req :: s_ptw_resp :: s_resp :: Nil = Enum(4)
val state = RegInit(s_idle)
io.cmd.ready := (state === s_idle)
when (io.cmd.fire) {
req_rd := io.cmd.bits.inst.rd
req_addr := io.cmd.bits.rs1
state := s_ptw_req
}
private val ptw = io.ptw(0)
when (ptw.req.fire) { state := s_ptw_resp }
when (state === s_ptw_resp && ptw.resp.valid) {
pte := ptw.resp.bits.pte
state := s_resp
}
when (io.resp.fire) { state := s_idle }
ptw.req.valid := (state === s_ptw_req)
ptw.req.bits.valid := true.B
ptw.req.bits.bits.addr := req_vpn
io.resp.valid := (state === s_resp)
io.resp.bits.rd := req_rd
io.resp.bits.data := Mux(pte.leaf(), Cat(pte.ppn, req_offset), -1.S(xLen.W).asUInt)
io.busy := (state =/= s_idle)
io.interrupt := false.B
io.mem.req.valid := false.B
}
class CharacterCountExample(opcodes: OpcodeSet)(implicit p: Parameters) extends LazyRoCC(opcodes) {
override lazy val module = new CharacterCountExampleModuleImp(this)
override val atlNode = TLClientNode(Seq(TLMasterPortParameters.v1(Seq(TLMasterParameters.v1("CharacterCountRoCC")))))
}
class CharacterCountExampleModuleImp(outer: CharacterCountExample)(implicit p: Parameters) extends LazyRoCCModuleImp(outer)
with HasCoreParameters
with HasL1CacheParameters {
val cacheParams = tileParams.dcache.get
private val blockOffset = blockOffBits
private val beatOffset = log2Up(cacheDataBits/8)
val needle = Reg(UInt(8.W))
val addr = Reg(UInt(coreMaxAddrBits.W))
val count = Reg(UInt(xLen.W))
val resp_rd = Reg(chiselTypeOf(io.resp.bits.rd))
val addr_block = addr(coreMaxAddrBits - 1, blockOffset)
val offset = addr(blockOffset - 1, 0)
val next_addr = (addr_block + 1.U) << blockOffset.U
val s_idle :: s_acq :: s_gnt :: s_check :: s_resp :: Nil = Enum(5)
val state = RegInit(s_idle)
val (tl_out, edgesOut) = outer.atlNode.out(0)
val gnt = tl_out.d.bits
val recv_data = Reg(UInt(cacheDataBits.W))
val recv_beat = RegInit(0.U(log2Up(cacheDataBeats+1).W))
val data_bytes = VecInit(Seq.tabulate(cacheDataBits/8) { i => recv_data(8 * (i + 1) - 1, 8 * i) })
val zero_match = data_bytes.map(_ === 0.U)
val needle_match = data_bytes.map(_ === needle)
val first_zero = PriorityEncoder(zero_match)
val chars_found = PopCount(needle_match.zipWithIndex.map {
case (matches, i) =>
val idx = Cat(recv_beat - 1.U, i.U(beatOffset.W))
matches && idx >= offset && i.U <= first_zero
})
val zero_found = zero_match.reduce(_ || _)
val finished = Reg(Bool())
io.cmd.ready := (state === s_idle)
io.resp.valid := (state === s_resp)
io.resp.bits.rd := resp_rd
io.resp.bits.data := count
tl_out.a.valid := (state === s_acq)
tl_out.a.bits := edgesOut.Get(
fromSource = 0.U,
toAddress = addr_block << blockOffset,
lgSize = lgCacheBlockBytes.U)._2
tl_out.d.ready := (state === s_gnt)
when (io.cmd.fire) {
addr := io.cmd.bits.rs1
needle := io.cmd.bits.rs2
resp_rd := io.cmd.bits.inst.rd
count := 0.U
finished := false.B
state := s_acq
}
when (tl_out.a.fire) { state := s_gnt }
when (tl_out.d.fire) {
recv_beat := recv_beat + 1.U
recv_data := gnt.data
state := s_check
}
when (state === s_check) {
when (!finished) {
count := count + chars_found
}
when (zero_found) { finished := true.B }
when (recv_beat === cacheDataBeats.U) {
addr := next_addr
state := Mux(zero_found || finished, s_resp, s_acq)
recv_beat := 0.U
} .otherwise {
state := s_gnt
}
}
when (io.resp.fire) { state := s_idle }
io.busy := (state =/= s_idle)
io.interrupt := false.B
io.mem.req.valid := false.B
// Tie off unused channels
tl_out.b.ready := true.B
tl_out.c.valid := false.B
tl_out.e.valid := false.B
}
class BlackBoxExample(opcodes: OpcodeSet, blackBoxFile: String)(implicit p: Parameters)
extends LazyRoCC(opcodes) {
override lazy val module = new BlackBoxExampleModuleImp(this, blackBoxFile)
}
class BlackBoxExampleModuleImp(outer: BlackBoxExample, blackBoxFile: String)(implicit p: Parameters)
extends LazyRoCCModuleImp(outer)
with RequireSyncReset
with HasCoreParameters {
val blackbox = {
val roccIo = io
Module(
new BlackBox( Map( "xLen" -> IntParam(xLen),
"PRV_SZ" -> IntParam(PRV.SZ),
"coreMaxAddrBits" -> IntParam(coreMaxAddrBits),
"dcacheReqTagBits" -> IntParam(roccIo.mem.req.bits.tag.getWidth),
"M_SZ" -> IntParam(M_SZ),
"mem_req_bits_size_width" -> IntParam(roccIo.mem.req.bits.size.getWidth),
"coreDataBits" -> IntParam(coreDataBits),
"coreDataBytes" -> IntParam(coreDataBytes),
"paddrBits" -> IntParam(paddrBits),
"vaddrBitsExtended" -> IntParam(vaddrBitsExtended),
"FPConstants_RM_SZ" -> IntParam(FPConstants.RM_SZ),
"fLen" -> IntParam(fLen),
"FPConstants_FLAGS_SZ" -> IntParam(FPConstants.FLAGS_SZ)
) ) with HasBlackBoxResource {
val io = IO( new Bundle {
val clock = Input(Clock())
val reset = Input(Reset())
val rocc = chiselTypeOf(roccIo)
})
override def desiredName: String = blackBoxFile
addResource(s"/vsrc/$blackBoxFile.v")
}
)
}
blackbox.io.clock := clock
blackbox.io.reset := reset
blackbox.io.rocc.cmd <> io.cmd
io.resp <> blackbox.io.rocc.resp
io.mem <> blackbox.io.rocc.mem
io.busy := blackbox.io.rocc.busy
io.interrupt := blackbox.io.rocc.interrupt
blackbox.io.rocc.exception := io.exception
io.ptw <> blackbox.io.rocc.ptw
io.fpu_req <> blackbox.io.rocc.fpu_req
blackbox.io.rocc.fpu_resp <> io.fpu_resp
}
class OpcodeSet(val opcodes: Seq[UInt]) {
def |(set: OpcodeSet) =
new OpcodeSet(this.opcodes ++ set.opcodes)
def matches(oc: UInt) = opcodes.map(_ === oc).reduce(_ || _)
}
object OpcodeSet {
def custom0 = new OpcodeSet(Seq("b0001011".U))
def custom1 = new OpcodeSet(Seq("b0101011".U))
def custom2 = new OpcodeSet(Seq("b1011011".U))
def custom3 = new OpcodeSet(Seq("b1111011".U))
def all = custom0 | custom1 | custom2 | custom3
}
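// Illustrative sketch (assumption; hypothetical Config name): an accelerator is
// attached to a tile by providing a constructor under BuildRoCC, keyed to one
// of the custom opcode spaces above. RoccCommandRouter below then steers each
// incoming RoCCCommand to whichever accelerator's OpcodeSet matches inst.opcode.
class WithAccumulatorExampleRoCC extends Config((site, here, up) => {
  case BuildRoCC => List(
    (p: Parameters) => LazyModule(new AccumulatorExample(OpcodeSet.custom0, n = 4)(p)))
})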
class RoccCommandRouter(opcodes: Seq[OpcodeSet])(implicit p: Parameters)
extends CoreModule()(p) {
val io = IO(new Bundle {
val in = Flipped(Decoupled(new RoCCCommand))
val out = Vec(opcodes.size, Decoupled(new RoCCCommand))
val busy = Output(Bool())
})
val cmd = Queue(io.in)
val cmdReadys = io.out.zip(opcodes).map { case (out, opcode) =>
val me = opcode.matches(cmd.bits.inst.opcode)
out.valid := cmd.valid && me
out.bits := cmd.bits
out.ready && me
}
cmd.ready := cmdReadys.reduce(_ || _)
io.busy := cmd.valid
assert(PopCount(cmdReadys) <= 1.U,
"Custom opcode matched for more than one accelerator")
}
File WidthWidget.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy.lazymodule._
import freechips.rocketchip.diplomacy.AddressSet
import freechips.rocketchip.util.{Repeater, UIntToOH1}
// innerBeatBytes => the new client-facing bus width
class TLWidthWidget(innerBeatBytes: Int)(implicit p: Parameters) extends LazyModule
{
private def noChangeRequired(manager: TLManagerPortParameters) = manager.beatBytes == innerBeatBytes
val node = new TLAdapterNode(
clientFn = { case c => c },
managerFn = { case m => m.v1copy(beatBytes = innerBeatBytes) }){
override def circuitIdentity = edges.out.map(_.manager).forall(noChangeRequired)
}
override lazy val desiredName = s"TLWidthWidget$innerBeatBytes"
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
def merge[T <: TLDataChannel](edgeIn: TLEdge, in: DecoupledIO[T], edgeOut: TLEdge, out: DecoupledIO[T]) = {
val inBytes = edgeIn.manager.beatBytes
val outBytes = edgeOut.manager.beatBytes
val ratio = outBytes / inBytes
val keepBits = log2Ceil(outBytes)
val dropBits = log2Ceil(inBytes)
val countBits = log2Ceil(ratio)
val size = edgeIn.size(in.bits)
val hasData = edgeIn.hasData(in.bits)
val limit = UIntToOH1(size, keepBits) >> dropBits
val count = RegInit(0.U(countBits.W))
val first = count === 0.U
val last = count === limit || !hasData
val enable = Seq.tabulate(ratio) { i => !((count ^ i.U) & limit).orR }
val corrupt_reg = RegInit(false.B)
val corrupt_in = edgeIn.corrupt(in.bits)
val corrupt_out = corrupt_in || corrupt_reg
when (in.fire) {
count := count + 1.U
corrupt_reg := corrupt_out
when (last) {
count := 0.U
corrupt_reg := false.B
}
}
def helper(idata: UInt): UInt = {
// rdata is X until the first time a multi-beat write occurs.
// Prevent the X from leaking outside by jamming the mux control until
// the first time rdata is written (and hence no longer X).
val rdata_written_once = RegInit(false.B)
val masked_enable = enable.map(_ || !rdata_written_once)
val odata = Seq.fill(ratio) { WireInit(idata) }
val rdata = Reg(Vec(ratio-1, chiselTypeOf(idata)))
val pdata = rdata :+ idata
val mdata = (masked_enable zip (odata zip pdata)) map { case (e, (o, p)) => Mux(e, o, p) }
when (in.fire && !last) {
rdata_written_once := true.B
(rdata zip mdata) foreach { case (r, m) => r := m }
}
Cat(mdata.reverse)
}
in.ready := out.ready || !last
out.valid := in.valid && last
out.bits := in.bits
// Don't put down hardware if we never carry data
edgeOut.data(out.bits) := (if (edgeIn.staticHasData(in.bits) == Some(false)) 0.U else helper(edgeIn.data(in.bits)))
edgeOut.corrupt(out.bits) := corrupt_out
(out.bits, in.bits) match {
case (o: TLBundleA, i: TLBundleA) => o.mask := edgeOut.mask(o.address, o.size) & Mux(hasData, helper(i.mask), ~0.U(outBytes.W))
case (o: TLBundleB, i: TLBundleB) => o.mask := edgeOut.mask(o.address, o.size) & Mux(hasData, helper(i.mask), ~0.U(outBytes.W))
case (o: TLBundleC, i: TLBundleC) => ()
case (o: TLBundleD, i: TLBundleD) => ()
case _ => require(false, "Impossible bundle combination in WidthWidget")
}
}
def split[T <: TLDataChannel](edgeIn: TLEdge, in: DecoupledIO[T], edgeOut: TLEdge, out: DecoupledIO[T], sourceMap: UInt => UInt) = {
val inBytes = edgeIn.manager.beatBytes
val outBytes = edgeOut.manager.beatBytes
val ratio = inBytes / outBytes
val keepBits = log2Ceil(inBytes)
val dropBits = log2Ceil(outBytes)
val countBits = log2Ceil(ratio)
val size = edgeIn.size(in.bits)
val hasData = edgeIn.hasData(in.bits)
val limit = UIntToOH1(size, keepBits) >> dropBits
val count = RegInit(0.U(countBits.W))
val first = count === 0.U
val last = count === limit || !hasData
when (out.fire) {
count := count + 1.U
when (last) { count := 0.U }
}
// For sub-beat transfer, extract which part matters
val sel = in.bits match {
case a: TLBundleA => a.address(keepBits-1, dropBits)
case b: TLBundleB => b.address(keepBits-1, dropBits)
case c: TLBundleC => c.address(keepBits-1, dropBits)
case d: TLBundleD => {
val sel = sourceMap(d.source)
val hold = Mux(first, sel, RegEnable(sel, first)) // a_first is not for whole xfer
hold & ~limit // if more than one a_first/xfer, the address must be aligned anyway
}
}
val index = sel | count
def helper(idata: UInt, width: Int): UInt = {
val mux = VecInit.tabulate(ratio) { i => idata((i+1)*outBytes*width-1, i*outBytes*width) }
mux(index)
}
out.bits := in.bits
out.valid := in.valid
in.ready := out.ready
// Don't put down hardware if we never carry data
edgeOut.data(out.bits) := (if (edgeIn.staticHasData(in.bits) == Some(false)) 0.U else helper(edgeIn.data(in.bits), 8))
(out.bits, in.bits) match {
case (o: TLBundleA, i: TLBundleA) => o.mask := helper(i.mask, 1)
case (o: TLBundleB, i: TLBundleB) => o.mask := helper(i.mask, 1)
case (o: TLBundleC, i: TLBundleC) => () // replicating corrupt to all beats is ok
case (o: TLBundleD, i: TLBundleD) => ()
case _ => require(false, "Impossbile bundle combination in WidthWidget")
}
// Repeat the input if we're not last
!last
}
def splice[T <: TLDataChannel](edgeIn: TLEdge, in: DecoupledIO[T], edgeOut: TLEdge, out: DecoupledIO[T], sourceMap: UInt => UInt) = {
if (edgeIn.manager.beatBytes == edgeOut.manager.beatBytes) {
// nothing to do; pass it through
out.bits := in.bits
out.valid := in.valid
in.ready := out.ready
} else if (edgeIn.manager.beatBytes > edgeOut.manager.beatBytes) {
// split input to output
val repeat = Wire(Bool())
val repeated = Repeater(in, repeat)
val cated = Wire(chiselTypeOf(repeated))
cated <> repeated
edgeIn.data(cated.bits) := Cat(
edgeIn.data(repeated.bits)(edgeIn.manager.beatBytes*8-1, edgeOut.manager.beatBytes*8),
edgeIn.data(in.bits)(edgeOut.manager.beatBytes*8-1, 0))
repeat := split(edgeIn, cated, edgeOut, out, sourceMap)
} else {
// merge input to output
merge(edgeIn, in, edgeOut, out)
}
}
(node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
// If the master is narrower than the slave, the D channel must be narrowed.
// This is tricky, because the D channel has no address data.
// Thus, you don't know which part of a sub-beat transfer to extract.
// To fix this, we record the relevant address bits for all sources.
// The assumption is that this sort of situation happens only where
// you connect a narrow master to the system bus, so there are few sources.
def sourceMap(source_bits: UInt) = {
val source = if (edgeIn.client.endSourceId == 1) 0.U(0.W) else source_bits
require (edgeOut.manager.beatBytes > edgeIn.manager.beatBytes)
val keepBits = log2Ceil(edgeOut.manager.beatBytes)
val dropBits = log2Ceil(edgeIn.manager.beatBytes)
val sources = Reg(Vec(edgeIn.client.endSourceId, UInt((keepBits-dropBits).W)))
val a_sel = in.a.bits.address(keepBits-1, dropBits)
when (in.a.fire) {
if (edgeIn.client.endSourceId == 1) { // avoid extraction-index-width warning
sources(0) := a_sel
} else {
sources(in.a.bits.source) := a_sel
}
}
// depopulate unused source registers:
edgeIn.client.unusedSources.foreach { id => sources(id) := 0.U }
val bypass = in.a.valid && in.a.bits.source === source
if (edgeIn.manager.minLatency > 0) sources(source)
else Mux(bypass, a_sel, sources(source))
}
splice(edgeIn, in.a, edgeOut, out.a, sourceMap)
splice(edgeOut, out.d, edgeIn, in.d, sourceMap)
if (edgeOut.manager.anySupportAcquireB && edgeIn.client.anySupportProbe) {
splice(edgeOut, out.b, edgeIn, in.b, sourceMap)
splice(edgeIn, in.c, edgeOut, out.c, sourceMap)
out.e.valid := in.e.valid
out.e.bits := in.e.bits
in.e.ready := out.e.ready
} else {
in.b.valid := false.B
in.c.ready := true.B
in.e.ready := true.B
out.b.ready := true.B
out.c.valid := false.B
out.e.valid := false.B
}
}
}
}
object TLWidthWidget
{
def apply(innerBeatBytes: Int)(implicit p: Parameters): TLNode =
{
val widget = LazyModule(new TLWidthWidget(innerBeatBytes))
widget.node
}
def apply(wrapper: TLBusWrapper)(implicit p: Parameters): TLNode = apply(wrapper.beatBytes)
}
// Synthesizable unit tests
import freechips.rocketchip.unittest._
class TLRAMWidthWidget(first: Int, second: Int, txns: Int)(implicit p: Parameters) extends LazyModule {
val fuzz = LazyModule(new TLFuzzer(txns))
val model = LazyModule(new TLRAMModel("WidthWidget"))
val ram = LazyModule(new TLRAM(AddressSet(0x0, 0x3ff)))
(ram.node
:= TLDelayer(0.1)
:= TLFragmenter(4, 256)
:= TLWidthWidget(second)
:= TLWidthWidget(first)
:= TLDelayer(0.1)
:= model.node
:= fuzz.node)
lazy val module = new Impl
class Impl extends LazyModuleImp(this) with UnitTestModule {
io.finished := fuzz.module.io.finished
}
}
class TLRAMWidthWidgetTest(little: Int, big: Int, txns: Int = 5000, timeout: Int = 500000)(implicit p: Parameters) extends UnitTest(timeout) {
val dut = Module(LazyModule(new TLRAMWidthWidget(little,big,txns)).module)
dut.io.start := DontCare
io.finished := dut.io.finished
}
File LazyModuleImp.scala:
package org.chipsalliance.diplomacy.lazymodule
import chisel3.{withClockAndReset, Module, RawModule, Reset, _}
import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo}
import firrtl.passes.InlineAnnotation
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.nodes.Dangle
import scala.collection.immutable.SortedMap
/** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]].
*
* This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy.
*/
sealed trait LazyModuleImpLike extends RawModule {
/** [[LazyModule]] that contains this instance. */
val wrapper: LazyModule
/** IOs that will be automatically "punched" for this instance. */
val auto: AutoBundle
/** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */
protected[diplomacy] val dangles: Seq[Dangle]
// [[wrapper.module]] had better not be accessed while LazyModules are still being built!
require(
LazyModule.scope.isEmpty,
s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}"
)
/** Set module name. Defaults to the containing LazyModule's desiredName. */
override def desiredName: String = wrapper.desiredName
suggestName(wrapper.suggestedName)
/** [[Parameters]] for chisel [[Module]]s. */
implicit val p: Parameters = wrapper.p
/** instantiate this [[LazyModule]], return [[AutoBundle]] and a unconnected [[Dangle]]s from this module and
* submodules.
*/
protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = {
// 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]],
// 2. return [[Dangle]]s from each module.
val childDangles = wrapper.children.reverse.flatMap { c =>
implicit val sourceInfo: SourceInfo = c.info
c.cloneProto.map { cp =>
// If the child is a clone, then recursively set cloneProto of its children as well
def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = {
require(bases.size == clones.size)
(bases.zip(clones)).map { case (l, r) =>
require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}")
l.cloneProto = Some(r)
assignCloneProtos(l.children, r.children)
}
}
assignCloneProtos(c.children, cp.children)
// Clone the child module as a record, and get its [[AutoBundle]]
val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName)
val clonedAuto = clone("auto").asInstanceOf[AutoBundle]
// Get the empty [[Dangle]]'s of the cloned child
val rawDangles = c.cloneDangles()
require(rawDangles.size == clonedAuto.elements.size)
// Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s
val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) }
dangles
}.getOrElse {
// For non-clones, instantiate the child module
val mod = try {
Module(c.module)
} catch {
case e: ChiselException => {
println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}")
throw e
}
}
mod.dangles
}
}
// Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]].
// This will result in a sequence of [[Dangle]] from these [[BaseNode]]s.
val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate())
// Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]]
val allDangles = nodeDangles ++ childDangles
// Group [[allDangles]] by their [[source]].
val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*)
// For each [[source]] set of [[Dangle]]s of size 2, ensure that these
// can be connected as a source-sink pair (have opposite flipped value).
// Make the connection and mark them as [[done]].
val done = Set() ++ pairing.values.filter(_.size == 2).map {
case Seq(a, b) =>
require(a.flipped != b.flipped)
// @todo <> in chisel3 makes directionless connection.
if (a.flipped) {
a.data <> b.data
} else {
b.data <> a.data
}
a.source
case _ => None
}
// Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module.
val forward = allDangles.filter(d => !done(d.source))
// Generate [[AutoBundle]] IO from [[forward]].
val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*))
// Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]]
val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) =>
if (d.flipped) {
d.data <> io
} else {
io <> d.data
}
d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name)
}
// Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]].
wrapper.inModuleBody.reverse.foreach {
_()
}
if (wrapper.shouldBeInlined) {
chisel3.experimental.annotate(new ChiselAnnotation {
def toFirrtl = InlineAnnotation(toNamed)
})
}
// Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]].
(auto, dangles)
}
}
/** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike {
/** Instantiate hardware of this `Module`. */
val (auto, dangles) = instantiate()
}
/** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike {
// These wires are the default clock+reset for all LazyModule children.
// It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the
// [[LazyRawModuleImp]] children.
// Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly.
/** drive clock explicitly. */
val childClock: Clock = Wire(Clock())
/** drive reset explicitly. */
val childReset: Reset = Wire(Reset())
// the default is that these are disabled
childClock := false.B.asClock
childReset := chisel3.DontCare
def provideImplicitClockToLazyChildren: Boolean = false
val (auto, dangles) =
if (provideImplicitClockToLazyChildren) {
withClockAndReset(childClock, childReset) { instantiate() }
} else {
instantiate()
}
}
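
Illustrative example (not part of the sources above): a minimal sketch of how a [[LazyModule]] is typically paired with a [[LazyModuleImp]] or [[LazyRawModuleImp]]. The class names below (ExamplePassthrough, ExampleIsland) are hypothetical, and the import paths may differ slightly between rocket-chip/diplomacy versions.

import chisel3._
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.diplomacy.{LazyModule, LazyModuleImp, LazyRawModuleImp}

// A LazyModule whose hardware is elaborated by a LazyModuleImp, which supplies the
// implicit clock and reset of an ordinary Module.
class ExamplePassthrough(implicit p: Parameters) extends LazyModule {
  lazy val module = new LazyModuleImp(this) {
    val io = IO(new Bundle {
      val in  = Input(UInt(8.W))
      val out = Output(UInt(8.W))
    })
    io.out := RegNext(io.in) // uses the implicit clock/reset provided by LazyModuleImp
  }
}

// A LazyModule whose hardware is a LazyRawModuleImp: it has no implicit clock/reset,
// so it drives childClock/childReset itself and opts in to forwarding them to children.
class ExampleIsland(implicit p: Parameters) extends LazyModule {
  lazy val module = new LazyRawModuleImp(this) {
    override def provideImplicitClockToLazyChildren = true
    val clockIn = IO(Input(Clock()))
    val resetIn = IO(Input(Bool()))
    childClock := clockIn
    childReset := resetIn
  }
}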
File MixedNode.scala:
package org.chipsalliance.diplomacy.nodes
import chisel3.{Data, DontCare, Wire}
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.{Field, Parameters}
import org.chipsalliance.diplomacy.ValName
import org.chipsalliance.diplomacy.sourceLine
/** One side metadata of a [[Dangle]].
*
* Describes one side of an edge going into or out of a [[BaseNode]].
*
* @param serial
* the global [[BaseNode.serial]] number of the [[BaseNode]] that this [[HalfEdge]] connects to.
* @param index
* the `index` in the [[BaseNode]]'s input or output port list that this [[HalfEdge]] belongs to.
*/
case class HalfEdge(serial: Int, index: Int) extends Ordered[HalfEdge] {
import scala.math.Ordered.orderingToOrdered
def compare(that: HalfEdge): Int = HalfEdge.unapply(this).compare(HalfEdge.unapply(that))
}
/** [[Dangle]] captures the `IO` information of a [[LazyModule]] and which two [[BaseNode]]s the [[Edges]]/[[Bundle]]
* connects.
*
 * [[Dangle]]s are generated by [[BaseNode.instantiate]] using [[MixedNode.danglesOut]] and [[MixedNode.danglesIn]];
* [[LazyModuleImp.instantiate]] connects those that go to internal or explicit IO connections in a [[LazyModule]].
*
* @param source
* the source [[HalfEdge]] of this [[Dangle]], which captures the source [[BaseNode]] and the port `index` within
* that [[BaseNode]].
* @param sink
* sink [[HalfEdge]] of this [[Dangle]], which captures the sink [[BaseNode]] and the port `index` within that
* [[BaseNode]].
* @param flipped
 * flip or not in [[AutoBundle.makeElements]]. If true this corresponds to `danglesIn`, if false it corresponds to
 * `danglesOut`.
* @param dataOpt
* actual [[Data]] for the hardware connection. Can be empty if this belongs to a cloned module
*/
case class Dangle(source: HalfEdge, sink: HalfEdge, flipped: Boolean, name: String, dataOpt: Option[Data]) {
def data = dataOpt.get
}
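// Added illustration (not part of the original file): a hedged, self-contained sketch of the
// pairing rule used by [[LazyModuleImpLike.instantiate]]. Two dangles that share the same
// source [[HalfEdge]] are the two halves of one edge (one flipped, one not) and get connected
// to each other; an unpaired dangle is forwarded upward as an [[AutoBundle]] port.
private object DanglePairingSketch {
  def pair(dangles: Seq[Dangle]): (Seq[(Dangle, Dangle)], Seq[Dangle]) = {
    val bySource = dangles.groupBy(_.source)
    val paired = bySource.values.collect {
      case Seq(a, b) =>
        require(a.flipped != b.flipped) // one side is the flipped (inward) view of the edge
        (a, b)                          // in instantiate(), these two are connected with <>
    }.toSeq
    val forwarded = bySource.values.collect { case Seq(only) => only }.toSeq
    (paired, forwarded)                 // unpaired dangles become AutoBundle IO ports
  }
}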
/** [[Edges]] is a collection of parameters describing the functionality and connection for an interface, which is often
* derived from the interconnection protocol and can inform the parameterization of the hardware bundles that actually
* implement the protocol.
*/
case class Edges[EI, EO](in: Seq[EI], out: Seq[EO])
/** A field available in [[Parameters]] used to determine whether [[InwardNodeImp.monitor]] will be called. */
case object MonitorsEnabled extends Field[Boolean](true)
/** When rendering the edge in a graphical format, flip the order in which the edges' source and sink are presented.
*
* For example, when rendering graphML, yEd by default tries to put the source node vertically above the sink node, but
* [[RenderFlipped]] inverts this relationship. When a particular [[LazyModule]] contains both source nodes and sink
 * nodes, flipping the rendering of one node's edge will usually produce a more concise visual layout for the
* [[LazyModule]].
*/
case object RenderFlipped extends Field[Boolean](false)
/** The sealed node class in the package, all node are derived from it.
*
* @param inner
* Sink interface implementation.
* @param outer
* Source interface implementation.
* @param valName
* val name of this node.
* @tparam DI
 * Downward-flowing parameters received on the inner side of the node. It is usually a bunch of parameters
 * describing the protocol parameters from a source. For an [[InwardNode]], it is determined by the connected
 * [[OutwardNode]]. Since it can be connected to multiple sources, this parameter is always a Seq of source port
 * parameters.
 * @tparam UI
 * Upward-flowing parameters generated by the inner side of the node. It is usually a bunch of parameters describing
 * the protocol parameters of a sink. For an [[InwardNode]], it is determined by the node itself.
 * @tparam EI
 * Edge Parameters describing a connection on the inner side of the node. It is usually a bunch of transfers
 * specified for a sink according to protocol.
 * @tparam BI
 * Bundle type used when connecting to the inner side of the node. It is a hardware interface of this sink interface.
 * It should extend [[chisel3.Data]], which represents the real hardware.
 * @tparam DO
 * Downward-flowing parameters generated on the outer side of the node. It is usually a bunch of parameters
 * describing the protocol parameters of a source. For an [[OutwardNode]], it is determined by the node itself.
 * @tparam UO
 * Upward-flowing parameters received by the outer side of the node. It is usually a bunch of parameters describing
 * the protocol parameters from a sink. For an [[OutwardNode]], it is determined by the connected [[InwardNode]].
 * Since it can be connected to multiple sinks, this parameter is always a Seq of sink port parameters.
 * @tparam EO
 * Edge Parameters describing a connection on the outer side of the node. It is usually a bunch of transfers
 * specified for a source according to protocol.
 * @tparam BO
 * Bundle type used when connecting to the outer side of the node. It is a hardware interface of this source
 * interface. It should extend [[chisel3.Data]], which represents the real hardware.
*
* @note
* Call Graph of [[MixedNode]]
 * - line `─`: source is processed by a function and the result is passed on to others
* - Arrow `→`: target of arrow is generated by source
*
* {{{
* (from the other node)
* ┌─────────────────────────────────────────────────────────[[InwardNode.uiParams]]─────────────┐
* ↓ │
* (binding node when elaboration) [[OutwardNode.uoParams]]────────────────────────[[MixedNode.mapParamsU]]→──────────┐ │
* [[InwardNode.accPI]] │ │ │
* │ │ (based on protocol) │
* │ │ [[MixedNode.inner.edgeI]] │
* │ │ ↓ │
* ↓ │ │ │
* (immobilize after elaboration) (inward port from [[OutwardNode]]) │ ↓ │
* [[InwardNode.iBindings]]──┐ [[MixedNode.iDirectPorts]]────────────────────→[[MixedNode.iPorts]] [[InwardNode.uiParams]] │
* │ │ ↑ │ │ │
* │ │ │ [[OutwardNode.doParams]] │ │
* │ │ │ (from the other node) │ │
* │ │ │ │ │ │
* │ │ │ │ │ │
* │ │ │ └────────┬──────────────┤ │
* │ │ │ │ │ │
* │ │ │ │ (based on protocol) │
* │ │ │ │ [[MixedNode.inner.edgeI]] │
* │ │ │ │ │ │
* │ │ (from the other node) │ ↓ │
* │ └───[[OutwardNode.oPortMapping]] [[OutwardNode.oStar]] │ [[MixedNode.edgesIn]]───┐ │
* │ ↑ ↑ │ │ ↓ │
* │ │ │ │ │ [[MixedNode.in]] │
* │ │ │ │ ↓ ↑ │
* │ (solve star connection) │ │ │ [[MixedNode.bundleIn]]──┘ │
* ├───[[MixedNode.resolveStar]]→─┼─────────────────────────────┤ └────────────────────────────────────┐ │
* │ │ │ [[MixedNode.bundleOut]]─┐ │ │
* │ │ │ ↑ ↓ │ │
* │ │ │ │ [[MixedNode.out]] │ │
* │ ↓ ↓ │ ↑ │ │
* │ ┌─────[[InwardNode.iPortMapping]] [[InwardNode.iStar]] [[MixedNode.edgesOut]]──┘ │ │
* │ │ (from the other node) ↑ │ │
* │ │ │ │ │ │
* │ │ │ [[MixedNode.outer.edgeO]] │ │
* │ │ │ (based on protocol) │ │
* │ │ │ │ │ │
* │ │ │ ┌────────────────────────────────────────┤ │ │
* │ │ │ │ │ │ │
* │ │ │ │ │ │ │
* │ │ │ │ │ │ │
* (immobilize after elaboration)│ ↓ │ │ │ │
* [[OutwardNode.oBindings]]─┘ [[MixedNode.oDirectPorts]]───→[[MixedNode.oPorts]] [[OutwardNode.doParams]] │ │
* ↑ (inward port from [[OutwardNode]]) │ │ │ │
* │ ┌─────────────────────────────────────────┤ │ │ │
* │ │ │ │ │ │
* │ │ │ │ │ │
* [[OutwardNode.accPO]] │ ↓ │ │ │
* (binding node when elaboration) │ [[InwardNode.diParams]]─────→[[MixedNode.mapParamsD]]────────────────────────────┘ │ │
* │ ↑ │ │
* │ └──────────────────────────────────────────────────────────────────────────────────────────┘ │
* └──────────────────────────────────────────────────────────────────────────────────────────────────────────┘
* }}}
*/
abstract class MixedNode[DI, UI, EI, BI <: Data, DO, UO, EO, BO <: Data](
val inner: InwardNodeImp[DI, UI, EI, BI],
val outer: OutwardNodeImp[DO, UO, EO, BO]
)(
implicit valName: ValName)
extends BaseNode
with NodeHandle[DI, UI, EI, BI, DO, UO, EO, BO]
with InwardNode[DI, UI, BI]
with OutwardNode[DO, UO, BO] {
  // Generate a [[NodeHandle]] whose inward and outward nodes are both this node.
val inward = this
val outward = this
/** Debug info of nodes binding. */
def bindingInfo: String = s"""$iBindingInfo
|$oBindingInfo
|""".stripMargin
/** Debug info of ports connecting. */
def connectedPortsInfo: String = s"""${oPorts.size} outward ports connected: [${oPorts.map(_._2.name).mkString(",")}]
|${iPorts.size} inward ports connected: [${iPorts.map(_._2.name).mkString(",")}]
|""".stripMargin
/** Debug info of parameters propagations. */
def parametersInfo: String = s"""${doParams.size} downstream outward parameters: [${doParams.mkString(",")}]
|${uoParams.size} upstream outward parameters: [${uoParams.mkString(",")}]
|${diParams.size} downstream inward parameters: [${diParams.mkString(",")}]
|${uiParams.size} upstream inward parameters: [${uiParams.mkString(",")}]
|""".stripMargin
/** For a given node, converts [[OutwardNode.accPO]] and [[InwardNode.accPI]] to [[MixedNode.oPortMapping]] and
* [[MixedNode.iPortMapping]].
*
* Given counts of known inward and outward binding and inward and outward star bindings, return the resolved inward
* stars and outward stars.
*
* This method will also validate the arguments and throw a runtime error if the values are unsuitable for this type
* of node.
*
* @param iKnown
* Number of known-size ([[BIND_ONCE]]) input bindings.
* @param oKnown
* Number of known-size ([[BIND_ONCE]]) output bindings.
* @param iStar
* Number of unknown size ([[BIND_STAR]]) input bindings.
* @param oStar
* Number of unknown size ([[BIND_STAR]]) output bindings.
* @return
* A Tuple of the resolved number of input and output connections.
*/
protected[diplomacy] def resolveStar(iKnown: Int, oKnown: Int, iStar: Int, oStar: Int): (Int, Int)
/** Function to generate downward-flowing outward params from the downward-flowing input params and the current output
* ports.
*
* @param n
* The size of the output sequence to generate.
* @param p
* Sequence of downward-flowing input parameters of this node.
* @return
* A `n`-sized sequence of downward-flowing output edge parameters.
*/
protected[diplomacy] def mapParamsD(n: Int, p: Seq[DI]): Seq[DO]
/** Function to generate upward-flowing input parameters from the upward-flowing output parameters [[uiParams]].
*
* @param n
* Size of the output sequence.
* @param p
* Upward-flowing output edge parameters.
* @return
* A n-sized sequence of upward-flowing input edge parameters.
*/
protected[diplomacy] def mapParamsU(n: Int, p: Seq[UO]): Seq[UI]
/** @return
* The sink cardinality of the node, the number of outputs bound with [[BIND_QUERY]] summed with inputs bound with
* [[BIND_STAR]].
*/
protected[diplomacy] lazy val sinkCard: Int = oBindings.count(_._3 == BIND_QUERY) + iBindings.count(_._3 == BIND_STAR)
/** @return
* The source cardinality of this node, the number of inputs bound with [[BIND_QUERY]] summed with the number of
* output bindings bound with [[BIND_STAR]].
*/
protected[diplomacy] lazy val sourceCard: Int =
iBindings.count(_._3 == BIND_QUERY) + oBindings.count(_._3 == BIND_STAR)
/** @return list of nodes involved in flex bindings with this node. */
protected[diplomacy] lazy val flexes: Seq[BaseNode] =
oBindings.filter(_._3 == BIND_FLEX).map(_._2) ++ iBindings.filter(_._3 == BIND_FLEX).map(_._2)
/** Resolves the flex to be either source or sink and returns the offset where the [[BIND_STAR]] operators begin
* greedily taking up the remaining connections.
*
* @return
* A value >= 0 if it is sink cardinality, a negative value for source cardinality. The magnitude of the return
* value is not relevant.
*/
protected[diplomacy] lazy val flexOffset: Int = {
/** Recursively performs a depth-first search of the [[flexes]], [[BaseNode]]s connected to this node with flex
* operators. The algorithm bottoms out when we either get to a node we have already visited or when we get to a
* connection that is not a flex and can set the direction for us. Otherwise, recurse by visiting the `flexes` of
* each node in the current set and decide whether they should be added to the set or not.
*
* @return
* the mapping of [[BaseNode]] indexed by their serial numbers.
*/
def DFS(v: BaseNode, visited: Map[Int, BaseNode]): Map[Int, BaseNode] = {
if (visited.contains(v.serial) || !v.flexibleArityDirection) {
visited
} else {
v.flexes.foldLeft(visited + (v.serial -> v))((sum, n) => DFS(n, sum))
}
}
    /** Determine which [[BaseNode]]s are involved in resolving the flex connections to/from this node.
*
* @example
* {{{
* a :*=* b :*=* c
* d :*=* b
* e :*=* f
* }}}
*
* `flexSet` for `a`, `b`, `c`, or `d` will be `Set(a, b, c, d)` `flexSet` for `e` or `f` will be `Set(e,f)`
*/
val flexSet = DFS(this, Map()).values
/** The total number of :*= operators where we're on the left. */
val allSink = flexSet.map(_.sinkCard).sum
/** The total number of :=* operators used when we're on the right. */
val allSource = flexSet.map(_.sourceCard).sum
require(
allSink == 0 || allSource == 0,
s"The nodes ${flexSet.map(_.name)} which are inter-connected by :*=* have ${allSink} :*= operators and ${allSource} :=* operators connected to them, making it impossible to determine cardinality inference direction."
)
allSink - allSource
}
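  // Worked illustration (added commentary, not in the original source): with the flex chain
  //   a :*=* b :*=* c   from the example above, suppose the chain is also on the left of
  // exactly one :*= and on the right of no :=*. Then over the flexSet allSink = 1 and
  // allSource = 0, so flexOffset = 1 - 0 = 1 (>= 0) and every flex binding in the chain is
  // resolved in the sink-cardinality direction; had the chain instead fed a single :=*,
  // flexOffset would be -1 and the source direction would win.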
/** @return A value >= 0 if it is sink cardinality, a negative value for source cardinality. */
protected[diplomacy] def edgeArityDirection(n: BaseNode): Int = {
if (flexibleArityDirection) flexOffset
else if (n.flexibleArityDirection) n.flexOffset
else 0
}
/** For a node which is connected between two nodes, select the one that will influence the direction of the flex
* resolution.
*/
protected[diplomacy] def edgeAritySelect(n: BaseNode, l: => Int, r: => Int): Int = {
val dir = edgeArityDirection(n)
if (dir < 0) l
else if (dir > 0) r
else 1
}
  /** Ensure that the same node is not visited twice while resolving `:*=` and related operators. */
private var starCycleGuard = false
  /** Resolve all the star operators into concrete indices. As connections are being made, some may be "star"
   * connections which need to be resolved in some way to determine how many actual edges they correspond to. We also
   * need to build up the ranges of edges which correspond to each binding operator, so that we can apply the correct
   * edge parameters and later build up correct bundle connections.
*
* [[oPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that oPort (binding
* operator). [[iPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that iPort
* (binding operator). [[oStar]]: `Int` the value to return for this node `N` for any `N :*= foo` or `N :*=* foo :*=
* bar` [[iStar]]: `Int` the value to return for this node `N` for any `foo :=* N` or `bar :=* foo :*=* N`
*/
protected[diplomacy] lazy val (
oPortMapping: Seq[(Int, Int)],
iPortMapping: Seq[(Int, Int)],
oStar: Int,
iStar: Int
) = {
try {
if (starCycleGuard) throw StarCycleException()
starCycleGuard = true
// For a given node N...
// Number of foo :=* N
// + Number of bar :=* foo :*=* N
val oStars = oBindings.count { case (_, n, b, _, _) =>
b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) < 0)
}
// Number of N :*= foo
// + Number of N :*=* foo :*= bar
val iStars = iBindings.count { case (_, n, b, _, _) =>
b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) > 0)
}
// 1 for foo := N
// + bar.iStar for bar :*= foo :*=* N
// + foo.iStar for foo :*= N
// + 0 for foo :=* N
val oKnown = oBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, 0, n.iStar)
case BIND_QUERY => n.iStar
case BIND_STAR => 0
}
}.sum
// 1 for N := foo
// + bar.oStar for N :*=* foo :=* bar
// + foo.oStar for N :=* foo
// + 0 for N :*= foo
val iKnown = iBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, n.oStar, 0)
case BIND_QUERY => n.oStar
case BIND_STAR => 0
}
}.sum
// Resolve star depends on the node subclass to implement the algorithm for this.
val (iStar, oStar) = resolveStar(iKnown, oKnown, iStars, oStars)
// Cumulative list of resolved outward binding range starting points
val oSum = oBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, oStar, n.iStar)
case BIND_QUERY => n.iStar
case BIND_STAR => oStar
}
}.scanLeft(0)(_ + _)
// Cumulative list of resolved inward binding range starting points
val iSum = iBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, n.oStar, iStar)
case BIND_QUERY => n.oStar
case BIND_STAR => iStar
}
}.scanLeft(0)(_ + _)
// Create ranges for each binding based on the running sums and return
// those along with resolved values for the star operations.
(oSum.init.zip(oSum.tail), iSum.init.zip(iSum.tail), oStar, iStar)
} catch {
case c: StarCycleException => throw c.copy(loop = context +: c.loop)
}
}
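  // Worked illustration (added commentary, not in the original source): if a node has three
  // outward binding operators whose resolved edge counts come out as 1, 2 and 1, then
  //   oSum = Seq(1, 2, 1).scanLeft(0)(_ + _) = Seq(0, 1, 3, 4)
  //   oPortMapping = oSum.init zip oSum.tail = Seq((0, 1), (1, 3), (3, 4))
  // i.e. the first operator owns edge 0, the second owns edges 1 and 2, and the third owns
  // edge 3. The inward ranges in iPortMapping are built the same way from iSum.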
  /** Sequence of outward ports.
*
* This should be called after all star bindings are resolved.
*
 * Each element is: `j` Port index of this binding in the connected [[InwardNode]]'s `iPortMapping` on the other side of the binding.
* `n` Instance of inward node. `p` View of [[Parameters]] where this connection was made. `s` Source info where this
* connection was made in the source code.
*/
protected[diplomacy] lazy val oDirectPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] =
oBindings.flatMap { case (i, n, _, p, s) =>
// for each binding operator in this node, look at what it connects to
val (start, end) = n.iPortMapping(i)
(start until end).map { j => (j, n, p, s) }
}
  /** Sequence of inward ports.
*
* This should be called after all star bindings are resolved.
*
* `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding. `n` Instance of
* outward node. `p` View of [[Parameters]] where this connection was made. `s` [[SourceInfo]] where this connection
* was made in the source code.
*/
protected[diplomacy] lazy val iDirectPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] =
iBindings.flatMap { case (i, n, _, p, s) =>
// query this port index range of this node in the other side of node.
val (start, end) = n.oPortMapping(i)
(start until end).map { j => (j, n, p, s) }
}
  // Ephemeral nodes (which have non-None iForward/oForward) have in_degree = out_degree
// Thus, there must exist an Eulerian path and the below algorithms terminate
@scala.annotation.tailrec
private def oTrace(
tuple: (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)
): (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo) = tuple match {
case (i, n, p, s) => n.iForward(i) match {
case None => (i, n, p, s)
case Some((j, m)) => oTrace((j, m, p, s))
}
}
@scala.annotation.tailrec
private def iTrace(
tuple: (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)
): (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo) = tuple match {
case (i, n, p, s) => n.oForward(i) match {
case None => (i, n, p, s)
case Some((j, m)) => iTrace((j, m, p, s))
}
}
/** Final output ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved.
*
* Each Port is a tuple of:
* - Numeric index of this binding in the [[InwardNode]] on the other end.
* - [[InwardNode]] on the other end of this binding.
* - A view of [[Parameters]] where the binding occurred.
* - [[SourceInfo]] for source-level error reporting.
*/
lazy val oPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] = oDirectPorts.map(oTrace)
/** Final input ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved.
*
* Each Port is a tuple of:
* - numeric index of this binding in [[OutwardNode]] on the other end.
* - [[OutwardNode]] on the other end of this binding.
* - a view of [[Parameters]] where the binding occurred.
* - [[SourceInfo]] for source-level error reporting.
*/
lazy val iPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] = iDirectPorts.map(iTrace)
private var oParamsCycleGuard = false
protected[diplomacy] lazy val diParams: Seq[DI] = iPorts.map { case (i, n, _, _) => n.doParams(i) }
protected[diplomacy] lazy val doParams: Seq[DO] = {
try {
if (oParamsCycleGuard) throw DownwardCycleException()
oParamsCycleGuard = true
val o = mapParamsD(oPorts.size, diParams)
require(
o.size == oPorts.size,
s"""Diplomacy has detected a problem with your graph:
|At the following node, the number of outward ports should equal the number of produced outward parameters.
|$context
|$connectedPortsInfo
|Downstreamed inward parameters: [${diParams.mkString(",")}]
|Produced outward parameters: [${o.mkString(",")}]
|""".stripMargin
)
o.map(outer.mixO(_, this))
} catch {
case c: DownwardCycleException => throw c.copy(loop = context +: c.loop)
}
}
private var iParamsCycleGuard = false
protected[diplomacy] lazy val uoParams: Seq[UO] = oPorts.map { case (o, n, _, _) => n.uiParams(o) }
protected[diplomacy] lazy val uiParams: Seq[UI] = {
try {
if (iParamsCycleGuard) throw UpwardCycleException()
iParamsCycleGuard = true
val i = mapParamsU(iPorts.size, uoParams)
require(
i.size == iPorts.size,
s"""Diplomacy has detected a problem with your graph:
|At the following node, the number of inward ports should equal the number of produced inward parameters.
|$context
|$connectedPortsInfo
|Upstreamed outward parameters: [${uoParams.mkString(",")}]
|Produced inward parameters: [${i.mkString(",")}]
|""".stripMargin
)
i.map(inner.mixI(_, this))
} catch {
case c: UpwardCycleException => throw c.copy(loop = context +: c.loop)
}
}
/** Outward edge parameters. */
protected[diplomacy] lazy val edgesOut: Seq[EO] =
(oPorts.zip(doParams)).map { case ((i, n, p, s), o) => outer.edgeO(o, n.uiParams(i), p, s) }
/** Inward edge parameters. */
protected[diplomacy] lazy val edgesIn: Seq[EI] =
(iPorts.zip(uiParams)).map { case ((o, n, p, s), i) => inner.edgeI(n.doParams(o), i, p, s) }
/** A tuple of the input edge parameters and output edge parameters for the edges bound to this node.
*
 * If you need to access the edges of a foreign Node, use this method (in/out create bundles).
*/
lazy val edges: Edges[EI, EO] = Edges(edgesIn, edgesOut)
/** Create actual Wires corresponding to the Bundles parameterized by the outward edges of this node. */
protected[diplomacy] lazy val bundleOut: Seq[BO] = edgesOut.map { e =>
val x = Wire(outer.bundleO(e)).suggestName(s"${valName.value}Out")
    // TODO: Unconnected forwarded diplomatic signals are tied off to DontCare for compatibility.
    // In the future, we should add an option to decide whether to allow unconnected signals in the LazyModule.
x := DontCare
x
}
/** Create actual Wires corresponding to the Bundles parameterized by the inward edges of this node. */
protected[diplomacy] lazy val bundleIn: Seq[BI] = edgesIn.map { e =>
val x = Wire(inner.bundleI(e)).suggestName(s"${valName.value}In")
    // TODO: Unconnected forwarded diplomatic signals are tied off to DontCare for compatibility.
    // In the future, we should add an option to decide whether to allow unconnected signals in the LazyModule.
x := DontCare
x
}
private def emptyDanglesOut: Seq[Dangle] = oPorts.zipWithIndex.map { case ((j, n, _, _), i) =>
Dangle(
source = HalfEdge(serial, i),
sink = HalfEdge(n.serial, j),
flipped = false,
name = wirePrefix + "out",
dataOpt = None
)
}
private def emptyDanglesIn: Seq[Dangle] = iPorts.zipWithIndex.map { case ((j, n, _, _), i) =>
Dangle(
source = HalfEdge(n.serial, j),
sink = HalfEdge(serial, i),
flipped = true,
name = wirePrefix + "in",
dataOpt = None
)
}
/** Create the [[Dangle]]s which describe the connections from this node output to other nodes inputs. */
protected[diplomacy] def danglesOut: Seq[Dangle] = emptyDanglesOut.zipWithIndex.map { case (d, i) =>
d.copy(dataOpt = Some(bundleOut(i)))
}
/** Create the [[Dangle]]s which describe the connections from this node input from other nodes outputs. */
protected[diplomacy] def danglesIn: Seq[Dangle] = emptyDanglesIn.zipWithIndex.map { case (d, i) =>
d.copy(dataOpt = Some(bundleIn(i)))
}
private[diplomacy] var instantiated = false
/** Gather Bundle and edge parameters of outward ports.
*
 * Accessors to the result of negotiation to be used within [[LazyModuleImp]] code. Should only be used within
* [[LazyModuleImp]] code or after its instantiation has completed.
*/
def out: Seq[(BO, EO)] = {
require(
instantiated,
s"$name.out should not be called until after instantiation of its parent LazyModule.module has begun"
)
bundleOut.zip(edgesOut)
}
/** Gather Bundle and edge parameters of inward ports.
*
 * Accessors to the result of negotiation to be used within [[LazyModuleImp]] code. Should only be used within
* [[LazyModuleImp]] code or after its instantiation has completed.
*/
def in: Seq[(BI, EI)] = {
require(
instantiated,
s"$name.in should not be called until after instantiation of its parent LazyModule.module has begun"
)
bundleIn.zip(edgesIn)
}
/** Actually instantiate this node during [[LazyModuleImp]] evaluation. Mark that it's safe to use the Bundle wires,
* instantiate monitors on all input ports if appropriate, and return all the dangles of this node.
*/
protected[diplomacy] def instantiate(): Seq[Dangle] = {
instantiated = true
if (!circuitIdentity) {
(iPorts.zip(in)).foreach { case ((_, _, p, _), (b, e)) => if (p(MonitorsEnabled)) inner.monitor(b, e) }
}
danglesOut ++ danglesIn
}
protected[diplomacy] def cloneDangles(): Seq[Dangle] = emptyDanglesOut ++ emptyDanglesIn
/** Connects the outward part of a node with the inward part of this node. */
protected[diplomacy] def bind(
h: OutwardNode[DI, UI, BI],
binding: NodeBinding
)(
implicit p: Parameters,
sourceInfo: SourceInfo
): Unit = {
val x = this // x := y
val y = h
sourceLine(sourceInfo, " at ", "")
val i = x.iPushed
val o = y.oPushed
y.oPush(
i,
x,
binding match {
case BIND_ONCE => BIND_ONCE
case BIND_FLEX => BIND_FLEX
case BIND_STAR => BIND_QUERY
case BIND_QUERY => BIND_STAR
}
)
x.iPush(o, y, binding)
}
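  // Added commentary (not in the original source): note the complementary recording above.
  // For x.bind(y, binding), x (the sink side) records `binding` unchanged as an inward
  // binding, while y (the source side) records BIND_STAR as BIND_QUERY and BIND_QUERY as
  // BIND_STAR, so during resolveStar each side counts the other's stars from its own view.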
/* Metadata for printing the node graph. */
def inputs: Seq[(OutwardNode[DI, UI, BI], RenderedEdge)] = (iPorts.zip(edgesIn)).map { case ((_, n, p, _), e) =>
val re = inner.render(e)
(n, re.copy(flipped = re.flipped != p(RenderFlipped)))
}
/** Metadata for printing the node graph */
def outputs: Seq[(InwardNode[DO, UO, BO], RenderedEdge)] = oPorts.map { case (i, n, _, _) => (n, n.inputs(i)._2) }
}
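
Illustrative example (not part of the sources above): a hedged sketch of how the `in`/`out` accessors described above are typically consumed inside a [[LazyModuleImp]], yielding the negotiated (bundle, edge) pair for each connected port. The node choice (TLAdapterNode) and class name are assumptions, import paths may differ between rocket-chip versions, and only the A and D channels are forwarded, which is sufficient for a TL-UL-style edge; edges with B/C/E channels would need those forwarded as well.

import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.diplomacy.{LazyModule, LazyModuleImp}
import freechips.rocketchip.tilelink.TLAdapterNode

class ExampleForwarder(implicit p: Parameters) extends LazyModule {
  val node = TLAdapterNode() // identity client/manager functions by default
  lazy val module = new LazyModuleImp(this) {
    // in/out are only legal here, after instantiation of the parent LazyModule.module has begun
    node.in.zip(node.out).foreach { case ((i, _), (o, _)) =>
      o.a <> i.a // requests flow outward
      i.d <> o.d // responses flow back inward
    }
  }
}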
module AES256ECBAccel( // @[Top.scala:30:7]
input clock, // @[Top.scala:30:7]
input reset, // @[Top.scala:30:7]
input auto_atl_out_1_a_ready, // @[LazyModuleImp.scala:107:25]
output auto_atl_out_1_a_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_atl_out_1_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_atl_out_1_a_bits_param, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_atl_out_1_a_bits_size, // @[LazyModuleImp.scala:107:25]
output [4:0] auto_atl_out_1_a_bits_source, // @[LazyModuleImp.scala:107:25]
output [31:0] auto_atl_out_1_a_bits_address, // @[LazyModuleImp.scala:107:25]
output [31:0] auto_atl_out_1_a_bits_mask, // @[LazyModuleImp.scala:107:25]
output [255:0] auto_atl_out_1_a_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_atl_out_1_a_bits_corrupt, // @[LazyModuleImp.scala:107:25]
output auto_atl_out_1_d_ready, // @[LazyModuleImp.scala:107:25]
input auto_atl_out_1_d_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_atl_out_1_d_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_atl_out_1_d_bits_param, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_atl_out_1_d_bits_size, // @[LazyModuleImp.scala:107:25]
input [4:0] auto_atl_out_1_d_bits_source, // @[LazyModuleImp.scala:107:25]
input [4:0] auto_atl_out_1_d_bits_sink, // @[LazyModuleImp.scala:107:25]
input auto_atl_out_1_d_bits_denied, // @[LazyModuleImp.scala:107:25]
input [255:0] auto_atl_out_1_d_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_atl_out_1_d_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input auto_atl_out_0_a_ready, // @[LazyModuleImp.scala:107:25]
output auto_atl_out_0_a_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_atl_out_0_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_atl_out_0_a_bits_param, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_atl_out_0_a_bits_size, // @[LazyModuleImp.scala:107:25]
output [4:0] auto_atl_out_0_a_bits_source, // @[LazyModuleImp.scala:107:25]
output [31:0] auto_atl_out_0_a_bits_address, // @[LazyModuleImp.scala:107:25]
output [31:0] auto_atl_out_0_a_bits_mask, // @[LazyModuleImp.scala:107:25]
output [255:0] auto_atl_out_0_a_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_atl_out_0_a_bits_corrupt, // @[LazyModuleImp.scala:107:25]
output auto_atl_out_0_d_ready, // @[LazyModuleImp.scala:107:25]
input auto_atl_out_0_d_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_atl_out_0_d_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_atl_out_0_d_bits_param, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_atl_out_0_d_bits_size, // @[LazyModuleImp.scala:107:25]
input [4:0] auto_atl_out_0_d_bits_source, // @[LazyModuleImp.scala:107:25]
input [4:0] auto_atl_out_0_d_bits_sink, // @[LazyModuleImp.scala:107:25]
input auto_atl_out_0_d_bits_denied, // @[LazyModuleImp.scala:107:25]
input [255:0] auto_atl_out_0_d_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_atl_out_0_d_bits_corrupt, // @[LazyModuleImp.scala:107:25]
output io_cmd_ready, // @[LazyRoCC.scala:78:14]
input io_cmd_valid, // @[LazyRoCC.scala:78:14]
input [6:0] io_cmd_bits_inst_funct, // @[LazyRoCC.scala:78:14]
input [4:0] io_cmd_bits_inst_rs2, // @[LazyRoCC.scala:78:14]
input [4:0] io_cmd_bits_inst_rs1, // @[LazyRoCC.scala:78:14]
input io_cmd_bits_inst_xd, // @[LazyRoCC.scala:78:14]
input io_cmd_bits_inst_xs1, // @[LazyRoCC.scala:78:14]
input io_cmd_bits_inst_xs2, // @[LazyRoCC.scala:78:14]
input [4:0] io_cmd_bits_inst_rd, // @[LazyRoCC.scala:78:14]
input [6:0] io_cmd_bits_inst_opcode, // @[LazyRoCC.scala:78:14]
input [63:0] io_cmd_bits_rs1, // @[LazyRoCC.scala:78:14]
input [63:0] io_cmd_bits_rs2, // @[LazyRoCC.scala:78:14]
input io_cmd_bits_status_debug, // @[LazyRoCC.scala:78:14]
input io_cmd_bits_status_cease, // @[LazyRoCC.scala:78:14]
input io_cmd_bits_status_wfi, // @[LazyRoCC.scala:78:14]
input [31:0] io_cmd_bits_status_isa, // @[LazyRoCC.scala:78:14]
input [1:0] io_cmd_bits_status_dprv, // @[LazyRoCC.scala:78:14]
input io_cmd_bits_status_dv, // @[LazyRoCC.scala:78:14]
input [1:0] io_cmd_bits_status_prv, // @[LazyRoCC.scala:78:14]
input io_cmd_bits_status_v, // @[LazyRoCC.scala:78:14]
input io_cmd_bits_status_sd, // @[LazyRoCC.scala:78:14]
input [22:0] io_cmd_bits_status_zero2, // @[LazyRoCC.scala:78:14]
input io_cmd_bits_status_mpv, // @[LazyRoCC.scala:78:14]
input io_cmd_bits_status_gva, // @[LazyRoCC.scala:78:14]
input io_cmd_bits_status_mbe, // @[LazyRoCC.scala:78:14]
input io_cmd_bits_status_sbe, // @[LazyRoCC.scala:78:14]
input [1:0] io_cmd_bits_status_sxl, // @[LazyRoCC.scala:78:14]
input [1:0] io_cmd_bits_status_uxl, // @[LazyRoCC.scala:78:14]
input io_cmd_bits_status_sd_rv32, // @[LazyRoCC.scala:78:14]
input [7:0] io_cmd_bits_status_zero1, // @[LazyRoCC.scala:78:14]
input io_cmd_bits_status_tsr, // @[LazyRoCC.scala:78:14]
input io_cmd_bits_status_tw, // @[LazyRoCC.scala:78:14]
input io_cmd_bits_status_tvm, // @[LazyRoCC.scala:78:14]
input io_cmd_bits_status_mxr, // @[LazyRoCC.scala:78:14]
input io_cmd_bits_status_sum, // @[LazyRoCC.scala:78:14]
input io_cmd_bits_status_mprv, // @[LazyRoCC.scala:78:14]
input [1:0] io_cmd_bits_status_xs, // @[LazyRoCC.scala:78:14]
input [1:0] io_cmd_bits_status_fs, // @[LazyRoCC.scala:78:14]
input [1:0] io_cmd_bits_status_mpp, // @[LazyRoCC.scala:78:14]
input [1:0] io_cmd_bits_status_vs, // @[LazyRoCC.scala:78:14]
input io_cmd_bits_status_spp, // @[LazyRoCC.scala:78:14]
input io_cmd_bits_status_mpie, // @[LazyRoCC.scala:78:14]
input io_cmd_bits_status_ube, // @[LazyRoCC.scala:78:14]
input io_cmd_bits_status_spie, // @[LazyRoCC.scala:78:14]
input io_cmd_bits_status_upie, // @[LazyRoCC.scala:78:14]
input io_cmd_bits_status_mie, // @[LazyRoCC.scala:78:14]
input io_cmd_bits_status_hie, // @[LazyRoCC.scala:78:14]
input io_cmd_bits_status_sie, // @[LazyRoCC.scala:78:14]
input io_cmd_bits_status_uie, // @[LazyRoCC.scala:78:14]
input io_resp_ready, // @[LazyRoCC.scala:78:14]
output io_resp_valid, // @[LazyRoCC.scala:78:14]
output [4:0] io_resp_bits_rd, // @[LazyRoCC.scala:78:14]
output [63:0] io_resp_bits_data, // @[LazyRoCC.scala:78:14]
input io_mem_req_ready, // @[LazyRoCC.scala:78:14]
input io_mem_resp_valid, // @[LazyRoCC.scala:78:14]
input [39:0] io_mem_resp_bits_addr, // @[LazyRoCC.scala:78:14]
input [7:0] io_mem_resp_bits_tag, // @[LazyRoCC.scala:78:14]
input [4:0] io_mem_resp_bits_cmd, // @[LazyRoCC.scala:78:14]
input [1:0] io_mem_resp_bits_size, // @[LazyRoCC.scala:78:14]
input io_mem_resp_bits_signed, // @[LazyRoCC.scala:78:14]
input [1:0] io_mem_resp_bits_dprv, // @[LazyRoCC.scala:78:14]
input io_mem_resp_bits_dv, // @[LazyRoCC.scala:78:14]
input [63:0] io_mem_resp_bits_data, // @[LazyRoCC.scala:78:14]
input [7:0] io_mem_resp_bits_mask, // @[LazyRoCC.scala:78:14]
input io_mem_resp_bits_replay, // @[LazyRoCC.scala:78:14]
input io_mem_resp_bits_has_data, // @[LazyRoCC.scala:78:14]
input [63:0] io_mem_resp_bits_data_word_bypass, // @[LazyRoCC.scala:78:14]
input [63:0] io_mem_resp_bits_data_raw, // @[LazyRoCC.scala:78:14]
input [63:0] io_mem_resp_bits_store_data, // @[LazyRoCC.scala:78:14]
input io_exception, // @[LazyRoCC.scala:78:14]
input io_ptw_0_req_ready, // @[LazyRoCC.scala:78:14]
output io_ptw_0_req_valid, // @[LazyRoCC.scala:78:14]
output [26:0] io_ptw_0_req_bits_bits_addr, // @[LazyRoCC.scala:78:14]
output io_ptw_0_req_bits_bits_need_gpa, // @[LazyRoCC.scala:78:14]
input io_ptw_0_resp_valid, // @[LazyRoCC.scala:78:14]
input io_ptw_0_resp_bits_ae_ptw, // @[LazyRoCC.scala:78:14]
input io_ptw_0_resp_bits_ae_final, // @[LazyRoCC.scala:78:14]
input io_ptw_0_resp_bits_pf, // @[LazyRoCC.scala:78:14]
input io_ptw_0_resp_bits_gf, // @[LazyRoCC.scala:78:14]
input io_ptw_0_resp_bits_hr, // @[LazyRoCC.scala:78:14]
input io_ptw_0_resp_bits_hw, // @[LazyRoCC.scala:78:14]
input io_ptw_0_resp_bits_hx, // @[LazyRoCC.scala:78:14]
input [9:0] io_ptw_0_resp_bits_pte_reserved_for_future, // @[LazyRoCC.scala:78:14]
input [43:0] io_ptw_0_resp_bits_pte_ppn, // @[LazyRoCC.scala:78:14]
input [1:0] io_ptw_0_resp_bits_pte_reserved_for_software, // @[LazyRoCC.scala:78:14]
input io_ptw_0_resp_bits_pte_d, // @[LazyRoCC.scala:78:14]
input io_ptw_0_resp_bits_pte_a, // @[LazyRoCC.scala:78:14]
input io_ptw_0_resp_bits_pte_g, // @[LazyRoCC.scala:78:14]
input io_ptw_0_resp_bits_pte_u, // @[LazyRoCC.scala:78:14]
input io_ptw_0_resp_bits_pte_x, // @[LazyRoCC.scala:78:14]
input io_ptw_0_resp_bits_pte_w, // @[LazyRoCC.scala:78:14]
input io_ptw_0_resp_bits_pte_r, // @[LazyRoCC.scala:78:14]
input io_ptw_0_resp_bits_pte_v, // @[LazyRoCC.scala:78:14]
input [1:0] io_ptw_0_resp_bits_level, // @[LazyRoCC.scala:78:14]
input io_ptw_0_resp_bits_homogeneous, // @[LazyRoCC.scala:78:14]
input io_ptw_0_resp_bits_gpa_valid, // @[LazyRoCC.scala:78:14]
input [38:0] io_ptw_0_resp_bits_gpa_bits, // @[LazyRoCC.scala:78:14]
input io_ptw_0_resp_bits_gpa_is_pte, // @[LazyRoCC.scala:78:14]
input [3:0] io_ptw_0_ptbr_mode, // @[LazyRoCC.scala:78:14]
input [43:0] io_ptw_0_ptbr_ppn, // @[LazyRoCC.scala:78:14]
input io_ptw_0_status_debug, // @[LazyRoCC.scala:78:14]
input io_ptw_0_status_cease, // @[LazyRoCC.scala:78:14]
input io_ptw_0_status_wfi, // @[LazyRoCC.scala:78:14]
input [31:0] io_ptw_0_status_isa, // @[LazyRoCC.scala:78:14]
input [1:0] io_ptw_0_status_dprv, // @[LazyRoCC.scala:78:14]
input io_ptw_0_status_dv, // @[LazyRoCC.scala:78:14]
input [1:0] io_ptw_0_status_prv, // @[LazyRoCC.scala:78:14]
input io_ptw_0_status_v, // @[LazyRoCC.scala:78:14]
input io_ptw_0_status_mpv, // @[LazyRoCC.scala:78:14]
input io_ptw_0_status_gva, // @[LazyRoCC.scala:78:14]
input io_ptw_0_status_tsr, // @[LazyRoCC.scala:78:14]
input io_ptw_0_status_tw, // @[LazyRoCC.scala:78:14]
input io_ptw_0_status_tvm, // @[LazyRoCC.scala:78:14]
input io_ptw_0_status_mxr, // @[LazyRoCC.scala:78:14]
input io_ptw_0_status_sum, // @[LazyRoCC.scala:78:14]
input io_ptw_0_status_mprv, // @[LazyRoCC.scala:78:14]
input [1:0] io_ptw_0_status_fs, // @[LazyRoCC.scala:78:14]
input [1:0] io_ptw_0_status_mpp, // @[LazyRoCC.scala:78:14]
input io_ptw_0_status_spp, // @[LazyRoCC.scala:78:14]
input io_ptw_0_status_mpie, // @[LazyRoCC.scala:78:14]
input io_ptw_0_status_spie, // @[LazyRoCC.scala:78:14]
input io_ptw_0_status_mie, // @[LazyRoCC.scala:78:14]
input io_ptw_0_status_sie, // @[LazyRoCC.scala:78:14]
input io_ptw_0_hstatus_spvp, // @[LazyRoCC.scala:78:14]
input io_ptw_0_hstatus_spv, // @[LazyRoCC.scala:78:14]
input io_ptw_0_hstatus_gva, // @[LazyRoCC.scala:78:14]
input io_ptw_0_gstatus_debug, // @[LazyRoCC.scala:78:14]
input io_ptw_0_gstatus_cease, // @[LazyRoCC.scala:78:14]
input io_ptw_0_gstatus_wfi, // @[LazyRoCC.scala:78:14]
input [31:0] io_ptw_0_gstatus_isa, // @[LazyRoCC.scala:78:14]
input [1:0] io_ptw_0_gstatus_dprv, // @[LazyRoCC.scala:78:14]
input io_ptw_0_gstatus_dv, // @[LazyRoCC.scala:78:14]
input [1:0] io_ptw_0_gstatus_prv, // @[LazyRoCC.scala:78:14]
input io_ptw_0_gstatus_v, // @[LazyRoCC.scala:78:14]
input [22:0] io_ptw_0_gstatus_zero2, // @[LazyRoCC.scala:78:14]
input io_ptw_0_gstatus_mpv, // @[LazyRoCC.scala:78:14]
input io_ptw_0_gstatus_gva, // @[LazyRoCC.scala:78:14]
input io_ptw_0_gstatus_mbe, // @[LazyRoCC.scala:78:14]
input io_ptw_0_gstatus_sbe, // @[LazyRoCC.scala:78:14]
input [1:0] io_ptw_0_gstatus_sxl, // @[LazyRoCC.scala:78:14]
input [7:0] io_ptw_0_gstatus_zero1, // @[LazyRoCC.scala:78:14]
input io_ptw_0_gstatus_tsr, // @[LazyRoCC.scala:78:14]
input io_ptw_0_gstatus_tw, // @[LazyRoCC.scala:78:14]
input io_ptw_0_gstatus_tvm, // @[LazyRoCC.scala:78:14]
input io_ptw_0_gstatus_mxr, // @[LazyRoCC.scala:78:14]
input io_ptw_0_gstatus_sum, // @[LazyRoCC.scala:78:14]
input io_ptw_0_gstatus_mprv, // @[LazyRoCC.scala:78:14]
input [1:0] io_ptw_0_gstatus_fs, // @[LazyRoCC.scala:78:14]
input [1:0] io_ptw_0_gstatus_mpp, // @[LazyRoCC.scala:78:14]
input [1:0] io_ptw_0_gstatus_vs, // @[LazyRoCC.scala:78:14]
input io_ptw_0_gstatus_spp, // @[LazyRoCC.scala:78:14]
input io_ptw_0_gstatus_mpie, // @[LazyRoCC.scala:78:14]
input io_ptw_0_gstatus_ube, // @[LazyRoCC.scala:78:14]
input io_ptw_0_gstatus_spie, // @[LazyRoCC.scala:78:14]
input io_ptw_0_gstatus_upie, // @[LazyRoCC.scala:78:14]
input io_ptw_0_gstatus_mie, // @[LazyRoCC.scala:78:14]
input io_ptw_0_gstatus_hie, // @[LazyRoCC.scala:78:14]
input io_ptw_0_gstatus_sie, // @[LazyRoCC.scala:78:14]
input io_ptw_0_gstatus_uie, // @[LazyRoCC.scala:78:14]
input io_ptw_0_pmp_0_cfg_l, // @[LazyRoCC.scala:78:14]
input [1:0] io_ptw_0_pmp_0_cfg_a, // @[LazyRoCC.scala:78:14]
input io_ptw_0_pmp_0_cfg_x, // @[LazyRoCC.scala:78:14]
input io_ptw_0_pmp_0_cfg_w, // @[LazyRoCC.scala:78:14]
input io_ptw_0_pmp_0_cfg_r, // @[LazyRoCC.scala:78:14]
input [29:0] io_ptw_0_pmp_0_addr, // @[LazyRoCC.scala:78:14]
input [31:0] io_ptw_0_pmp_0_mask, // @[LazyRoCC.scala:78:14]
input io_ptw_0_pmp_1_cfg_l, // @[LazyRoCC.scala:78:14]
input [1:0] io_ptw_0_pmp_1_cfg_a, // @[LazyRoCC.scala:78:14]
input io_ptw_0_pmp_1_cfg_x, // @[LazyRoCC.scala:78:14]
input io_ptw_0_pmp_1_cfg_w, // @[LazyRoCC.scala:78:14]
input io_ptw_0_pmp_1_cfg_r, // @[LazyRoCC.scala:78:14]
input [29:0] io_ptw_0_pmp_1_addr, // @[LazyRoCC.scala:78:14]
input [31:0] io_ptw_0_pmp_1_mask, // @[LazyRoCC.scala:78:14]
input io_ptw_0_pmp_2_cfg_l, // @[LazyRoCC.scala:78:14]
input [1:0] io_ptw_0_pmp_2_cfg_a, // @[LazyRoCC.scala:78:14]
input io_ptw_0_pmp_2_cfg_x, // @[LazyRoCC.scala:78:14]
input io_ptw_0_pmp_2_cfg_w, // @[LazyRoCC.scala:78:14]
input io_ptw_0_pmp_2_cfg_r, // @[LazyRoCC.scala:78:14]
input [29:0] io_ptw_0_pmp_2_addr, // @[LazyRoCC.scala:78:14]
input [31:0] io_ptw_0_pmp_2_mask, // @[LazyRoCC.scala:78:14]
input io_ptw_0_pmp_3_cfg_l, // @[LazyRoCC.scala:78:14]
input [1:0] io_ptw_0_pmp_3_cfg_a, // @[LazyRoCC.scala:78:14]
input io_ptw_0_pmp_3_cfg_x, // @[LazyRoCC.scala:78:14]
input io_ptw_0_pmp_3_cfg_w, // @[LazyRoCC.scala:78:14]
input io_ptw_0_pmp_3_cfg_r, // @[LazyRoCC.scala:78:14]
input [29:0] io_ptw_0_pmp_3_addr, // @[LazyRoCC.scala:78:14]
input [31:0] io_ptw_0_pmp_3_mask, // @[LazyRoCC.scala:78:14]
input io_ptw_0_pmp_4_cfg_l, // @[LazyRoCC.scala:78:14]
input [1:0] io_ptw_0_pmp_4_cfg_a, // @[LazyRoCC.scala:78:14]
input io_ptw_0_pmp_4_cfg_x, // @[LazyRoCC.scala:78:14]
input io_ptw_0_pmp_4_cfg_w, // @[LazyRoCC.scala:78:14]
input io_ptw_0_pmp_4_cfg_r, // @[LazyRoCC.scala:78:14]
input [29:0] io_ptw_0_pmp_4_addr, // @[LazyRoCC.scala:78:14]
input [31:0] io_ptw_0_pmp_4_mask, // @[LazyRoCC.scala:78:14]
input io_ptw_0_pmp_5_cfg_l, // @[LazyRoCC.scala:78:14]
input [1:0] io_ptw_0_pmp_5_cfg_a, // @[LazyRoCC.scala:78:14]
input io_ptw_0_pmp_5_cfg_x, // @[LazyRoCC.scala:78:14]
input io_ptw_0_pmp_5_cfg_w, // @[LazyRoCC.scala:78:14]
input io_ptw_0_pmp_5_cfg_r, // @[LazyRoCC.scala:78:14]
input [29:0] io_ptw_0_pmp_5_addr, // @[LazyRoCC.scala:78:14]
input [31:0] io_ptw_0_pmp_5_mask, // @[LazyRoCC.scala:78:14]
input io_ptw_0_pmp_6_cfg_l, // @[LazyRoCC.scala:78:14]
input [1:0] io_ptw_0_pmp_6_cfg_a, // @[LazyRoCC.scala:78:14]
input io_ptw_0_pmp_6_cfg_x, // @[LazyRoCC.scala:78:14]
input io_ptw_0_pmp_6_cfg_w, // @[LazyRoCC.scala:78:14]
input io_ptw_0_pmp_6_cfg_r, // @[LazyRoCC.scala:78:14]
input [29:0] io_ptw_0_pmp_6_addr, // @[LazyRoCC.scala:78:14]
input [31:0] io_ptw_0_pmp_6_mask, // @[LazyRoCC.scala:78:14]
input io_ptw_0_pmp_7_cfg_l, // @[LazyRoCC.scala:78:14]
input [1:0] io_ptw_0_pmp_7_cfg_a, // @[LazyRoCC.scala:78:14]
input io_ptw_0_pmp_7_cfg_x, // @[LazyRoCC.scala:78:14]
input io_ptw_0_pmp_7_cfg_w, // @[LazyRoCC.scala:78:14]
input io_ptw_0_pmp_7_cfg_r, // @[LazyRoCC.scala:78:14]
input [29:0] io_ptw_0_pmp_7_addr, // @[LazyRoCC.scala:78:14]
input [31:0] io_ptw_0_pmp_7_mask, // @[LazyRoCC.scala:78:14]
input io_ptw_0_customCSRs_csrs_0_ren, // @[LazyRoCC.scala:78:14]
input io_ptw_0_customCSRs_csrs_0_wen, // @[LazyRoCC.scala:78:14]
input [63:0] io_ptw_0_customCSRs_csrs_0_wdata, // @[LazyRoCC.scala:78:14]
input [63:0] io_ptw_0_customCSRs_csrs_0_value, // @[LazyRoCC.scala:78:14]
input io_ptw_0_customCSRs_csrs_1_ren, // @[LazyRoCC.scala:78:14]
input io_ptw_0_customCSRs_csrs_1_wen, // @[LazyRoCC.scala:78:14]
input [63:0] io_ptw_0_customCSRs_csrs_1_wdata, // @[LazyRoCC.scala:78:14]
input [63:0] io_ptw_0_customCSRs_csrs_1_value, // @[LazyRoCC.scala:78:14]
input io_ptw_0_customCSRs_csrs_2_ren, // @[LazyRoCC.scala:78:14]
input io_ptw_0_customCSRs_csrs_2_wen, // @[LazyRoCC.scala:78:14]
input [63:0] io_ptw_0_customCSRs_csrs_2_wdata, // @[LazyRoCC.scala:78:14]
input [63:0] io_ptw_0_customCSRs_csrs_2_value, // @[LazyRoCC.scala:78:14]
input io_ptw_0_customCSRs_csrs_3_ren, // @[LazyRoCC.scala:78:14]
input io_ptw_0_customCSRs_csrs_3_wen, // @[LazyRoCC.scala:78:14]
input [63:0] io_ptw_0_customCSRs_csrs_3_wdata, // @[LazyRoCC.scala:78:14]
input [63:0] io_ptw_0_customCSRs_csrs_3_value, // @[LazyRoCC.scala:78:14]
input io_ptw_1_req_ready, // @[LazyRoCC.scala:78:14]
output io_ptw_1_req_valid, // @[LazyRoCC.scala:78:14]
output [26:0] io_ptw_1_req_bits_bits_addr, // @[LazyRoCC.scala:78:14]
output io_ptw_1_req_bits_bits_need_gpa, // @[LazyRoCC.scala:78:14]
input io_ptw_1_resp_valid, // @[LazyRoCC.scala:78:14]
input io_ptw_1_resp_bits_ae_ptw, // @[LazyRoCC.scala:78:14]
input io_ptw_1_resp_bits_ae_final, // @[LazyRoCC.scala:78:14]
input io_ptw_1_resp_bits_pf, // @[LazyRoCC.scala:78:14]
input io_ptw_1_resp_bits_gf, // @[LazyRoCC.scala:78:14]
input io_ptw_1_resp_bits_hr, // @[LazyRoCC.scala:78:14]
input io_ptw_1_resp_bits_hw, // @[LazyRoCC.scala:78:14]
input io_ptw_1_resp_bits_hx, // @[LazyRoCC.scala:78:14]
input [9:0] io_ptw_1_resp_bits_pte_reserved_for_future, // @[LazyRoCC.scala:78:14]
input [43:0] io_ptw_1_resp_bits_pte_ppn, // @[LazyRoCC.scala:78:14]
input [1:0] io_ptw_1_resp_bits_pte_reserved_for_software, // @[LazyRoCC.scala:78:14]
input io_ptw_1_resp_bits_pte_d, // @[LazyRoCC.scala:78:14]
input io_ptw_1_resp_bits_pte_a, // @[LazyRoCC.scala:78:14]
input io_ptw_1_resp_bits_pte_g, // @[LazyRoCC.scala:78:14]
input io_ptw_1_resp_bits_pte_u, // @[LazyRoCC.scala:78:14]
input io_ptw_1_resp_bits_pte_x, // @[LazyRoCC.scala:78:14]
input io_ptw_1_resp_bits_pte_w, // @[LazyRoCC.scala:78:14]
input io_ptw_1_resp_bits_pte_r, // @[LazyRoCC.scala:78:14]
input io_ptw_1_resp_bits_pte_v, // @[LazyRoCC.scala:78:14]
input [1:0] io_ptw_1_resp_bits_level, // @[LazyRoCC.scala:78:14]
input io_ptw_1_resp_bits_homogeneous, // @[LazyRoCC.scala:78:14]
input io_ptw_1_resp_bits_gpa_valid, // @[LazyRoCC.scala:78:14]
input [38:0] io_ptw_1_resp_bits_gpa_bits, // @[LazyRoCC.scala:78:14]
input io_ptw_1_resp_bits_gpa_is_pte, // @[LazyRoCC.scala:78:14]
input [3:0] io_ptw_1_ptbr_mode, // @[LazyRoCC.scala:78:14]
input [43:0] io_ptw_1_ptbr_ppn, // @[LazyRoCC.scala:78:14]
input io_ptw_1_status_debug, // @[LazyRoCC.scala:78:14]
input io_ptw_1_status_cease, // @[LazyRoCC.scala:78:14]
input io_ptw_1_status_wfi, // @[LazyRoCC.scala:78:14]
input [31:0] io_ptw_1_status_isa, // @[LazyRoCC.scala:78:14]
input [1:0] io_ptw_1_status_dprv, // @[LazyRoCC.scala:78:14]
input io_ptw_1_status_dv, // @[LazyRoCC.scala:78:14]
input [1:0] io_ptw_1_status_prv, // @[LazyRoCC.scala:78:14]
input io_ptw_1_status_v, // @[LazyRoCC.scala:78:14]
input io_ptw_1_status_mpv, // @[LazyRoCC.scala:78:14]
input io_ptw_1_status_gva, // @[LazyRoCC.scala:78:14]
input io_ptw_1_status_tsr, // @[LazyRoCC.scala:78:14]
input io_ptw_1_status_tw, // @[LazyRoCC.scala:78:14]
input io_ptw_1_status_tvm, // @[LazyRoCC.scala:78:14]
input io_ptw_1_status_mxr, // @[LazyRoCC.scala:78:14]
input io_ptw_1_status_sum, // @[LazyRoCC.scala:78:14]
input io_ptw_1_status_mprv, // @[LazyRoCC.scala:78:14]
input [1:0] io_ptw_1_status_fs, // @[LazyRoCC.scala:78:14]
input [1:0] io_ptw_1_status_mpp, // @[LazyRoCC.scala:78:14]
input io_ptw_1_status_spp, // @[LazyRoCC.scala:78:14]
input io_ptw_1_status_mpie, // @[LazyRoCC.scala:78:14]
input io_ptw_1_status_spie, // @[LazyRoCC.scala:78:14]
input io_ptw_1_status_mie, // @[LazyRoCC.scala:78:14]
input io_ptw_1_status_sie, // @[LazyRoCC.scala:78:14]
input io_ptw_1_hstatus_spvp, // @[LazyRoCC.scala:78:14]
input io_ptw_1_hstatus_spv, // @[LazyRoCC.scala:78:14]
input io_ptw_1_hstatus_gva, // @[LazyRoCC.scala:78:14]
input io_ptw_1_gstatus_debug, // @[LazyRoCC.scala:78:14]
input io_ptw_1_gstatus_cease, // @[LazyRoCC.scala:78:14]
input io_ptw_1_gstatus_wfi, // @[LazyRoCC.scala:78:14]
input [31:0] io_ptw_1_gstatus_isa, // @[LazyRoCC.scala:78:14]
input [1:0] io_ptw_1_gstatus_dprv, // @[LazyRoCC.scala:78:14]
input io_ptw_1_gstatus_dv, // @[LazyRoCC.scala:78:14]
input [1:0] io_ptw_1_gstatus_prv, // @[LazyRoCC.scala:78:14]
input io_ptw_1_gstatus_v, // @[LazyRoCC.scala:78:14]
input [22:0] io_ptw_1_gstatus_zero2, // @[LazyRoCC.scala:78:14]
input io_ptw_1_gstatus_mpv, // @[LazyRoCC.scala:78:14]
input io_ptw_1_gstatus_gva, // @[LazyRoCC.scala:78:14]
input io_ptw_1_gstatus_mbe, // @[LazyRoCC.scala:78:14]
input io_ptw_1_gstatus_sbe, // @[LazyRoCC.scala:78:14]
input [1:0] io_ptw_1_gstatus_sxl, // @[LazyRoCC.scala:78:14]
input [7:0] io_ptw_1_gstatus_zero1, // @[LazyRoCC.scala:78:14]
input io_ptw_1_gstatus_tsr, // @[LazyRoCC.scala:78:14]
input io_ptw_1_gstatus_tw, // @[LazyRoCC.scala:78:14]
input io_ptw_1_gstatus_tvm, // @[LazyRoCC.scala:78:14]
input io_ptw_1_gstatus_mxr, // @[LazyRoCC.scala:78:14]
input io_ptw_1_gstatus_sum, // @[LazyRoCC.scala:78:14]
input io_ptw_1_gstatus_mprv, // @[LazyRoCC.scala:78:14]
input [1:0] io_ptw_1_gstatus_fs, // @[LazyRoCC.scala:78:14]
input [1:0] io_ptw_1_gstatus_mpp, // @[LazyRoCC.scala:78:14]
input [1:0] io_ptw_1_gstatus_vs, // @[LazyRoCC.scala:78:14]
input io_ptw_1_gstatus_spp, // @[LazyRoCC.scala:78:14]
input io_ptw_1_gstatus_mpie, // @[LazyRoCC.scala:78:14]
input io_ptw_1_gstatus_ube, // @[LazyRoCC.scala:78:14]
input io_ptw_1_gstatus_spie, // @[LazyRoCC.scala:78:14]
input io_ptw_1_gstatus_upie, // @[LazyRoCC.scala:78:14]
input io_ptw_1_gstatus_mie, // @[LazyRoCC.scala:78:14]
input io_ptw_1_gstatus_hie, // @[LazyRoCC.scala:78:14]
input io_ptw_1_gstatus_sie, // @[LazyRoCC.scala:78:14]
input io_ptw_1_gstatus_uie, // @[LazyRoCC.scala:78:14]
input io_ptw_1_pmp_0_cfg_l, // @[LazyRoCC.scala:78:14]
input [1:0] io_ptw_1_pmp_0_cfg_a, // @[LazyRoCC.scala:78:14]
input io_ptw_1_pmp_0_cfg_x, // @[LazyRoCC.scala:78:14]
input io_ptw_1_pmp_0_cfg_w, // @[LazyRoCC.scala:78:14]
input io_ptw_1_pmp_0_cfg_r, // @[LazyRoCC.scala:78:14]
input [29:0] io_ptw_1_pmp_0_addr, // @[LazyRoCC.scala:78:14]
input [31:0] io_ptw_1_pmp_0_mask, // @[LazyRoCC.scala:78:14]
input io_ptw_1_pmp_1_cfg_l, // @[LazyRoCC.scala:78:14]
input [1:0] io_ptw_1_pmp_1_cfg_a, // @[LazyRoCC.scala:78:14]
input io_ptw_1_pmp_1_cfg_x, // @[LazyRoCC.scala:78:14]
input io_ptw_1_pmp_1_cfg_w, // @[LazyRoCC.scala:78:14]
input io_ptw_1_pmp_1_cfg_r, // @[LazyRoCC.scala:78:14]
input [29:0] io_ptw_1_pmp_1_addr, // @[LazyRoCC.scala:78:14]
input [31:0] io_ptw_1_pmp_1_mask, // @[LazyRoCC.scala:78:14]
input io_ptw_1_pmp_2_cfg_l, // @[LazyRoCC.scala:78:14]
input [1:0] io_ptw_1_pmp_2_cfg_a, // @[LazyRoCC.scala:78:14]
input io_ptw_1_pmp_2_cfg_x, // @[LazyRoCC.scala:78:14]
input io_ptw_1_pmp_2_cfg_w, // @[LazyRoCC.scala:78:14]
input io_ptw_1_pmp_2_cfg_r, // @[LazyRoCC.scala:78:14]
input [29:0] io_ptw_1_pmp_2_addr, // @[LazyRoCC.scala:78:14]
input [31:0] io_ptw_1_pmp_2_mask, // @[LazyRoCC.scala:78:14]
input io_ptw_1_pmp_3_cfg_l, // @[LazyRoCC.scala:78:14]
input [1:0] io_ptw_1_pmp_3_cfg_a, // @[LazyRoCC.scala:78:14]
input io_ptw_1_pmp_3_cfg_x, // @[LazyRoCC.scala:78:14]
input io_ptw_1_pmp_3_cfg_w, // @[LazyRoCC.scala:78:14]
input io_ptw_1_pmp_3_cfg_r, // @[LazyRoCC.scala:78:14]
input [29:0] io_ptw_1_pmp_3_addr, // @[LazyRoCC.scala:78:14]
input [31:0] io_ptw_1_pmp_3_mask, // @[LazyRoCC.scala:78:14]
input io_ptw_1_pmp_4_cfg_l, // @[LazyRoCC.scala:78:14]
input [1:0] io_ptw_1_pmp_4_cfg_a, // @[LazyRoCC.scala:78:14]
input io_ptw_1_pmp_4_cfg_x, // @[LazyRoCC.scala:78:14]
input io_ptw_1_pmp_4_cfg_w, // @[LazyRoCC.scala:78:14]
input io_ptw_1_pmp_4_cfg_r, // @[LazyRoCC.scala:78:14]
input [29:0] io_ptw_1_pmp_4_addr, // @[LazyRoCC.scala:78:14]
input [31:0] io_ptw_1_pmp_4_mask, // @[LazyRoCC.scala:78:14]
input io_ptw_1_pmp_5_cfg_l, // @[LazyRoCC.scala:78:14]
input [1:0] io_ptw_1_pmp_5_cfg_a, // @[LazyRoCC.scala:78:14]
input io_ptw_1_pmp_5_cfg_x, // @[LazyRoCC.scala:78:14]
input io_ptw_1_pmp_5_cfg_w, // @[LazyRoCC.scala:78:14]
input io_ptw_1_pmp_5_cfg_r, // @[LazyRoCC.scala:78:14]
input [29:0] io_ptw_1_pmp_5_addr, // @[LazyRoCC.scala:78:14]
input [31:0] io_ptw_1_pmp_5_mask, // @[LazyRoCC.scala:78:14]
input io_ptw_1_pmp_6_cfg_l, // @[LazyRoCC.scala:78:14]
input [1:0] io_ptw_1_pmp_6_cfg_a, // @[LazyRoCC.scala:78:14]
input io_ptw_1_pmp_6_cfg_x, // @[LazyRoCC.scala:78:14]
input io_ptw_1_pmp_6_cfg_w, // @[LazyRoCC.scala:78:14]
input io_ptw_1_pmp_6_cfg_r, // @[LazyRoCC.scala:78:14]
input [29:0] io_ptw_1_pmp_6_addr, // @[LazyRoCC.scala:78:14]
input [31:0] io_ptw_1_pmp_6_mask, // @[LazyRoCC.scala:78:14]
input io_ptw_1_pmp_7_cfg_l, // @[LazyRoCC.scala:78:14]
input [1:0] io_ptw_1_pmp_7_cfg_a, // @[LazyRoCC.scala:78:14]
input io_ptw_1_pmp_7_cfg_x, // @[LazyRoCC.scala:78:14]
input io_ptw_1_pmp_7_cfg_w, // @[LazyRoCC.scala:78:14]
input io_ptw_1_pmp_7_cfg_r, // @[LazyRoCC.scala:78:14]
input [29:0] io_ptw_1_pmp_7_addr, // @[LazyRoCC.scala:78:14]
input [31:0] io_ptw_1_pmp_7_mask, // @[LazyRoCC.scala:78:14]
input io_ptw_1_customCSRs_csrs_0_ren, // @[LazyRoCC.scala:78:14]
input io_ptw_1_customCSRs_csrs_0_wen, // @[LazyRoCC.scala:78:14]
input [63:0] io_ptw_1_customCSRs_csrs_0_wdata, // @[LazyRoCC.scala:78:14]
input [63:0] io_ptw_1_customCSRs_csrs_0_value, // @[LazyRoCC.scala:78:14]
input io_ptw_1_customCSRs_csrs_1_ren, // @[LazyRoCC.scala:78:14]
input io_ptw_1_customCSRs_csrs_1_wen, // @[LazyRoCC.scala:78:14]
input [63:0] io_ptw_1_customCSRs_csrs_1_wdata, // @[LazyRoCC.scala:78:14]
input [63:0] io_ptw_1_customCSRs_csrs_1_value, // @[LazyRoCC.scala:78:14]
input io_ptw_1_customCSRs_csrs_2_ren, // @[LazyRoCC.scala:78:14]
input io_ptw_1_customCSRs_csrs_2_wen, // @[LazyRoCC.scala:78:14]
input [63:0] io_ptw_1_customCSRs_csrs_2_wdata, // @[LazyRoCC.scala:78:14]
input [63:0] io_ptw_1_customCSRs_csrs_2_value, // @[LazyRoCC.scala:78:14]
input io_ptw_1_customCSRs_csrs_3_ren, // @[LazyRoCC.scala:78:14]
input io_ptw_1_customCSRs_csrs_3_wen, // @[LazyRoCC.scala:78:14]
input [63:0] io_ptw_1_customCSRs_csrs_3_wdata, // @[LazyRoCC.scala:78:14]
input [63:0] io_ptw_1_customCSRs_csrs_3_value // @[LazyRoCC.scala:78:14]
);
wire widget_1_auto_anon_out_d_valid; // @[WidthWidget.scala:27:9]
wire widget_1_auto_anon_out_d_ready; // @[WidthWidget.scala:27:9]
wire widget_1_auto_anon_out_d_bits_corrupt; // @[WidthWidget.scala:27:9]
wire [255:0] widget_1_auto_anon_out_d_bits_data; // @[WidthWidget.scala:27:9]
wire widget_1_auto_anon_out_d_bits_denied; // @[WidthWidget.scala:27:9]
wire [4:0] widget_1_auto_anon_out_d_bits_sink; // @[WidthWidget.scala:27:9]
wire [4:0] widget_1_auto_anon_out_d_bits_source; // @[WidthWidget.scala:27:9]
wire [3:0] widget_1_auto_anon_out_d_bits_size; // @[WidthWidget.scala:27:9]
wire [1:0] widget_1_auto_anon_out_d_bits_param; // @[WidthWidget.scala:27:9]
wire [2:0] widget_1_auto_anon_out_d_bits_opcode; // @[WidthWidget.scala:27:9]
wire widget_1_auto_anon_out_a_valid; // @[WidthWidget.scala:27:9]
wire widget_1_auto_anon_out_a_ready; // @[WidthWidget.scala:27:9]
wire widget_1_auto_anon_out_a_bits_corrupt; // @[WidthWidget.scala:27:9]
wire [255:0] widget_1_auto_anon_out_a_bits_data; // @[WidthWidget.scala:27:9]
wire [31:0] widget_1_auto_anon_out_a_bits_mask; // @[WidthWidget.scala:27:9]
wire [31:0] widget_1_auto_anon_out_a_bits_address; // @[WidthWidget.scala:27:9]
wire [4:0] widget_1_auto_anon_out_a_bits_source; // @[WidthWidget.scala:27:9]
wire [3:0] widget_1_auto_anon_out_a_bits_size; // @[WidthWidget.scala:27:9]
wire [2:0] widget_1_auto_anon_out_a_bits_param; // @[WidthWidget.scala:27:9]
wire [2:0] widget_1_auto_anon_out_a_bits_opcode; // @[WidthWidget.scala:27:9]
wire widget_1_auto_anon_in_d_ready; // @[WidthWidget.scala:27:9]
wire widget_1_auto_anon_in_a_valid; // @[WidthWidget.scala:27:9]
wire widget_1_auto_anon_in_a_bits_corrupt; // @[WidthWidget.scala:27:9]
wire [255:0] widget_1_auto_anon_in_a_bits_data; // @[WidthWidget.scala:27:9]
wire [31:0] widget_1_auto_anon_in_a_bits_mask; // @[WidthWidget.scala:27:9]
wire [31:0] widget_1_auto_anon_in_a_bits_address; // @[WidthWidget.scala:27:9]
wire [4:0] widget_1_auto_anon_in_a_bits_source; // @[WidthWidget.scala:27:9]
wire [3:0] widget_1_auto_anon_in_a_bits_size; // @[WidthWidget.scala:27:9]
wire [2:0] widget_1_auto_anon_in_a_bits_param; // @[WidthWidget.scala:27:9]
wire [2:0] widget_1_auto_anon_in_a_bits_opcode; // @[WidthWidget.scala:27:9]
wire widget_auto_anon_out_d_valid; // @[WidthWidget.scala:27:9]
wire widget_auto_anon_out_d_ready; // @[WidthWidget.scala:27:9]
wire widget_auto_anon_out_d_bits_corrupt; // @[WidthWidget.scala:27:9]
wire [255:0] widget_auto_anon_out_d_bits_data; // @[WidthWidget.scala:27:9]
wire widget_auto_anon_out_d_bits_denied; // @[WidthWidget.scala:27:9]
wire [4:0] widget_auto_anon_out_d_bits_sink; // @[WidthWidget.scala:27:9]
wire [4:0] widget_auto_anon_out_d_bits_source; // @[WidthWidget.scala:27:9]
wire [3:0] widget_auto_anon_out_d_bits_size; // @[WidthWidget.scala:27:9]
wire [1:0] widget_auto_anon_out_d_bits_param; // @[WidthWidget.scala:27:9]
wire [2:0] widget_auto_anon_out_d_bits_opcode; // @[WidthWidget.scala:27:9]
wire widget_auto_anon_out_a_valid; // @[WidthWidget.scala:27:9]
wire widget_auto_anon_out_a_ready; // @[WidthWidget.scala:27:9]
wire widget_auto_anon_out_a_bits_corrupt; // @[WidthWidget.scala:27:9]
wire [255:0] widget_auto_anon_out_a_bits_data; // @[WidthWidget.scala:27:9]
wire [31:0] widget_auto_anon_out_a_bits_mask; // @[WidthWidget.scala:27:9]
wire [31:0] widget_auto_anon_out_a_bits_address; // @[WidthWidget.scala:27:9]
wire [4:0] widget_auto_anon_out_a_bits_source; // @[WidthWidget.scala:27:9]
wire [3:0] widget_auto_anon_out_a_bits_size; // @[WidthWidget.scala:27:9]
wire [2:0] widget_auto_anon_out_a_bits_param; // @[WidthWidget.scala:27:9]
wire [2:0] widget_auto_anon_out_a_bits_opcode; // @[WidthWidget.scala:27:9]
wire widget_auto_anon_in_d_ready; // @[WidthWidget.scala:27:9]
wire widget_auto_anon_in_a_valid; // @[WidthWidget.scala:27:9]
wire widget_auto_anon_in_a_bits_corrupt; // @[WidthWidget.scala:27:9]
wire [255:0] widget_auto_anon_in_a_bits_data; // @[WidthWidget.scala:27:9]
wire [31:0] widget_auto_anon_in_a_bits_mask; // @[WidthWidget.scala:27:9]
wire [31:0] widget_auto_anon_in_a_bits_address; // @[WidthWidget.scala:27:9]
wire [4:0] widget_auto_anon_in_a_bits_source; // @[WidthWidget.scala:27:9]
wire [3:0] widget_auto_anon_in_a_bits_size; // @[WidthWidget.scala:27:9]
wire [2:0] widget_auto_anon_in_a_bits_param; // @[WidthWidget.scala:27:9]
wire [2:0] widget_auto_anon_in_a_bits_opcode; // @[WidthWidget.scala:27:9]
wire [5:0] _streamer_load_data_queue_io_enq_bits_chunk_data_io_mem_stream_user_consumed_bytes; // @[Top.scala:36:29]
wire _streamer_load_data_queue_io_enq_bits_chunk_data_io_mem_stream_output_ready; // @[Top.scala:36:29]
wire _streamer_load_data_queue_io_enq_bits_chunk_data_io_memwrites_in_valid; // @[Top.scala:36:29]
wire [255:0] _streamer_load_data_queue_io_enq_bits_chunk_data_io_memwrites_in_bits_data; // @[Top.scala:36:29]
wire [5:0] _streamer_load_data_queue_io_enq_bits_chunk_data_io_memwrites_in_bits_validbytes; // @[Top.scala:36:29]
wire _streamer_load_data_queue_io_enq_bits_chunk_data_io_memwrites_in_bits_end_of_message; // @[Top.scala:36:29]
wire _memwriter_io_memwrites_in_ready; // @[MemStreamerAccel.scala:67:25]
wire _memwriter_io_l2io_req_valid; // @[MemStreamerAccel.scala:67:25]
wire [63:0] _memwriter_io_l2io_req_bits_addr; // @[MemStreamerAccel.scala:67:25]
wire [2:0] _memwriter_io_l2io_req_bits_size; // @[MemStreamerAccel.scala:67:25]
wire [255:0] _memwriter_io_l2io_req_bits_data; // @[MemStreamerAccel.scala:67:25]
wire _memwriter_io_decompress_dest_info_ready; // @[MemStreamerAccel.scala:67:25]
wire [63:0] _memwriter_io_bufs_completed; // @[MemStreamerAccel.scala:67:25]
wire _memwriter_io_no_writes_inflight; // @[MemStreamerAccel.scala:67:25]
wire _memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_valid; // @[Top.scala:35:31]
wire _memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_debug; // @[Top.scala:35:31]
wire _memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_cease; // @[Top.scala:35:31]
wire _memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_wfi; // @[Top.scala:35:31]
wire [31:0] _memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_isa; // @[Top.scala:35:31]
wire [1:0] _memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_dprv; // @[Top.scala:35:31]
wire _memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_dv; // @[Top.scala:35:31]
wire [1:0] _memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_prv; // @[Top.scala:35:31]
wire _memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_v; // @[Top.scala:35:31]
wire _memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_sd; // @[Top.scala:35:31]
wire [22:0] _memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_zero2; // @[Top.scala:35:31]
wire _memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_mpv; // @[Top.scala:35:31]
wire _memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_gva; // @[Top.scala:35:31]
wire _memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_mbe; // @[Top.scala:35:31]
wire _memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_sbe; // @[Top.scala:35:31]
wire [1:0] _memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_sxl; // @[Top.scala:35:31]
wire [1:0] _memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_uxl; // @[Top.scala:35:31]
wire _memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_sd_rv32; // @[Top.scala:35:31]
wire [7:0] _memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_zero1; // @[Top.scala:35:31]
wire _memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_tsr; // @[Top.scala:35:31]
wire _memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_tw; // @[Top.scala:35:31]
wire _memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_tvm; // @[Top.scala:35:31]
wire _memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_mxr; // @[Top.scala:35:31]
wire _memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_sum; // @[Top.scala:35:31]
wire _memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_mprv; // @[Top.scala:35:31]
wire [1:0] _memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_xs; // @[Top.scala:35:31]
wire [1:0] _memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_fs; // @[Top.scala:35:31]
wire [1:0] _memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_mpp; // @[Top.scala:35:31]
wire [1:0] _memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_vs; // @[Top.scala:35:31]
wire _memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_spp; // @[Top.scala:35:31]
wire _memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_mpie; // @[Top.scala:35:31]
wire _memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_ube; // @[Top.scala:35:31]
wire _memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_spie; // @[Top.scala:35:31]
wire _memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_upie; // @[Top.scala:35:31]
wire _memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_mie; // @[Top.scala:35:31]
wire _memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_hie; // @[Top.scala:35:31]
wire _memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_sie; // @[Top.scala:35:31]
wire _memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_uie; // @[Top.scala:35:31]
wire _memloader_io_src_info_cmd_router_cur_funct_io_sfence_out; // @[Top.scala:35:31]
wire _memloader_io_src_info_cmd_router_cur_funct_io_src_info_valid; // @[Top.scala:35:31]
wire [63:0] _memloader_io_src_info_cmd_router_cur_funct_io_src_info_bits_ip; // @[Top.scala:35:31]
wire [63:0] _memloader_io_src_info_cmd_router_cur_funct_io_src_info_bits_isize; // @[Top.scala:35:31]
wire _memloader_io_src_info_cmd_router_cur_funct_io_dest_info_valid; // @[Top.scala:35:31]
wire [63:0] _memloader_io_src_info_cmd_router_cur_funct_io_dest_info_bits_op; // @[Top.scala:35:31]
wire [63:0] _memloader_io_src_info_cmd_router_cur_funct_io_dest_info_bits_cmpflag; // @[Top.scala:35:31]
wire _memloader_io_src_info_cmd_router_cur_funct_io_key_valid; // @[Top.scala:35:31]
wire [255:0] _memloader_io_src_info_cmd_router_cur_funct_io_key_bits; // @[Top.scala:35:31]
wire _memloader_io_src_info_cmd_router_cur_funct_io_mode_valid; // @[Top.scala:35:31]
wire _memloader_io_src_info_cmd_router_cur_funct_io_mode_bits; // @[Top.scala:35:31]
wire _memloader_io_l2helperUser_req_valid; // @[MemStreamerAccel.scala:63:25]
wire [70:0] _memloader_io_l2helperUser_req_bits_addr; // @[MemStreamerAccel.scala:63:25]
wire _memloader_io_l2helperUser_resp_ready; // @[MemStreamerAccel.scala:63:25]
wire _memloader_io_src_info_ready; // @[MemStreamerAccel.scala:63:25]
wire [5:0] _memloader_io_consumer_available_output_bytes; // @[MemStreamerAccel.scala:63:25]
wire _memloader_io_consumer_output_valid; // @[MemStreamerAccel.scala:63:25]
wire [255:0] _memloader_io_consumer_output_data; // @[MemStreamerAccel.scala:63:25]
wire _memloader_io_consumer_output_last_chunk; // @[MemStreamerAccel.scala:63:25]
wire _buffers_1_auto_in_a_ready; // @[Buffer.scala:80:47]
wire _buffers_1_auto_in_d_valid; // @[Buffer.scala:80:47]
wire [2:0] _buffers_1_auto_in_d_bits_opcode; // @[Buffer.scala:80:47]
wire [1:0] _buffers_1_auto_in_d_bits_param; // @[Buffer.scala:80:47]
wire [3:0] _buffers_1_auto_in_d_bits_size; // @[Buffer.scala:80:47]
wire [4:0] _buffers_1_auto_in_d_bits_source; // @[Buffer.scala:80:47]
wire [4:0] _buffers_1_auto_in_d_bits_sink; // @[Buffer.scala:80:47]
wire _buffers_1_auto_in_d_bits_denied; // @[Buffer.scala:80:47]
wire [255:0] _buffers_1_auto_in_d_bits_data; // @[Buffer.scala:80:47]
wire _buffers_1_auto_in_d_bits_corrupt; // @[Buffer.scala:80:47]
wire _l2_memwriter_auto_master_out_a_valid; // @[MemStreamerAccel.scala:37:36]
wire [2:0] _l2_memwriter_auto_master_out_a_bits_opcode; // @[MemStreamerAccel.scala:37:36]
wire [3:0] _l2_memwriter_auto_master_out_a_bits_size; // @[MemStreamerAccel.scala:37:36]
wire [4:0] _l2_memwriter_auto_master_out_a_bits_source; // @[MemStreamerAccel.scala:37:36]
wire [31:0] _l2_memwriter_auto_master_out_a_bits_address; // @[MemStreamerAccel.scala:37:36]
wire [31:0] _l2_memwriter_auto_master_out_a_bits_mask; // @[MemStreamerAccel.scala:37:36]
wire [255:0] _l2_memwriter_auto_master_out_a_bits_data; // @[MemStreamerAccel.scala:37:36]
wire _l2_memwriter_auto_master_out_d_ready; // @[MemStreamerAccel.scala:37:36]
wire _l2_memwriter_io_userif_req_ready; // @[MemStreamerAccel.scala:37:36]
wire _l2_memwriter_io_userif_resp_valid; // @[MemStreamerAccel.scala:37:36]
wire [255:0] _l2_memwriter_io_userif_resp_bits_data; // @[MemStreamerAccel.scala:37:36]
wire _l2_memwriter_io_userif_no_memops_inflight; // @[MemStreamerAccel.scala:37:36]
wire _buffers_auto_in_a_ready; // @[Buffer.scala:80:47]
wire _buffers_auto_in_d_valid; // @[Buffer.scala:80:47]
wire [2:0] _buffers_auto_in_d_bits_opcode; // @[Buffer.scala:80:47]
wire [1:0] _buffers_auto_in_d_bits_param; // @[Buffer.scala:80:47]
wire [3:0] _buffers_auto_in_d_bits_size; // @[Buffer.scala:80:47]
wire [4:0] _buffers_auto_in_d_bits_source; // @[Buffer.scala:80:47]
wire [4:0] _buffers_auto_in_d_bits_sink; // @[Buffer.scala:80:47]
wire _buffers_auto_in_d_bits_denied; // @[Buffer.scala:80:47]
wire [255:0] _buffers_auto_in_d_bits_data; // @[Buffer.scala:80:47]
wire _buffers_auto_in_d_bits_corrupt; // @[Buffer.scala:80:47]
wire _l2_memloader_auto_master_out_a_valid; // @[MemStreamerAccel.scala:34:36]
wire [2:0] _l2_memloader_auto_master_out_a_bits_opcode; // @[MemStreamerAccel.scala:34:36]
wire [3:0] _l2_memloader_auto_master_out_a_bits_size; // @[MemStreamerAccel.scala:34:36]
wire [4:0] _l2_memloader_auto_master_out_a_bits_source; // @[MemStreamerAccel.scala:34:36]
wire [31:0] _l2_memloader_auto_master_out_a_bits_address; // @[MemStreamerAccel.scala:34:36]
wire [31:0] _l2_memloader_auto_master_out_a_bits_mask; // @[MemStreamerAccel.scala:34:36]
wire [255:0] _l2_memloader_auto_master_out_a_bits_data; // @[MemStreamerAccel.scala:34:36]
wire _l2_memloader_auto_master_out_d_ready; // @[MemStreamerAccel.scala:34:36]
wire _l2_memloader_io_userif_req_ready; // @[MemStreamerAccel.scala:34:36]
wire _l2_memloader_io_userif_resp_valid; // @[MemStreamerAccel.scala:34:36]
wire [255:0] _l2_memloader_io_userif_resp_bits_data; // @[MemStreamerAccel.scala:34:36]
wire _l2_memloader_io_userif_no_memops_inflight; // @[MemStreamerAccel.scala:34:36]
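  // Per-port copies of the module inputs (suffixed _0), as emitted by Chisel elaboration.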
wire auto_atl_out_1_a_ready_0 = auto_atl_out_1_a_ready; // @[Top.scala:30:7]
wire auto_atl_out_1_d_valid_0 = auto_atl_out_1_d_valid; // @[Top.scala:30:7]
wire [2:0] auto_atl_out_1_d_bits_opcode_0 = auto_atl_out_1_d_bits_opcode; // @[Top.scala:30:7]
wire [1:0] auto_atl_out_1_d_bits_param_0 = auto_atl_out_1_d_bits_param; // @[Top.scala:30:7]
wire [3:0] auto_atl_out_1_d_bits_size_0 = auto_atl_out_1_d_bits_size; // @[Top.scala:30:7]
wire [4:0] auto_atl_out_1_d_bits_source_0 = auto_atl_out_1_d_bits_source; // @[Top.scala:30:7]
wire [4:0] auto_atl_out_1_d_bits_sink_0 = auto_atl_out_1_d_bits_sink; // @[Top.scala:30:7]
wire auto_atl_out_1_d_bits_denied_0 = auto_atl_out_1_d_bits_denied; // @[Top.scala:30:7]
wire [255:0] auto_atl_out_1_d_bits_data_0 = auto_atl_out_1_d_bits_data; // @[Top.scala:30:7]
wire auto_atl_out_1_d_bits_corrupt_0 = auto_atl_out_1_d_bits_corrupt; // @[Top.scala:30:7]
wire auto_atl_out_0_a_ready_0 = auto_atl_out_0_a_ready; // @[Top.scala:30:7]
wire auto_atl_out_0_d_valid_0 = auto_atl_out_0_d_valid; // @[Top.scala:30:7]
wire [2:0] auto_atl_out_0_d_bits_opcode_0 = auto_atl_out_0_d_bits_opcode; // @[Top.scala:30:7]
wire [1:0] auto_atl_out_0_d_bits_param_0 = auto_atl_out_0_d_bits_param; // @[Top.scala:30:7]
wire [3:0] auto_atl_out_0_d_bits_size_0 = auto_atl_out_0_d_bits_size; // @[Top.scala:30:7]
wire [4:0] auto_atl_out_0_d_bits_source_0 = auto_atl_out_0_d_bits_source; // @[Top.scala:30:7]
wire [4:0] auto_atl_out_0_d_bits_sink_0 = auto_atl_out_0_d_bits_sink; // @[Top.scala:30:7]
wire auto_atl_out_0_d_bits_denied_0 = auto_atl_out_0_d_bits_denied; // @[Top.scala:30:7]
wire [255:0] auto_atl_out_0_d_bits_data_0 = auto_atl_out_0_d_bits_data; // @[Top.scala:30:7]
wire auto_atl_out_0_d_bits_corrupt_0 = auto_atl_out_0_d_bits_corrupt; // @[Top.scala:30:7]
wire io_cmd_valid_0 = io_cmd_valid; // @[Top.scala:30:7]
wire [6:0] io_cmd_bits_inst_funct_0 = io_cmd_bits_inst_funct; // @[Top.scala:30:7]
wire [4:0] io_cmd_bits_inst_rs2_0 = io_cmd_bits_inst_rs2; // @[Top.scala:30:7]
wire [4:0] io_cmd_bits_inst_rs1_0 = io_cmd_bits_inst_rs1; // @[Top.scala:30:7]
wire io_cmd_bits_inst_xd_0 = io_cmd_bits_inst_xd; // @[Top.scala:30:7]
wire io_cmd_bits_inst_xs1_0 = io_cmd_bits_inst_xs1; // @[Top.scala:30:7]
wire io_cmd_bits_inst_xs2_0 = io_cmd_bits_inst_xs2; // @[Top.scala:30:7]
wire [4:0] io_cmd_bits_inst_rd_0 = io_cmd_bits_inst_rd; // @[Top.scala:30:7]
wire [6:0] io_cmd_bits_inst_opcode_0 = io_cmd_bits_inst_opcode; // @[Top.scala:30:7]
wire [63:0] io_cmd_bits_rs1_0 = io_cmd_bits_rs1; // @[Top.scala:30:7]
wire [63:0] io_cmd_bits_rs2_0 = io_cmd_bits_rs2; // @[Top.scala:30:7]
wire io_cmd_bits_status_debug_0 = io_cmd_bits_status_debug; // @[Top.scala:30:7]
wire io_cmd_bits_status_cease_0 = io_cmd_bits_status_cease; // @[Top.scala:30:7]
wire io_cmd_bits_status_wfi_0 = io_cmd_bits_status_wfi; // @[Top.scala:30:7]
wire [31:0] io_cmd_bits_status_isa_0 = io_cmd_bits_status_isa; // @[Top.scala:30:7]
wire [1:0] io_cmd_bits_status_dprv_0 = io_cmd_bits_status_dprv; // @[Top.scala:30:7]
wire io_cmd_bits_status_dv_0 = io_cmd_bits_status_dv; // @[Top.scala:30:7]
wire [1:0] io_cmd_bits_status_prv_0 = io_cmd_bits_status_prv; // @[Top.scala:30:7]
wire io_cmd_bits_status_v_0 = io_cmd_bits_status_v; // @[Top.scala:30:7]
wire io_cmd_bits_status_sd_0 = io_cmd_bits_status_sd; // @[Top.scala:30:7]
wire [22:0] io_cmd_bits_status_zero2_0 = io_cmd_bits_status_zero2; // @[Top.scala:30:7]
wire io_cmd_bits_status_mpv_0 = io_cmd_bits_status_mpv; // @[Top.scala:30:7]
wire io_cmd_bits_status_gva_0 = io_cmd_bits_status_gva; // @[Top.scala:30:7]
wire io_cmd_bits_status_mbe_0 = io_cmd_bits_status_mbe; // @[Top.scala:30:7]
wire io_cmd_bits_status_sbe_0 = io_cmd_bits_status_sbe; // @[Top.scala:30:7]
wire [1:0] io_cmd_bits_status_sxl_0 = io_cmd_bits_status_sxl; // @[Top.scala:30:7]
wire [1:0] io_cmd_bits_status_uxl_0 = io_cmd_bits_status_uxl; // @[Top.scala:30:7]
wire io_cmd_bits_status_sd_rv32_0 = io_cmd_bits_status_sd_rv32; // @[Top.scala:30:7]
wire [7:0] io_cmd_bits_status_zero1_0 = io_cmd_bits_status_zero1; // @[Top.scala:30:7]
wire io_cmd_bits_status_tsr_0 = io_cmd_bits_status_tsr; // @[Top.scala:30:7]
wire io_cmd_bits_status_tw_0 = io_cmd_bits_status_tw; // @[Top.scala:30:7]
wire io_cmd_bits_status_tvm_0 = io_cmd_bits_status_tvm; // @[Top.scala:30:7]
wire io_cmd_bits_status_mxr_0 = io_cmd_bits_status_mxr; // @[Top.scala:30:7]
wire io_cmd_bits_status_sum_0 = io_cmd_bits_status_sum; // @[Top.scala:30:7]
wire io_cmd_bits_status_mprv_0 = io_cmd_bits_status_mprv; // @[Top.scala:30:7]
wire [1:0] io_cmd_bits_status_xs_0 = io_cmd_bits_status_xs; // @[Top.scala:30:7]
wire [1:0] io_cmd_bits_status_fs_0 = io_cmd_bits_status_fs; // @[Top.scala:30:7]
wire [1:0] io_cmd_bits_status_mpp_0 = io_cmd_bits_status_mpp; // @[Top.scala:30:7]
wire [1:0] io_cmd_bits_status_vs_0 = io_cmd_bits_status_vs; // @[Top.scala:30:7]
wire io_cmd_bits_status_spp_0 = io_cmd_bits_status_spp; // @[Top.scala:30:7]
wire io_cmd_bits_status_mpie_0 = io_cmd_bits_status_mpie; // @[Top.scala:30:7]
wire io_cmd_bits_status_ube_0 = io_cmd_bits_status_ube; // @[Top.scala:30:7]
wire io_cmd_bits_status_spie_0 = io_cmd_bits_status_spie; // @[Top.scala:30:7]
wire io_cmd_bits_status_upie_0 = io_cmd_bits_status_upie; // @[Top.scala:30:7]
wire io_cmd_bits_status_mie_0 = io_cmd_bits_status_mie; // @[Top.scala:30:7]
wire io_cmd_bits_status_hie_0 = io_cmd_bits_status_hie; // @[Top.scala:30:7]
wire io_cmd_bits_status_sie_0 = io_cmd_bits_status_sie; // @[Top.scala:30:7]
wire io_cmd_bits_status_uie_0 = io_cmd_bits_status_uie; // @[Top.scala:30:7]
wire io_resp_ready_0 = io_resp_ready; // @[Top.scala:30:7]
wire io_mem_req_ready_0 = io_mem_req_ready; // @[Top.scala:30:7]
wire io_mem_resp_valid_0 = io_mem_resp_valid; // @[Top.scala:30:7]
wire [39:0] io_mem_resp_bits_addr_0 = io_mem_resp_bits_addr; // @[Top.scala:30:7]
wire [7:0] io_mem_resp_bits_tag_0 = io_mem_resp_bits_tag; // @[Top.scala:30:7]
wire [4:0] io_mem_resp_bits_cmd_0 = io_mem_resp_bits_cmd; // @[Top.scala:30:7]
wire [1:0] io_mem_resp_bits_size_0 = io_mem_resp_bits_size; // @[Top.scala:30:7]
wire io_mem_resp_bits_signed_0 = io_mem_resp_bits_signed; // @[Top.scala:30:7]
wire [1:0] io_mem_resp_bits_dprv_0 = io_mem_resp_bits_dprv; // @[Top.scala:30:7]
wire io_mem_resp_bits_dv_0 = io_mem_resp_bits_dv; // @[Top.scala:30:7]
wire [63:0] io_mem_resp_bits_data_0 = io_mem_resp_bits_data; // @[Top.scala:30:7]
wire [7:0] io_mem_resp_bits_mask_0 = io_mem_resp_bits_mask; // @[Top.scala:30:7]
wire io_mem_resp_bits_replay_0 = io_mem_resp_bits_replay; // @[Top.scala:30:7]
wire io_mem_resp_bits_has_data_0 = io_mem_resp_bits_has_data; // @[Top.scala:30:7]
wire [63:0] io_mem_resp_bits_data_word_bypass_0 = io_mem_resp_bits_data_word_bypass; // @[Top.scala:30:7]
wire [63:0] io_mem_resp_bits_data_raw_0 = io_mem_resp_bits_data_raw; // @[Top.scala:30:7]
wire [63:0] io_mem_resp_bits_store_data_0 = io_mem_resp_bits_store_data; // @[Top.scala:30:7]
wire io_exception_0 = io_exception; // @[Top.scala:30:7]
wire io_ptw_0_req_ready_0 = io_ptw_0_req_ready; // @[Top.scala:30:7]
wire io_ptw_0_resp_valid_0 = io_ptw_0_resp_valid; // @[Top.scala:30:7]
wire io_ptw_0_resp_bits_ae_ptw_0 = io_ptw_0_resp_bits_ae_ptw; // @[Top.scala:30:7]
wire io_ptw_0_resp_bits_ae_final_0 = io_ptw_0_resp_bits_ae_final; // @[Top.scala:30:7]
wire io_ptw_0_resp_bits_pf_0 = io_ptw_0_resp_bits_pf; // @[Top.scala:30:7]
wire io_ptw_0_resp_bits_gf_0 = io_ptw_0_resp_bits_gf; // @[Top.scala:30:7]
wire io_ptw_0_resp_bits_hr_0 = io_ptw_0_resp_bits_hr; // @[Top.scala:30:7]
wire io_ptw_0_resp_bits_hw_0 = io_ptw_0_resp_bits_hw; // @[Top.scala:30:7]
wire io_ptw_0_resp_bits_hx_0 = io_ptw_0_resp_bits_hx; // @[Top.scala:30:7]
wire [9:0] io_ptw_0_resp_bits_pte_reserved_for_future_0 = io_ptw_0_resp_bits_pte_reserved_for_future; // @[Top.scala:30:7]
wire [43:0] io_ptw_0_resp_bits_pte_ppn_0 = io_ptw_0_resp_bits_pte_ppn; // @[Top.scala:30:7]
wire [1:0] io_ptw_0_resp_bits_pte_reserved_for_software_0 = io_ptw_0_resp_bits_pte_reserved_for_software; // @[Top.scala:30:7]
wire io_ptw_0_resp_bits_pte_d_0 = io_ptw_0_resp_bits_pte_d; // @[Top.scala:30:7]
wire io_ptw_0_resp_bits_pte_a_0 = io_ptw_0_resp_bits_pte_a; // @[Top.scala:30:7]
wire io_ptw_0_resp_bits_pte_g_0 = io_ptw_0_resp_bits_pte_g; // @[Top.scala:30:7]
wire io_ptw_0_resp_bits_pte_u_0 = io_ptw_0_resp_bits_pte_u; // @[Top.scala:30:7]
wire io_ptw_0_resp_bits_pte_x_0 = io_ptw_0_resp_bits_pte_x; // @[Top.scala:30:7]
wire io_ptw_0_resp_bits_pte_w_0 = io_ptw_0_resp_bits_pte_w; // @[Top.scala:30:7]
wire io_ptw_0_resp_bits_pte_r_0 = io_ptw_0_resp_bits_pte_r; // @[Top.scala:30:7]
wire io_ptw_0_resp_bits_pte_v_0 = io_ptw_0_resp_bits_pte_v; // @[Top.scala:30:7]
wire [1:0] io_ptw_0_resp_bits_level_0 = io_ptw_0_resp_bits_level; // @[Top.scala:30:7]
wire io_ptw_0_resp_bits_homogeneous_0 = io_ptw_0_resp_bits_homogeneous; // @[Top.scala:30:7]
wire io_ptw_0_resp_bits_gpa_valid_0 = io_ptw_0_resp_bits_gpa_valid; // @[Top.scala:30:7]
wire [38:0] io_ptw_0_resp_bits_gpa_bits_0 = io_ptw_0_resp_bits_gpa_bits; // @[Top.scala:30:7]
wire io_ptw_0_resp_bits_gpa_is_pte_0 = io_ptw_0_resp_bits_gpa_is_pte; // @[Top.scala:30:7]
wire [3:0] io_ptw_0_ptbr_mode_0 = io_ptw_0_ptbr_mode; // @[Top.scala:30:7]
wire [43:0] io_ptw_0_ptbr_ppn_0 = io_ptw_0_ptbr_ppn; // @[Top.scala:30:7]
wire io_ptw_0_status_debug_0 = io_ptw_0_status_debug; // @[Top.scala:30:7]
wire io_ptw_0_status_cease_0 = io_ptw_0_status_cease; // @[Top.scala:30:7]
wire io_ptw_0_status_wfi_0 = io_ptw_0_status_wfi; // @[Top.scala:30:7]
wire [31:0] io_ptw_0_status_isa_0 = io_ptw_0_status_isa; // @[Top.scala:30:7]
wire [1:0] io_ptw_0_status_dprv_0 = io_ptw_0_status_dprv; // @[Top.scala:30:7]
wire io_ptw_0_status_dv_0 = io_ptw_0_status_dv; // @[Top.scala:30:7]
wire [1:0] io_ptw_0_status_prv_0 = io_ptw_0_status_prv; // @[Top.scala:30:7]
wire io_ptw_0_status_v_0 = io_ptw_0_status_v; // @[Top.scala:30:7]
wire io_ptw_0_status_mpv_0 = io_ptw_0_status_mpv; // @[Top.scala:30:7]
wire io_ptw_0_status_gva_0 = io_ptw_0_status_gva; // @[Top.scala:30:7]
wire io_ptw_0_status_tsr_0 = io_ptw_0_status_tsr; // @[Top.scala:30:7]
wire io_ptw_0_status_tw_0 = io_ptw_0_status_tw; // @[Top.scala:30:7]
wire io_ptw_0_status_tvm_0 = io_ptw_0_status_tvm; // @[Top.scala:30:7]
wire io_ptw_0_status_mxr_0 = io_ptw_0_status_mxr; // @[Top.scala:30:7]
wire io_ptw_0_status_sum_0 = io_ptw_0_status_sum; // @[Top.scala:30:7]
wire io_ptw_0_status_mprv_0 = io_ptw_0_status_mprv; // @[Top.scala:30:7]
wire [1:0] io_ptw_0_status_fs_0 = io_ptw_0_status_fs; // @[Top.scala:30:7]
wire [1:0] io_ptw_0_status_mpp_0 = io_ptw_0_status_mpp; // @[Top.scala:30:7]
wire io_ptw_0_status_spp_0 = io_ptw_0_status_spp; // @[Top.scala:30:7]
wire io_ptw_0_status_mpie_0 = io_ptw_0_status_mpie; // @[Top.scala:30:7]
wire io_ptw_0_status_spie_0 = io_ptw_0_status_spie; // @[Top.scala:30:7]
wire io_ptw_0_status_mie_0 = io_ptw_0_status_mie; // @[Top.scala:30:7]
wire io_ptw_0_status_sie_0 = io_ptw_0_status_sie; // @[Top.scala:30:7]
wire io_ptw_0_hstatus_spvp_0 = io_ptw_0_hstatus_spvp; // @[Top.scala:30:7]
wire io_ptw_0_hstatus_spv_0 = io_ptw_0_hstatus_spv; // @[Top.scala:30:7]
wire io_ptw_0_hstatus_gva_0 = io_ptw_0_hstatus_gva; // @[Top.scala:30:7]
wire io_ptw_0_gstatus_debug_0 = io_ptw_0_gstatus_debug; // @[Top.scala:30:7]
wire io_ptw_0_gstatus_cease_0 = io_ptw_0_gstatus_cease; // @[Top.scala:30:7]
wire io_ptw_0_gstatus_wfi_0 = io_ptw_0_gstatus_wfi; // @[Top.scala:30:7]
wire [31:0] io_ptw_0_gstatus_isa_0 = io_ptw_0_gstatus_isa; // @[Top.scala:30:7]
wire [1:0] io_ptw_0_gstatus_dprv_0 = io_ptw_0_gstatus_dprv; // @[Top.scala:30:7]
wire io_ptw_0_gstatus_dv_0 = io_ptw_0_gstatus_dv; // @[Top.scala:30:7]
wire [1:0] io_ptw_0_gstatus_prv_0 = io_ptw_0_gstatus_prv; // @[Top.scala:30:7]
wire io_ptw_0_gstatus_v_0 = io_ptw_0_gstatus_v; // @[Top.scala:30:7]
wire [22:0] io_ptw_0_gstatus_zero2_0 = io_ptw_0_gstatus_zero2; // @[Top.scala:30:7]
wire io_ptw_0_gstatus_mpv_0 = io_ptw_0_gstatus_mpv; // @[Top.scala:30:7]
wire io_ptw_0_gstatus_gva_0 = io_ptw_0_gstatus_gva; // @[Top.scala:30:7]
wire io_ptw_0_gstatus_mbe_0 = io_ptw_0_gstatus_mbe; // @[Top.scala:30:7]
wire io_ptw_0_gstatus_sbe_0 = io_ptw_0_gstatus_sbe; // @[Top.scala:30:7]
wire [1:0] io_ptw_0_gstatus_sxl_0 = io_ptw_0_gstatus_sxl; // @[Top.scala:30:7]
wire [7:0] io_ptw_0_gstatus_zero1_0 = io_ptw_0_gstatus_zero1; // @[Top.scala:30:7]
wire io_ptw_0_gstatus_tsr_0 = io_ptw_0_gstatus_tsr; // @[Top.scala:30:7]
wire io_ptw_0_gstatus_tw_0 = io_ptw_0_gstatus_tw; // @[Top.scala:30:7]
wire io_ptw_0_gstatus_tvm_0 = io_ptw_0_gstatus_tvm; // @[Top.scala:30:7]
wire io_ptw_0_gstatus_mxr_0 = io_ptw_0_gstatus_mxr; // @[Top.scala:30:7]
wire io_ptw_0_gstatus_sum_0 = io_ptw_0_gstatus_sum; // @[Top.scala:30:7]
wire io_ptw_0_gstatus_mprv_0 = io_ptw_0_gstatus_mprv; // @[Top.scala:30:7]
wire [1:0] io_ptw_0_gstatus_fs_0 = io_ptw_0_gstatus_fs; // @[Top.scala:30:7]
wire [1:0] io_ptw_0_gstatus_mpp_0 = io_ptw_0_gstatus_mpp; // @[Top.scala:30:7]
wire [1:0] io_ptw_0_gstatus_vs_0 = io_ptw_0_gstatus_vs; // @[Top.scala:30:7]
wire io_ptw_0_gstatus_spp_0 = io_ptw_0_gstatus_spp; // @[Top.scala:30:7]
wire io_ptw_0_gstatus_mpie_0 = io_ptw_0_gstatus_mpie; // @[Top.scala:30:7]
wire io_ptw_0_gstatus_ube_0 = io_ptw_0_gstatus_ube; // @[Top.scala:30:7]
wire io_ptw_0_gstatus_spie_0 = io_ptw_0_gstatus_spie; // @[Top.scala:30:7]
wire io_ptw_0_gstatus_upie_0 = io_ptw_0_gstatus_upie; // @[Top.scala:30:7]
wire io_ptw_0_gstatus_mie_0 = io_ptw_0_gstatus_mie; // @[Top.scala:30:7]
wire io_ptw_0_gstatus_hie_0 = io_ptw_0_gstatus_hie; // @[Top.scala:30:7]
wire io_ptw_0_gstatus_sie_0 = io_ptw_0_gstatus_sie; // @[Top.scala:30:7]
wire io_ptw_0_gstatus_uie_0 = io_ptw_0_gstatus_uie; // @[Top.scala:30:7]
wire io_ptw_0_pmp_0_cfg_l_0 = io_ptw_0_pmp_0_cfg_l; // @[Top.scala:30:7]
wire [1:0] io_ptw_0_pmp_0_cfg_a_0 = io_ptw_0_pmp_0_cfg_a; // @[Top.scala:30:7]
wire io_ptw_0_pmp_0_cfg_x_0 = io_ptw_0_pmp_0_cfg_x; // @[Top.scala:30:7]
wire io_ptw_0_pmp_0_cfg_w_0 = io_ptw_0_pmp_0_cfg_w; // @[Top.scala:30:7]
wire io_ptw_0_pmp_0_cfg_r_0 = io_ptw_0_pmp_0_cfg_r; // @[Top.scala:30:7]
wire [29:0] io_ptw_0_pmp_0_addr_0 = io_ptw_0_pmp_0_addr; // @[Top.scala:30:7]
wire [31:0] io_ptw_0_pmp_0_mask_0 = io_ptw_0_pmp_0_mask; // @[Top.scala:30:7]
wire io_ptw_0_pmp_1_cfg_l_0 = io_ptw_0_pmp_1_cfg_l; // @[Top.scala:30:7]
wire [1:0] io_ptw_0_pmp_1_cfg_a_0 = io_ptw_0_pmp_1_cfg_a; // @[Top.scala:30:7]
wire io_ptw_0_pmp_1_cfg_x_0 = io_ptw_0_pmp_1_cfg_x; // @[Top.scala:30:7]
wire io_ptw_0_pmp_1_cfg_w_0 = io_ptw_0_pmp_1_cfg_w; // @[Top.scala:30:7]
wire io_ptw_0_pmp_1_cfg_r_0 = io_ptw_0_pmp_1_cfg_r; // @[Top.scala:30:7]
wire [29:0] io_ptw_0_pmp_1_addr_0 = io_ptw_0_pmp_1_addr; // @[Top.scala:30:7]
wire [31:0] io_ptw_0_pmp_1_mask_0 = io_ptw_0_pmp_1_mask; // @[Top.scala:30:7]
wire io_ptw_0_pmp_2_cfg_l_0 = io_ptw_0_pmp_2_cfg_l; // @[Top.scala:30:7]
wire [1:0] io_ptw_0_pmp_2_cfg_a_0 = io_ptw_0_pmp_2_cfg_a; // @[Top.scala:30:7]
wire io_ptw_0_pmp_2_cfg_x_0 = io_ptw_0_pmp_2_cfg_x; // @[Top.scala:30:7]
wire io_ptw_0_pmp_2_cfg_w_0 = io_ptw_0_pmp_2_cfg_w; // @[Top.scala:30:7]
wire io_ptw_0_pmp_2_cfg_r_0 = io_ptw_0_pmp_2_cfg_r; // @[Top.scala:30:7]
wire [29:0] io_ptw_0_pmp_2_addr_0 = io_ptw_0_pmp_2_addr; // @[Top.scala:30:7]
wire [31:0] io_ptw_0_pmp_2_mask_0 = io_ptw_0_pmp_2_mask; // @[Top.scala:30:7]
wire io_ptw_0_pmp_3_cfg_l_0 = io_ptw_0_pmp_3_cfg_l; // @[Top.scala:30:7]
wire [1:0] io_ptw_0_pmp_3_cfg_a_0 = io_ptw_0_pmp_3_cfg_a; // @[Top.scala:30:7]
wire io_ptw_0_pmp_3_cfg_x_0 = io_ptw_0_pmp_3_cfg_x; // @[Top.scala:30:7]
wire io_ptw_0_pmp_3_cfg_w_0 = io_ptw_0_pmp_3_cfg_w; // @[Top.scala:30:7]
wire io_ptw_0_pmp_3_cfg_r_0 = io_ptw_0_pmp_3_cfg_r; // @[Top.scala:30:7]
wire [29:0] io_ptw_0_pmp_3_addr_0 = io_ptw_0_pmp_3_addr; // @[Top.scala:30:7]
wire [31:0] io_ptw_0_pmp_3_mask_0 = io_ptw_0_pmp_3_mask; // @[Top.scala:30:7]
wire io_ptw_0_pmp_4_cfg_l_0 = io_ptw_0_pmp_4_cfg_l; // @[Top.scala:30:7]
wire [1:0] io_ptw_0_pmp_4_cfg_a_0 = io_ptw_0_pmp_4_cfg_a; // @[Top.scala:30:7]
wire io_ptw_0_pmp_4_cfg_x_0 = io_ptw_0_pmp_4_cfg_x; // @[Top.scala:30:7]
wire io_ptw_0_pmp_4_cfg_w_0 = io_ptw_0_pmp_4_cfg_w; // @[Top.scala:30:7]
wire io_ptw_0_pmp_4_cfg_r_0 = io_ptw_0_pmp_4_cfg_r; // @[Top.scala:30:7]
wire [29:0] io_ptw_0_pmp_4_addr_0 = io_ptw_0_pmp_4_addr; // @[Top.scala:30:7]
wire [31:0] io_ptw_0_pmp_4_mask_0 = io_ptw_0_pmp_4_mask; // @[Top.scala:30:7]
wire io_ptw_0_pmp_5_cfg_l_0 = io_ptw_0_pmp_5_cfg_l; // @[Top.scala:30:7]
wire [1:0] io_ptw_0_pmp_5_cfg_a_0 = io_ptw_0_pmp_5_cfg_a; // @[Top.scala:30:7]
wire io_ptw_0_pmp_5_cfg_x_0 = io_ptw_0_pmp_5_cfg_x; // @[Top.scala:30:7]
wire io_ptw_0_pmp_5_cfg_w_0 = io_ptw_0_pmp_5_cfg_w; // @[Top.scala:30:7]
wire io_ptw_0_pmp_5_cfg_r_0 = io_ptw_0_pmp_5_cfg_r; // @[Top.scala:30:7]
wire [29:0] io_ptw_0_pmp_5_addr_0 = io_ptw_0_pmp_5_addr; // @[Top.scala:30:7]
wire [31:0] io_ptw_0_pmp_5_mask_0 = io_ptw_0_pmp_5_mask; // @[Top.scala:30:7]
wire io_ptw_0_pmp_6_cfg_l_0 = io_ptw_0_pmp_6_cfg_l; // @[Top.scala:30:7]
wire [1:0] io_ptw_0_pmp_6_cfg_a_0 = io_ptw_0_pmp_6_cfg_a; // @[Top.scala:30:7]
wire io_ptw_0_pmp_6_cfg_x_0 = io_ptw_0_pmp_6_cfg_x; // @[Top.scala:30:7]
wire io_ptw_0_pmp_6_cfg_w_0 = io_ptw_0_pmp_6_cfg_w; // @[Top.scala:30:7]
wire io_ptw_0_pmp_6_cfg_r_0 = io_ptw_0_pmp_6_cfg_r; // @[Top.scala:30:7]
wire [29:0] io_ptw_0_pmp_6_addr_0 = io_ptw_0_pmp_6_addr; // @[Top.scala:30:7]
wire [31:0] io_ptw_0_pmp_6_mask_0 = io_ptw_0_pmp_6_mask; // @[Top.scala:30:7]
wire io_ptw_0_pmp_7_cfg_l_0 = io_ptw_0_pmp_7_cfg_l; // @[Top.scala:30:7]
wire [1:0] io_ptw_0_pmp_7_cfg_a_0 = io_ptw_0_pmp_7_cfg_a; // @[Top.scala:30:7]
wire io_ptw_0_pmp_7_cfg_x_0 = io_ptw_0_pmp_7_cfg_x; // @[Top.scala:30:7]
wire io_ptw_0_pmp_7_cfg_w_0 = io_ptw_0_pmp_7_cfg_w; // @[Top.scala:30:7]
wire io_ptw_0_pmp_7_cfg_r_0 = io_ptw_0_pmp_7_cfg_r; // @[Top.scala:30:7]
wire [29:0] io_ptw_0_pmp_7_addr_0 = io_ptw_0_pmp_7_addr; // @[Top.scala:30:7]
wire [31:0] io_ptw_0_pmp_7_mask_0 = io_ptw_0_pmp_7_mask; // @[Top.scala:30:7]
wire io_ptw_0_customCSRs_csrs_0_ren_0 = io_ptw_0_customCSRs_csrs_0_ren; // @[Top.scala:30:7]
wire io_ptw_0_customCSRs_csrs_0_wen_0 = io_ptw_0_customCSRs_csrs_0_wen; // @[Top.scala:30:7]
wire [63:0] io_ptw_0_customCSRs_csrs_0_wdata_0 = io_ptw_0_customCSRs_csrs_0_wdata; // @[Top.scala:30:7]
wire [63:0] io_ptw_0_customCSRs_csrs_0_value_0 = io_ptw_0_customCSRs_csrs_0_value; // @[Top.scala:30:7]
wire io_ptw_0_customCSRs_csrs_1_ren_0 = io_ptw_0_customCSRs_csrs_1_ren; // @[Top.scala:30:7]
wire io_ptw_0_customCSRs_csrs_1_wen_0 = io_ptw_0_customCSRs_csrs_1_wen; // @[Top.scala:30:7]
wire [63:0] io_ptw_0_customCSRs_csrs_1_wdata_0 = io_ptw_0_customCSRs_csrs_1_wdata; // @[Top.scala:30:7]
wire [63:0] io_ptw_0_customCSRs_csrs_1_value_0 = io_ptw_0_customCSRs_csrs_1_value; // @[Top.scala:30:7]
wire io_ptw_0_customCSRs_csrs_2_ren_0 = io_ptw_0_customCSRs_csrs_2_ren; // @[Top.scala:30:7]
wire io_ptw_0_customCSRs_csrs_2_wen_0 = io_ptw_0_customCSRs_csrs_2_wen; // @[Top.scala:30:7]
wire [63:0] io_ptw_0_customCSRs_csrs_2_wdata_0 = io_ptw_0_customCSRs_csrs_2_wdata; // @[Top.scala:30:7]
wire [63:0] io_ptw_0_customCSRs_csrs_2_value_0 = io_ptw_0_customCSRs_csrs_2_value; // @[Top.scala:30:7]
wire io_ptw_0_customCSRs_csrs_3_ren_0 = io_ptw_0_customCSRs_csrs_3_ren; // @[Top.scala:30:7]
wire io_ptw_0_customCSRs_csrs_3_wen_0 = io_ptw_0_customCSRs_csrs_3_wen; // @[Top.scala:30:7]
wire [63:0] io_ptw_0_customCSRs_csrs_3_wdata_0 = io_ptw_0_customCSRs_csrs_3_wdata; // @[Top.scala:30:7]
wire [63:0] io_ptw_0_customCSRs_csrs_3_value_0 = io_ptw_0_customCSRs_csrs_3_value; // @[Top.scala:30:7]
wire io_ptw_1_req_ready_0 = io_ptw_1_req_ready; // @[Top.scala:30:7]
wire io_ptw_1_resp_valid_0 = io_ptw_1_resp_valid; // @[Top.scala:30:7]
wire io_ptw_1_resp_bits_ae_ptw_0 = io_ptw_1_resp_bits_ae_ptw; // @[Top.scala:30:7]
wire io_ptw_1_resp_bits_ae_final_0 = io_ptw_1_resp_bits_ae_final; // @[Top.scala:30:7]
wire io_ptw_1_resp_bits_pf_0 = io_ptw_1_resp_bits_pf; // @[Top.scala:30:7]
wire io_ptw_1_resp_bits_gf_0 = io_ptw_1_resp_bits_gf; // @[Top.scala:30:7]
wire io_ptw_1_resp_bits_hr_0 = io_ptw_1_resp_bits_hr; // @[Top.scala:30:7]
wire io_ptw_1_resp_bits_hw_0 = io_ptw_1_resp_bits_hw; // @[Top.scala:30:7]
wire io_ptw_1_resp_bits_hx_0 = io_ptw_1_resp_bits_hx; // @[Top.scala:30:7]
wire [9:0] io_ptw_1_resp_bits_pte_reserved_for_future_0 = io_ptw_1_resp_bits_pte_reserved_for_future; // @[Top.scala:30:7]
wire [43:0] io_ptw_1_resp_bits_pte_ppn_0 = io_ptw_1_resp_bits_pte_ppn; // @[Top.scala:30:7]
wire [1:0] io_ptw_1_resp_bits_pte_reserved_for_software_0 = io_ptw_1_resp_bits_pte_reserved_for_software; // @[Top.scala:30:7]
wire io_ptw_1_resp_bits_pte_d_0 = io_ptw_1_resp_bits_pte_d; // @[Top.scala:30:7]
wire io_ptw_1_resp_bits_pte_a_0 = io_ptw_1_resp_bits_pte_a; // @[Top.scala:30:7]
wire io_ptw_1_resp_bits_pte_g_0 = io_ptw_1_resp_bits_pte_g; // @[Top.scala:30:7]
wire io_ptw_1_resp_bits_pte_u_0 = io_ptw_1_resp_bits_pte_u; // @[Top.scala:30:7]
wire io_ptw_1_resp_bits_pte_x_0 = io_ptw_1_resp_bits_pte_x; // @[Top.scala:30:7]
wire io_ptw_1_resp_bits_pte_w_0 = io_ptw_1_resp_bits_pte_w; // @[Top.scala:30:7]
wire io_ptw_1_resp_bits_pte_r_0 = io_ptw_1_resp_bits_pte_r; // @[Top.scala:30:7]
wire io_ptw_1_resp_bits_pte_v_0 = io_ptw_1_resp_bits_pte_v; // @[Top.scala:30:7]
wire [1:0] io_ptw_1_resp_bits_level_0 = io_ptw_1_resp_bits_level; // @[Top.scala:30:7]
wire io_ptw_1_resp_bits_homogeneous_0 = io_ptw_1_resp_bits_homogeneous; // @[Top.scala:30:7]
wire io_ptw_1_resp_bits_gpa_valid_0 = io_ptw_1_resp_bits_gpa_valid; // @[Top.scala:30:7]
wire [38:0] io_ptw_1_resp_bits_gpa_bits_0 = io_ptw_1_resp_bits_gpa_bits; // @[Top.scala:30:7]
wire io_ptw_1_resp_bits_gpa_is_pte_0 = io_ptw_1_resp_bits_gpa_is_pte; // @[Top.scala:30:7]
wire [3:0] io_ptw_1_ptbr_mode_0 = io_ptw_1_ptbr_mode; // @[Top.scala:30:7]
wire [43:0] io_ptw_1_ptbr_ppn_0 = io_ptw_1_ptbr_ppn; // @[Top.scala:30:7]
wire io_ptw_1_status_debug_0 = io_ptw_1_status_debug; // @[Top.scala:30:7]
wire io_ptw_1_status_cease_0 = io_ptw_1_status_cease; // @[Top.scala:30:7]
wire io_ptw_1_status_wfi_0 = io_ptw_1_status_wfi; // @[Top.scala:30:7]
wire [31:0] io_ptw_1_status_isa_0 = io_ptw_1_status_isa; // @[Top.scala:30:7]
wire [1:0] io_ptw_1_status_dprv_0 = io_ptw_1_status_dprv; // @[Top.scala:30:7]
wire io_ptw_1_status_dv_0 = io_ptw_1_status_dv; // @[Top.scala:30:7]
wire [1:0] io_ptw_1_status_prv_0 = io_ptw_1_status_prv; // @[Top.scala:30:7]
wire io_ptw_1_status_v_0 = io_ptw_1_status_v; // @[Top.scala:30:7]
wire io_ptw_1_status_mpv_0 = io_ptw_1_status_mpv; // @[Top.scala:30:7]
wire io_ptw_1_status_gva_0 = io_ptw_1_status_gva; // @[Top.scala:30:7]
wire io_ptw_1_status_tsr_0 = io_ptw_1_status_tsr; // @[Top.scala:30:7]
wire io_ptw_1_status_tw_0 = io_ptw_1_status_tw; // @[Top.scala:30:7]
wire io_ptw_1_status_tvm_0 = io_ptw_1_status_tvm; // @[Top.scala:30:7]
wire io_ptw_1_status_mxr_0 = io_ptw_1_status_mxr; // @[Top.scala:30:7]
wire io_ptw_1_status_sum_0 = io_ptw_1_status_sum; // @[Top.scala:30:7]
wire io_ptw_1_status_mprv_0 = io_ptw_1_status_mprv; // @[Top.scala:30:7]
wire [1:0] io_ptw_1_status_fs_0 = io_ptw_1_status_fs; // @[Top.scala:30:7]
wire [1:0] io_ptw_1_status_mpp_0 = io_ptw_1_status_mpp; // @[Top.scala:30:7]
wire io_ptw_1_status_spp_0 = io_ptw_1_status_spp; // @[Top.scala:30:7]
wire io_ptw_1_status_mpie_0 = io_ptw_1_status_mpie; // @[Top.scala:30:7]
wire io_ptw_1_status_spie_0 = io_ptw_1_status_spie; // @[Top.scala:30:7]
wire io_ptw_1_status_mie_0 = io_ptw_1_status_mie; // @[Top.scala:30:7]
wire io_ptw_1_status_sie_0 = io_ptw_1_status_sie; // @[Top.scala:30:7]
wire io_ptw_1_hstatus_spvp_0 = io_ptw_1_hstatus_spvp; // @[Top.scala:30:7]
wire io_ptw_1_hstatus_spv_0 = io_ptw_1_hstatus_spv; // @[Top.scala:30:7]
wire io_ptw_1_hstatus_gva_0 = io_ptw_1_hstatus_gva; // @[Top.scala:30:7]
wire io_ptw_1_gstatus_debug_0 = io_ptw_1_gstatus_debug; // @[Top.scala:30:7]
wire io_ptw_1_gstatus_cease_0 = io_ptw_1_gstatus_cease; // @[Top.scala:30:7]
wire io_ptw_1_gstatus_wfi_0 = io_ptw_1_gstatus_wfi; // @[Top.scala:30:7]
wire [31:0] io_ptw_1_gstatus_isa_0 = io_ptw_1_gstatus_isa; // @[Top.scala:30:7]
wire [1:0] io_ptw_1_gstatus_dprv_0 = io_ptw_1_gstatus_dprv; // @[Top.scala:30:7]
wire io_ptw_1_gstatus_dv_0 = io_ptw_1_gstatus_dv; // @[Top.scala:30:7]
wire [1:0] io_ptw_1_gstatus_prv_0 = io_ptw_1_gstatus_prv; // @[Top.scala:30:7]
wire io_ptw_1_gstatus_v_0 = io_ptw_1_gstatus_v; // @[Top.scala:30:7]
wire [22:0] io_ptw_1_gstatus_zero2_0 = io_ptw_1_gstatus_zero2; // @[Top.scala:30:7]
wire io_ptw_1_gstatus_mpv_0 = io_ptw_1_gstatus_mpv; // @[Top.scala:30:7]
wire io_ptw_1_gstatus_gva_0 = io_ptw_1_gstatus_gva; // @[Top.scala:30:7]
wire io_ptw_1_gstatus_mbe_0 = io_ptw_1_gstatus_mbe; // @[Top.scala:30:7]
wire io_ptw_1_gstatus_sbe_0 = io_ptw_1_gstatus_sbe; // @[Top.scala:30:7]
wire [1:0] io_ptw_1_gstatus_sxl_0 = io_ptw_1_gstatus_sxl; // @[Top.scala:30:7]
wire [7:0] io_ptw_1_gstatus_zero1_0 = io_ptw_1_gstatus_zero1; // @[Top.scala:30:7]
wire io_ptw_1_gstatus_tsr_0 = io_ptw_1_gstatus_tsr; // @[Top.scala:30:7]
wire io_ptw_1_gstatus_tw_0 = io_ptw_1_gstatus_tw; // @[Top.scala:30:7]
wire io_ptw_1_gstatus_tvm_0 = io_ptw_1_gstatus_tvm; // @[Top.scala:30:7]
wire io_ptw_1_gstatus_mxr_0 = io_ptw_1_gstatus_mxr; // @[Top.scala:30:7]
wire io_ptw_1_gstatus_sum_0 = io_ptw_1_gstatus_sum; // @[Top.scala:30:7]
wire io_ptw_1_gstatus_mprv_0 = io_ptw_1_gstatus_mprv; // @[Top.scala:30:7]
wire [1:0] io_ptw_1_gstatus_fs_0 = io_ptw_1_gstatus_fs; // @[Top.scala:30:7]
wire [1:0] io_ptw_1_gstatus_mpp_0 = io_ptw_1_gstatus_mpp; // @[Top.scala:30:7]
wire [1:0] io_ptw_1_gstatus_vs_0 = io_ptw_1_gstatus_vs; // @[Top.scala:30:7]
wire io_ptw_1_gstatus_spp_0 = io_ptw_1_gstatus_spp; // @[Top.scala:30:7]
wire io_ptw_1_gstatus_mpie_0 = io_ptw_1_gstatus_mpie; // @[Top.scala:30:7]
wire io_ptw_1_gstatus_ube_0 = io_ptw_1_gstatus_ube; // @[Top.scala:30:7]
wire io_ptw_1_gstatus_spie_0 = io_ptw_1_gstatus_spie; // @[Top.scala:30:7]
wire io_ptw_1_gstatus_upie_0 = io_ptw_1_gstatus_upie; // @[Top.scala:30:7]
wire io_ptw_1_gstatus_mie_0 = io_ptw_1_gstatus_mie; // @[Top.scala:30:7]
wire io_ptw_1_gstatus_hie_0 = io_ptw_1_gstatus_hie; // @[Top.scala:30:7]
wire io_ptw_1_gstatus_sie_0 = io_ptw_1_gstatus_sie; // @[Top.scala:30:7]
wire io_ptw_1_gstatus_uie_0 = io_ptw_1_gstatus_uie; // @[Top.scala:30:7]
wire io_ptw_1_pmp_0_cfg_l_0 = io_ptw_1_pmp_0_cfg_l; // @[Top.scala:30:7]
wire [1:0] io_ptw_1_pmp_0_cfg_a_0 = io_ptw_1_pmp_0_cfg_a; // @[Top.scala:30:7]
wire io_ptw_1_pmp_0_cfg_x_0 = io_ptw_1_pmp_0_cfg_x; // @[Top.scala:30:7]
wire io_ptw_1_pmp_0_cfg_w_0 = io_ptw_1_pmp_0_cfg_w; // @[Top.scala:30:7]
wire io_ptw_1_pmp_0_cfg_r_0 = io_ptw_1_pmp_0_cfg_r; // @[Top.scala:30:7]
wire [29:0] io_ptw_1_pmp_0_addr_0 = io_ptw_1_pmp_0_addr; // @[Top.scala:30:7]
wire [31:0] io_ptw_1_pmp_0_mask_0 = io_ptw_1_pmp_0_mask; // @[Top.scala:30:7]
wire io_ptw_1_pmp_1_cfg_l_0 = io_ptw_1_pmp_1_cfg_l; // @[Top.scala:30:7]
wire [1:0] io_ptw_1_pmp_1_cfg_a_0 = io_ptw_1_pmp_1_cfg_a; // @[Top.scala:30:7]
wire io_ptw_1_pmp_1_cfg_x_0 = io_ptw_1_pmp_1_cfg_x; // @[Top.scala:30:7]
wire io_ptw_1_pmp_1_cfg_w_0 = io_ptw_1_pmp_1_cfg_w; // @[Top.scala:30:7]
wire io_ptw_1_pmp_1_cfg_r_0 = io_ptw_1_pmp_1_cfg_r; // @[Top.scala:30:7]
wire [29:0] io_ptw_1_pmp_1_addr_0 = io_ptw_1_pmp_1_addr; // @[Top.scala:30:7]
wire [31:0] io_ptw_1_pmp_1_mask_0 = io_ptw_1_pmp_1_mask; // @[Top.scala:30:7]
wire io_ptw_1_pmp_2_cfg_l_0 = io_ptw_1_pmp_2_cfg_l; // @[Top.scala:30:7]
wire [1:0] io_ptw_1_pmp_2_cfg_a_0 = io_ptw_1_pmp_2_cfg_a; // @[Top.scala:30:7]
wire io_ptw_1_pmp_2_cfg_x_0 = io_ptw_1_pmp_2_cfg_x; // @[Top.scala:30:7]
wire io_ptw_1_pmp_2_cfg_w_0 = io_ptw_1_pmp_2_cfg_w; // @[Top.scala:30:7]
wire io_ptw_1_pmp_2_cfg_r_0 = io_ptw_1_pmp_2_cfg_r; // @[Top.scala:30:7]
wire [29:0] io_ptw_1_pmp_2_addr_0 = io_ptw_1_pmp_2_addr; // @[Top.scala:30:7]
wire [31:0] io_ptw_1_pmp_2_mask_0 = io_ptw_1_pmp_2_mask; // @[Top.scala:30:7]
wire io_ptw_1_pmp_3_cfg_l_0 = io_ptw_1_pmp_3_cfg_l; // @[Top.scala:30:7]
wire [1:0] io_ptw_1_pmp_3_cfg_a_0 = io_ptw_1_pmp_3_cfg_a; // @[Top.scala:30:7]
wire io_ptw_1_pmp_3_cfg_x_0 = io_ptw_1_pmp_3_cfg_x; // @[Top.scala:30:7]
wire io_ptw_1_pmp_3_cfg_w_0 = io_ptw_1_pmp_3_cfg_w; // @[Top.scala:30:7]
wire io_ptw_1_pmp_3_cfg_r_0 = io_ptw_1_pmp_3_cfg_r; // @[Top.scala:30:7]
wire [29:0] io_ptw_1_pmp_3_addr_0 = io_ptw_1_pmp_3_addr; // @[Top.scala:30:7]
wire [31:0] io_ptw_1_pmp_3_mask_0 = io_ptw_1_pmp_3_mask; // @[Top.scala:30:7]
wire io_ptw_1_pmp_4_cfg_l_0 = io_ptw_1_pmp_4_cfg_l; // @[Top.scala:30:7]
wire [1:0] io_ptw_1_pmp_4_cfg_a_0 = io_ptw_1_pmp_4_cfg_a; // @[Top.scala:30:7]
wire io_ptw_1_pmp_4_cfg_x_0 = io_ptw_1_pmp_4_cfg_x; // @[Top.scala:30:7]
wire io_ptw_1_pmp_4_cfg_w_0 = io_ptw_1_pmp_4_cfg_w; // @[Top.scala:30:7]
wire io_ptw_1_pmp_4_cfg_r_0 = io_ptw_1_pmp_4_cfg_r; // @[Top.scala:30:7]
wire [29:0] io_ptw_1_pmp_4_addr_0 = io_ptw_1_pmp_4_addr; // @[Top.scala:30:7]
wire [31:0] io_ptw_1_pmp_4_mask_0 = io_ptw_1_pmp_4_mask; // @[Top.scala:30:7]
wire io_ptw_1_pmp_5_cfg_l_0 = io_ptw_1_pmp_5_cfg_l; // @[Top.scala:30:7]
wire [1:0] io_ptw_1_pmp_5_cfg_a_0 = io_ptw_1_pmp_5_cfg_a; // @[Top.scala:30:7]
wire io_ptw_1_pmp_5_cfg_x_0 = io_ptw_1_pmp_5_cfg_x; // @[Top.scala:30:7]
wire io_ptw_1_pmp_5_cfg_w_0 = io_ptw_1_pmp_5_cfg_w; // @[Top.scala:30:7]
wire io_ptw_1_pmp_5_cfg_r_0 = io_ptw_1_pmp_5_cfg_r; // @[Top.scala:30:7]
wire [29:0] io_ptw_1_pmp_5_addr_0 = io_ptw_1_pmp_5_addr; // @[Top.scala:30:7]
wire [31:0] io_ptw_1_pmp_5_mask_0 = io_ptw_1_pmp_5_mask; // @[Top.scala:30:7]
wire io_ptw_1_pmp_6_cfg_l_0 = io_ptw_1_pmp_6_cfg_l; // @[Top.scala:30:7]
wire [1:0] io_ptw_1_pmp_6_cfg_a_0 = io_ptw_1_pmp_6_cfg_a; // @[Top.scala:30:7]
wire io_ptw_1_pmp_6_cfg_x_0 = io_ptw_1_pmp_6_cfg_x; // @[Top.scala:30:7]
wire io_ptw_1_pmp_6_cfg_w_0 = io_ptw_1_pmp_6_cfg_w; // @[Top.scala:30:7]
wire io_ptw_1_pmp_6_cfg_r_0 = io_ptw_1_pmp_6_cfg_r; // @[Top.scala:30:7]
wire [29:0] io_ptw_1_pmp_6_addr_0 = io_ptw_1_pmp_6_addr; // @[Top.scala:30:7]
wire [31:0] io_ptw_1_pmp_6_mask_0 = io_ptw_1_pmp_6_mask; // @[Top.scala:30:7]
wire io_ptw_1_pmp_7_cfg_l_0 = io_ptw_1_pmp_7_cfg_l; // @[Top.scala:30:7]
wire [1:0] io_ptw_1_pmp_7_cfg_a_0 = io_ptw_1_pmp_7_cfg_a; // @[Top.scala:30:7]
wire io_ptw_1_pmp_7_cfg_x_0 = io_ptw_1_pmp_7_cfg_x; // @[Top.scala:30:7]
wire io_ptw_1_pmp_7_cfg_w_0 = io_ptw_1_pmp_7_cfg_w; // @[Top.scala:30:7]
wire io_ptw_1_pmp_7_cfg_r_0 = io_ptw_1_pmp_7_cfg_r; // @[Top.scala:30:7]
wire [29:0] io_ptw_1_pmp_7_addr_0 = io_ptw_1_pmp_7_addr; // @[Top.scala:30:7]
wire [31:0] io_ptw_1_pmp_7_mask_0 = io_ptw_1_pmp_7_mask; // @[Top.scala:30:7]
wire io_ptw_1_customCSRs_csrs_0_ren_0 = io_ptw_1_customCSRs_csrs_0_ren; // @[Top.scala:30:7]
wire io_ptw_1_customCSRs_csrs_0_wen_0 = io_ptw_1_customCSRs_csrs_0_wen; // @[Top.scala:30:7]
wire [63:0] io_ptw_1_customCSRs_csrs_0_wdata_0 = io_ptw_1_customCSRs_csrs_0_wdata; // @[Top.scala:30:7]
wire [63:0] io_ptw_1_customCSRs_csrs_0_value_0 = io_ptw_1_customCSRs_csrs_0_value; // @[Top.scala:30:7]
wire io_ptw_1_customCSRs_csrs_1_ren_0 = io_ptw_1_customCSRs_csrs_1_ren; // @[Top.scala:30:7]
wire io_ptw_1_customCSRs_csrs_1_wen_0 = io_ptw_1_customCSRs_csrs_1_wen; // @[Top.scala:30:7]
wire [63:0] io_ptw_1_customCSRs_csrs_1_wdata_0 = io_ptw_1_customCSRs_csrs_1_wdata; // @[Top.scala:30:7]
wire [63:0] io_ptw_1_customCSRs_csrs_1_value_0 = io_ptw_1_customCSRs_csrs_1_value; // @[Top.scala:30:7]
wire io_ptw_1_customCSRs_csrs_2_ren_0 = io_ptw_1_customCSRs_csrs_2_ren; // @[Top.scala:30:7]
wire io_ptw_1_customCSRs_csrs_2_wen_0 = io_ptw_1_customCSRs_csrs_2_wen; // @[Top.scala:30:7]
wire [63:0] io_ptw_1_customCSRs_csrs_2_wdata_0 = io_ptw_1_customCSRs_csrs_2_wdata; // @[Top.scala:30:7]
wire [63:0] io_ptw_1_customCSRs_csrs_2_value_0 = io_ptw_1_customCSRs_csrs_2_value; // @[Top.scala:30:7]
wire io_ptw_1_customCSRs_csrs_3_ren_0 = io_ptw_1_customCSRs_csrs_3_ren; // @[Top.scala:30:7]
wire io_ptw_1_customCSRs_csrs_3_wen_0 = io_ptw_1_customCSRs_csrs_3_wen; // @[Top.scala:30:7]
wire [63:0] io_ptw_1_customCSRs_csrs_3_wdata_0 = io_ptw_1_customCSRs_csrs_3_wdata; // @[Top.scala:30:7]
wire [63:0] io_ptw_1_customCSRs_csrs_3_value_0 = io_ptw_1_customCSRs_csrs_3_value; // @[Top.scala:30:7]
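  // Constant tie-offs for interfaces this accelerator leaves unused (FPU, scalar mem request, assorted PTW/status fields).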
wire [64:0] io_fpu_req_bits_in1 = 65'h0; // @[Top.scala:30:7]
wire [64:0] io_fpu_req_bits_in2 = 65'h0; // @[Top.scala:30:7]
wire [64:0] io_fpu_req_bits_in3 = 65'h0; // @[Top.scala:30:7]
wire [64:0] io_fpu_resp_bits_data = 65'h0; // @[Top.scala:30:7]
wire [2:0] io_fpu_req_bits_rm = 3'h0; // @[Top.scala:30:7]
wire [1:0] io_ptw_0_status_sxl = 2'h2; // @[LazyRoCC.scala:78:14]
wire [1:0] io_ptw_0_status_uxl = 2'h2; // @[LazyRoCC.scala:78:14]
wire [1:0] io_ptw_0_hstatus_vsxl = 2'h2; // @[LazyRoCC.scala:78:14]
wire [1:0] io_ptw_0_gstatus_uxl = 2'h2; // @[LazyRoCC.scala:78:14]
wire [1:0] io_ptw_1_status_sxl = 2'h2; // @[LazyRoCC.scala:78:14]
wire [1:0] io_ptw_1_status_uxl = 2'h2; // @[LazyRoCC.scala:78:14]
wire [1:0] io_ptw_1_hstatus_vsxl = 2'h2; // @[LazyRoCC.scala:78:14]
wire [1:0] io_ptw_1_gstatus_uxl = 2'h2; // @[LazyRoCC.scala:78:14]
wire [31:0] io_mem_s2_paddr = 32'h0; // @[LazyRoCC.scala:78:14]
wire [63:0] io_mem_req_bits_data = 64'h0; // @[Top.scala:30:7]
wire [63:0] io_mem_s1_data_data = 64'h0; // @[Top.scala:30:7]
wire [63:0] io_ptw_0_customCSRs_csrs_0_sdata = 64'h0; // @[Top.scala:30:7]
wire [63:0] io_ptw_0_customCSRs_csrs_1_sdata = 64'h0; // @[Top.scala:30:7]
wire [63:0] io_ptw_0_customCSRs_csrs_2_sdata = 64'h0; // @[Top.scala:30:7]
wire [63:0] io_ptw_0_customCSRs_csrs_3_sdata = 64'h0; // @[Top.scala:30:7]
wire [63:0] io_ptw_1_customCSRs_csrs_0_sdata = 64'h0; // @[Top.scala:30:7]
wire [63:0] io_ptw_1_customCSRs_csrs_1_sdata = 64'h0; // @[Top.scala:30:7]
wire [63:0] io_ptw_1_customCSRs_csrs_2_sdata = 64'h0; // @[Top.scala:30:7]
wire [63:0] io_ptw_1_customCSRs_csrs_3_sdata = 64'h0; // @[Top.scala:30:7]
wire [39:0] io_mem_req_bits_addr = 40'h0; // @[Top.scala:30:7]
wire [39:0] io_mem_s2_gpa = 40'h0; // @[Top.scala:30:7]
wire [4:0] io_mem_req_bits_cmd = 5'h0; // @[Top.scala:30:7]
wire [4:0] io_ptw_0_hstatus_zero1 = 5'h0; // @[Top.scala:30:7]
wire [4:0] io_ptw_1_hstatus_zero1 = 5'h0; // @[Top.scala:30:7]
wire [4:0] io_fpu_resp_bits_exc = 5'h0; // @[Top.scala:30:7]
wire [5:0] io_ptw_0_hstatus_vgein = 6'h0; // @[LazyRoCC.scala:78:14]
wire [5:0] io_ptw_1_hstatus_vgein = 6'h0; // @[LazyRoCC.scala:78:14]
wire [8:0] io_ptw_0_hstatus_zero5 = 9'h0; // @[LazyRoCC.scala:78:14]
wire [8:0] io_ptw_1_hstatus_zero5 = 9'h0; // @[LazyRoCC.scala:78:14]
wire [29:0] io_ptw_0_hstatus_zero6 = 30'h0; // @[LazyRoCC.scala:78:14]
wire [29:0] io_ptw_1_hstatus_zero6 = 30'h0; // @[LazyRoCC.scala:78:14]
wire [1:0] io_mem_req_bits_size = 2'h0; // @[Top.scala:30:7]
wire [1:0] io_mem_req_bits_dprv = 2'h0; // @[Top.scala:30:7]
wire [1:0] io_ptw_0_status_vs = 2'h0; // @[Top.scala:30:7]
wire [1:0] io_ptw_0_hstatus_zero3 = 2'h0; // @[Top.scala:30:7]
wire [1:0] io_ptw_0_hstatus_zero2 = 2'h0; // @[Top.scala:30:7]
wire [1:0] io_ptw_0_pmp_0_cfg_res = 2'h0; // @[Top.scala:30:7]
wire [1:0] io_ptw_0_pmp_1_cfg_res = 2'h0; // @[Top.scala:30:7]
wire [1:0] io_ptw_0_pmp_2_cfg_res = 2'h0; // @[Top.scala:30:7]
wire [1:0] io_ptw_0_pmp_3_cfg_res = 2'h0; // @[Top.scala:30:7]
wire [1:0] io_ptw_0_pmp_4_cfg_res = 2'h0; // @[Top.scala:30:7]
wire [1:0] io_ptw_0_pmp_5_cfg_res = 2'h0; // @[Top.scala:30:7]
wire [1:0] io_ptw_0_pmp_6_cfg_res = 2'h0; // @[Top.scala:30:7]
wire [1:0] io_ptw_0_pmp_7_cfg_res = 2'h0; // @[Top.scala:30:7]
wire [1:0] io_ptw_1_status_vs = 2'h0; // @[Top.scala:30:7]
wire [1:0] io_ptw_1_hstatus_zero3 = 2'h0; // @[Top.scala:30:7]
wire [1:0] io_ptw_1_hstatus_zero2 = 2'h0; // @[Top.scala:30:7]
wire [1:0] io_ptw_1_pmp_0_cfg_res = 2'h0; // @[Top.scala:30:7]
wire [1:0] io_ptw_1_pmp_1_cfg_res = 2'h0; // @[Top.scala:30:7]
wire [1:0] io_ptw_1_pmp_2_cfg_res = 2'h0; // @[Top.scala:30:7]
wire [1:0] io_ptw_1_pmp_3_cfg_res = 2'h0; // @[Top.scala:30:7]
wire [1:0] io_ptw_1_pmp_4_cfg_res = 2'h0; // @[Top.scala:30:7]
wire [1:0] io_ptw_1_pmp_5_cfg_res = 2'h0; // @[Top.scala:30:7]
wire [1:0] io_ptw_1_pmp_6_cfg_res = 2'h0; // @[Top.scala:30:7]
wire [1:0] io_ptw_1_pmp_7_cfg_res = 2'h0; // @[Top.scala:30:7]
wire [1:0] io_fpu_req_bits_typeTagIn = 2'h0; // @[Top.scala:30:7]
wire [1:0] io_fpu_req_bits_typeTagOut = 2'h0; // @[Top.scala:30:7]
wire [1:0] io_fpu_req_bits_fmaCmd = 2'h0; // @[Top.scala:30:7]
wire [1:0] io_fpu_req_bits_typ = 2'h0; // @[Top.scala:30:7]
wire [1:0] io_fpu_req_bits_fmt = 2'h0; // @[Top.scala:30:7]
wire [1:0] io_ptw_0_status_xs = 2'h3; // @[LazyRoCC.scala:78:14]
wire [1:0] io_ptw_0_gstatus_xs = 2'h3; // @[LazyRoCC.scala:78:14]
wire [1:0] io_ptw_1_status_xs = 2'h3; // @[LazyRoCC.scala:78:14]
wire [1:0] io_ptw_1_gstatus_xs = 2'h3; // @[LazyRoCC.scala:78:14]
wire [7:0] io_mem_req_bits_tag = 8'h0; // @[Top.scala:30:7]
wire [7:0] io_mem_req_bits_mask = 8'h0; // @[Top.scala:30:7]
wire [7:0] io_mem_s1_data_mask = 8'h0; // @[Top.scala:30:7]
wire [7:0] io_ptw_0_status_zero1 = 8'h0; // @[Top.scala:30:7]
wire [7:0] io_ptw_1_status_zero1 = 8'h0; // @[Top.scala:30:7]
wire [22:0] io_ptw_0_status_zero2 = 23'h0; // @[LazyRoCC.scala:78:14]
wire [22:0] io_ptw_1_status_zero2 = 23'h0; // @[LazyRoCC.scala:78:14]
wire io_mem_keep_clock_enabled = 1'h1; // @[Top.scala:30:7]
wire io_ptw_0_req_bits_valid = 1'h1; // @[Top.scala:30:7]
wire io_ptw_0_status_sd = 1'h1; // @[Top.scala:30:7]
wire io_ptw_0_gstatus_sd = 1'h1; // @[Top.scala:30:7]
wire io_ptw_1_req_bits_valid = 1'h1; // @[Top.scala:30:7]
wire io_ptw_1_status_sd = 1'h1; // @[Top.scala:30:7]
wire io_ptw_1_gstatus_sd = 1'h1; // @[Top.scala:30:7]
wire [43:0] io_ptw_0_hgatp_ppn = 44'h0; // @[LazyRoCC.scala:78:14]
wire [43:0] io_ptw_0_vsatp_ppn = 44'h0; // @[LazyRoCC.scala:78:14]
wire [43:0] io_ptw_1_hgatp_ppn = 44'h0; // @[LazyRoCC.scala:78:14]
wire [43:0] io_ptw_1_vsatp_ppn = 44'h0; // @[LazyRoCC.scala:78:14]
wire [3:0] io_ptw_0_hgatp_mode = 4'h0; // @[LazyRoCC.scala:78:14]
wire [3:0] io_ptw_0_vsatp_mode = 4'h0; // @[LazyRoCC.scala:78:14]
wire [3:0] io_ptw_1_hgatp_mode = 4'h0; // @[LazyRoCC.scala:78:14]
wire [3:0] io_ptw_1_vsatp_mode = 4'h0; // @[LazyRoCC.scala:78:14]
wire [15:0] io_ptw_0_ptbr_asid = 16'h0; // @[LazyRoCC.scala:78:14]
wire [15:0] io_ptw_0_hgatp_asid = 16'h0; // @[LazyRoCC.scala:78:14]
wire [15:0] io_ptw_0_vsatp_asid = 16'h0; // @[LazyRoCC.scala:78:14]
wire [15:0] io_ptw_1_ptbr_asid = 16'h0; // @[LazyRoCC.scala:78:14]
wire [15:0] io_ptw_1_hgatp_asid = 16'h0; // @[LazyRoCC.scala:78:14]
wire [15:0] io_ptw_1_vsatp_asid = 16'h0; // @[LazyRoCC.scala:78:14]
wire io_mem_req_valid = 1'h0; // @[Top.scala:30:7]
wire io_mem_req_bits_signed = 1'h0; // @[Top.scala:30:7]
wire io_mem_req_bits_dv = 1'h0; // @[Top.scala:30:7]
wire io_mem_req_bits_phys = 1'h0; // @[Top.scala:30:7]
wire io_mem_req_bits_no_resp = 1'h0; // @[Top.scala:30:7]
wire io_mem_req_bits_no_alloc = 1'h0; // @[Top.scala:30:7]
wire io_mem_req_bits_no_xcpt = 1'h0; // @[Top.scala:30:7]
wire io_mem_s1_kill = 1'h0; // @[Top.scala:30:7]
wire io_mem_s2_nack = 1'h0; // @[Top.scala:30:7]
wire io_mem_s2_nack_cause_raw = 1'h0; // @[Top.scala:30:7]
wire io_mem_s2_kill = 1'h0; // @[Top.scala:30:7]
wire io_mem_s2_uncached = 1'h0; // @[Top.scala:30:7]
wire io_mem_replay_next = 1'h0; // @[Top.scala:30:7]
wire io_mem_s2_xcpt_ma_ld = 1'h0; // @[Top.scala:30:7]
wire io_mem_s2_xcpt_ma_st = 1'h0; // @[Top.scala:30:7]
wire io_mem_s2_xcpt_pf_ld = 1'h0; // @[Top.scala:30:7]
wire io_mem_s2_xcpt_pf_st = 1'h0; // @[Top.scala:30:7]
wire io_mem_s2_xcpt_gf_ld = 1'h0; // @[Top.scala:30:7]
wire io_mem_s2_xcpt_gf_st = 1'h0; // @[Top.scala:30:7]
wire io_mem_s2_xcpt_ae_ld = 1'h0; // @[Top.scala:30:7]
wire io_mem_s2_xcpt_ae_st = 1'h0; // @[Top.scala:30:7]
wire io_mem_s2_gpa_is_pte = 1'h0; // @[Top.scala:30:7]
wire io_mem_ordered = 1'h0; // @[Top.scala:30:7]
wire io_mem_store_pending = 1'h0; // @[Top.scala:30:7]
wire io_mem_perf_acquire = 1'h0; // @[Top.scala:30:7]
wire io_mem_perf_release = 1'h0; // @[Top.scala:30:7]
wire io_mem_perf_grant = 1'h0; // @[Top.scala:30:7]
wire io_mem_perf_tlbMiss = 1'h0; // @[Top.scala:30:7]
wire io_mem_perf_blocked = 1'h0; // @[Top.scala:30:7]
wire io_mem_perf_canAcceptStoreThenLoad = 1'h0; // @[Top.scala:30:7]
wire io_mem_perf_canAcceptStoreThenRMW = 1'h0; // @[Top.scala:30:7]
wire io_mem_perf_canAcceptLoadThenLoad = 1'h0; // @[Top.scala:30:7]
wire io_mem_perf_storeBufferEmptyAfterLoad = 1'h0; // @[Top.scala:30:7]
wire io_mem_perf_storeBufferEmptyAfterStore = 1'h0; // @[Top.scala:30:7]
wire io_mem_clock_enabled = 1'h0; // @[Top.scala:30:7]
wire io_busy = 1'h0; // @[Top.scala:30:7]
wire io_interrupt = 1'h0; // @[Top.scala:30:7]
wire io_ptw_0_req_bits_bits_vstage1 = 1'h0; // @[Top.scala:30:7]
wire io_ptw_0_req_bits_bits_stage2 = 1'h0; // @[Top.scala:30:7]
wire io_ptw_0_resp_bits_fragmented_superpage = 1'h0; // @[Top.scala:30:7]
wire io_ptw_0_status_mbe = 1'h0; // @[Top.scala:30:7]
wire io_ptw_0_status_sbe = 1'h0; // @[Top.scala:30:7]
wire io_ptw_0_status_sd_rv32 = 1'h0; // @[Top.scala:30:7]
wire io_ptw_0_status_ube = 1'h0; // @[Top.scala:30:7]
wire io_ptw_0_status_upie = 1'h0; // @[Top.scala:30:7]
wire io_ptw_0_status_hie = 1'h0; // @[Top.scala:30:7]
wire io_ptw_0_status_uie = 1'h0; // @[Top.scala:30:7]
wire io_ptw_0_hstatus_vtsr = 1'h0; // @[Top.scala:30:7]
wire io_ptw_0_hstatus_vtw = 1'h0; // @[Top.scala:30:7]
wire io_ptw_0_hstatus_vtvm = 1'h0; // @[Top.scala:30:7]
wire io_ptw_0_hstatus_hu = 1'h0; // @[Top.scala:30:7]
wire io_ptw_0_hstatus_vsbe = 1'h0; // @[Top.scala:30:7]
wire io_ptw_0_gstatus_sd_rv32 = 1'h0; // @[Top.scala:30:7]
wire io_ptw_0_customCSRs_csrs_0_stall = 1'h0; // @[Top.scala:30:7]
wire io_ptw_0_customCSRs_csrs_0_set = 1'h0; // @[Top.scala:30:7]
wire io_ptw_0_customCSRs_csrs_1_stall = 1'h0; // @[Top.scala:30:7]
wire io_ptw_0_customCSRs_csrs_1_set = 1'h0; // @[Top.scala:30:7]
wire io_ptw_0_customCSRs_csrs_2_stall = 1'h0; // @[Top.scala:30:7]
wire io_ptw_0_customCSRs_csrs_2_set = 1'h0; // @[Top.scala:30:7]
wire io_ptw_0_customCSRs_csrs_3_stall = 1'h0; // @[Top.scala:30:7]
wire io_ptw_0_customCSRs_csrs_3_set = 1'h0; // @[Top.scala:30:7]
wire io_ptw_1_req_bits_bits_vstage1 = 1'h0; // @[Top.scala:30:7]
wire io_ptw_1_req_bits_bits_stage2 = 1'h0; // @[Top.scala:30:7]
wire io_ptw_1_resp_bits_fragmented_superpage = 1'h0; // @[Top.scala:30:7]
wire io_ptw_1_status_mbe = 1'h0; // @[Top.scala:30:7]
wire io_ptw_1_status_sbe = 1'h0; // @[Top.scala:30:7]
wire io_ptw_1_status_sd_rv32 = 1'h0; // @[Top.scala:30:7]
wire io_ptw_1_status_ube = 1'h0; // @[Top.scala:30:7]
wire io_ptw_1_status_upie = 1'h0; // @[Top.scala:30:7]
wire io_ptw_1_status_hie = 1'h0; // @[Top.scala:30:7]
wire io_ptw_1_status_uie = 1'h0; // @[Top.scala:30:7]
wire io_ptw_1_hstatus_vtsr = 1'h0; // @[Top.scala:30:7]
wire io_ptw_1_hstatus_vtw = 1'h0; // @[Top.scala:30:7]
wire io_ptw_1_hstatus_vtvm = 1'h0; // @[Top.scala:30:7]
wire io_ptw_1_hstatus_hu = 1'h0; // @[Top.scala:30:7]
wire io_ptw_1_hstatus_vsbe = 1'h0; // @[Top.scala:30:7]
wire io_ptw_1_gstatus_sd_rv32 = 1'h0; // @[Top.scala:30:7]
wire io_ptw_1_customCSRs_csrs_0_stall = 1'h0; // @[Top.scala:30:7]
wire io_ptw_1_customCSRs_csrs_0_set = 1'h0; // @[Top.scala:30:7]
wire io_ptw_1_customCSRs_csrs_1_stall = 1'h0; // @[Top.scala:30:7]
wire io_ptw_1_customCSRs_csrs_1_set = 1'h0; // @[Top.scala:30:7]
wire io_ptw_1_customCSRs_csrs_2_stall = 1'h0; // @[Top.scala:30:7]
wire io_ptw_1_customCSRs_csrs_2_set = 1'h0; // @[Top.scala:30:7]
wire io_ptw_1_customCSRs_csrs_3_stall = 1'h0; // @[Top.scala:30:7]
wire io_ptw_1_customCSRs_csrs_3_set = 1'h0; // @[Top.scala:30:7]
wire io_fpu_req_ready = 1'h0; // @[Top.scala:30:7]
wire io_fpu_req_valid = 1'h0; // @[Top.scala:30:7]
wire io_fpu_req_bits_ldst = 1'h0; // @[Top.scala:30:7]
wire io_fpu_req_bits_wen = 1'h0; // @[Top.scala:30:7]
wire io_fpu_req_bits_ren1 = 1'h0; // @[Top.scala:30:7]
wire io_fpu_req_bits_ren2 = 1'h0; // @[Top.scala:30:7]
wire io_fpu_req_bits_ren3 = 1'h0; // @[Top.scala:30:7]
wire io_fpu_req_bits_swap12 = 1'h0; // @[Top.scala:30:7]
wire io_fpu_req_bits_swap23 = 1'h0; // @[Top.scala:30:7]
wire io_fpu_req_bits_fromint = 1'h0; // @[Top.scala:30:7]
wire io_fpu_req_bits_toint = 1'h0; // @[Top.scala:30:7]
wire io_fpu_req_bits_fastpipe = 1'h0; // @[Top.scala:30:7]
wire io_fpu_req_bits_fma = 1'h0; // @[Top.scala:30:7]
wire io_fpu_req_bits_div = 1'h0; // @[Top.scala:30:7]
wire io_fpu_req_bits_sqrt = 1'h0; // @[Top.scala:30:7]
wire io_fpu_req_bits_wflags = 1'h0; // @[Top.scala:30:7]
wire io_fpu_req_bits_vec = 1'h0; // @[Top.scala:30:7]
wire io_fpu_resp_ready = 1'h0; // @[Top.scala:30:7]
wire io_fpu_resp_valid = 1'h0; // @[Top.scala:30:7]
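  // TileLink node-boundary wires (MixedNode) bridging the atlNode edges to the auto_atl_out ports.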
wire x1_atlNodeOut_a_ready = auto_atl_out_1_a_ready_0; // @[MixedNode.scala:542:17]
wire x1_atlNodeOut_a_valid; // @[MixedNode.scala:542:17]
wire [2:0] x1_atlNodeOut_a_bits_opcode; // @[MixedNode.scala:542:17]
wire [2:0] x1_atlNodeOut_a_bits_param; // @[MixedNode.scala:542:17]
wire [3:0] x1_atlNodeOut_a_bits_size; // @[MixedNode.scala:542:17]
wire [4:0] x1_atlNodeOut_a_bits_source; // @[MixedNode.scala:542:17]
wire [31:0] x1_atlNodeOut_a_bits_address; // @[MixedNode.scala:542:17]
wire [31:0] x1_atlNodeOut_a_bits_mask; // @[MixedNode.scala:542:17]
wire [255:0] x1_atlNodeOut_a_bits_data; // @[MixedNode.scala:542:17]
wire x1_atlNodeOut_a_bits_corrupt; // @[MixedNode.scala:542:17]
wire x1_atlNodeOut_d_ready; // @[MixedNode.scala:542:17]
wire x1_atlNodeOut_d_valid = auto_atl_out_1_d_valid_0; // @[MixedNode.scala:542:17]
wire [2:0] x1_atlNodeOut_d_bits_opcode = auto_atl_out_1_d_bits_opcode_0; // @[MixedNode.scala:542:17]
wire [1:0] x1_atlNodeOut_d_bits_param = auto_atl_out_1_d_bits_param_0; // @[MixedNode.scala:542:17]
wire [3:0] x1_atlNodeOut_d_bits_size = auto_atl_out_1_d_bits_size_0; // @[MixedNode.scala:542:17]
wire [4:0] x1_atlNodeOut_d_bits_source = auto_atl_out_1_d_bits_source_0; // @[MixedNode.scala:542:17]
wire [4:0] x1_atlNodeOut_d_bits_sink = auto_atl_out_1_d_bits_sink_0; // @[MixedNode.scala:542:17]
wire x1_atlNodeOut_d_bits_denied = auto_atl_out_1_d_bits_denied_0; // @[MixedNode.scala:542:17]
wire [255:0] x1_atlNodeOut_d_bits_data = auto_atl_out_1_d_bits_data_0; // @[MixedNode.scala:542:17]
wire x1_atlNodeOut_d_bits_corrupt = auto_atl_out_1_d_bits_corrupt_0; // @[MixedNode.scala:542:17]
wire atlNodeOut_a_ready = auto_atl_out_0_a_ready_0; // @[MixedNode.scala:542:17]
wire atlNodeOut_a_valid; // @[MixedNode.scala:542:17]
wire [2:0] atlNodeOut_a_bits_opcode; // @[MixedNode.scala:542:17]
wire [2:0] atlNodeOut_a_bits_param; // @[MixedNode.scala:542:17]
wire [3:0] atlNodeOut_a_bits_size; // @[MixedNode.scala:542:17]
wire [4:0] atlNodeOut_a_bits_source; // @[MixedNode.scala:542:17]
wire [31:0] atlNodeOut_a_bits_address; // @[MixedNode.scala:542:17]
wire [31:0] atlNodeOut_a_bits_mask; // @[MixedNode.scala:542:17]
wire [255:0] atlNodeOut_a_bits_data; // @[MixedNode.scala:542:17]
wire atlNodeOut_a_bits_corrupt; // @[MixedNode.scala:542:17]
wire atlNodeOut_d_ready; // @[MixedNode.scala:542:17]
wire atlNodeOut_d_valid = auto_atl_out_0_d_valid_0; // @[MixedNode.scala:542:17]
wire [2:0] atlNodeOut_d_bits_opcode = auto_atl_out_0_d_bits_opcode_0; // @[MixedNode.scala:542:17]
wire [1:0] atlNodeOut_d_bits_param = auto_atl_out_0_d_bits_param_0; // @[MixedNode.scala:542:17]
wire [3:0] atlNodeOut_d_bits_size = auto_atl_out_0_d_bits_size_0; // @[MixedNode.scala:542:17]
wire [4:0] atlNodeOut_d_bits_source = auto_atl_out_0_d_bits_source_0; // @[MixedNode.scala:542:17]
wire [4:0] atlNodeOut_d_bits_sink = auto_atl_out_0_d_bits_sink_0; // @[MixedNode.scala:542:17]
wire atlNodeOut_d_bits_denied = auto_atl_out_0_d_bits_denied_0; // @[MixedNode.scala:542:17]
wire [255:0] atlNodeOut_d_bits_data = auto_atl_out_0_d_bits_data_0; // @[MixedNode.scala:542:17]
wire atlNodeOut_d_bits_corrupt = auto_atl_out_0_d_bits_corrupt_0; // @[MixedNode.scala:542:17]
wire [2:0] auto_atl_out_1_a_bits_opcode_0; // @[Top.scala:30:7]
wire [2:0] auto_atl_out_1_a_bits_param_0; // @[Top.scala:30:7]
wire [3:0] auto_atl_out_1_a_bits_size_0; // @[Top.scala:30:7]
wire [4:0] auto_atl_out_1_a_bits_source_0; // @[Top.scala:30:7]
wire [31:0] auto_atl_out_1_a_bits_address_0; // @[Top.scala:30:7]
wire [31:0] auto_atl_out_1_a_bits_mask_0; // @[Top.scala:30:7]
wire [255:0] auto_atl_out_1_a_bits_data_0; // @[Top.scala:30:7]
wire auto_atl_out_1_a_bits_corrupt_0; // @[Top.scala:30:7]
wire auto_atl_out_1_a_valid_0; // @[Top.scala:30:7]
wire auto_atl_out_1_d_ready_0; // @[Top.scala:30:7]
wire [2:0] auto_atl_out_0_a_bits_opcode_0; // @[Top.scala:30:7]
wire [2:0] auto_atl_out_0_a_bits_param_0; // @[Top.scala:30:7]
wire [3:0] auto_atl_out_0_a_bits_size_0; // @[Top.scala:30:7]
wire [4:0] auto_atl_out_0_a_bits_source_0; // @[Top.scala:30:7]
wire [31:0] auto_atl_out_0_a_bits_address_0; // @[Top.scala:30:7]
wire [31:0] auto_atl_out_0_a_bits_mask_0; // @[Top.scala:30:7]
wire [255:0] auto_atl_out_0_a_bits_data_0; // @[Top.scala:30:7]
wire auto_atl_out_0_a_bits_corrupt_0; // @[Top.scala:30:7]
wire auto_atl_out_0_a_valid_0; // @[Top.scala:30:7]
wire auto_atl_out_0_d_ready_0; // @[Top.scala:30:7]
wire io_cmd_ready_0; // @[Top.scala:30:7]
wire [4:0] io_resp_bits_rd_0; // @[Top.scala:30:7]
wire [63:0] io_resp_bits_data_0; // @[Top.scala:30:7]
wire io_resp_valid_0; // @[Top.scala:30:7]
wire [26:0] io_ptw_0_req_bits_bits_addr_0; // @[Top.scala:30:7]
wire io_ptw_0_req_bits_bits_need_gpa_0; // @[Top.scala:30:7]
wire io_ptw_0_req_valid_0; // @[Top.scala:30:7]
wire [26:0] io_ptw_1_req_bits_bits_addr_0; // @[Top.scala:30:7]
wire io_ptw_1_req_bits_bits_need_gpa_0; // @[Top.scala:30:7]
wire io_ptw_1_req_valid_0; // @[Top.scala:30:7]
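  // Width-widget #0 wiring: anonIn/anonOut is a straight pass-through here, since the 256-bit
  // data width already matches on both sides of the widget.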
wire widget_anonIn_a_ready; // @[MixedNode.scala:551:17]
wire widget_anonIn_a_valid = widget_auto_anon_in_a_valid; // @[WidthWidget.scala:27:9]
wire [2:0] widget_anonIn_a_bits_opcode = widget_auto_anon_in_a_bits_opcode; // @[WidthWidget.scala:27:9]
wire [2:0] widget_anonIn_a_bits_param = widget_auto_anon_in_a_bits_param; // @[WidthWidget.scala:27:9]
wire [3:0] widget_anonIn_a_bits_size = widget_auto_anon_in_a_bits_size; // @[WidthWidget.scala:27:9]
wire [4:0] widget_anonIn_a_bits_source = widget_auto_anon_in_a_bits_source; // @[WidthWidget.scala:27:9]
wire [31:0] widget_anonIn_a_bits_address = widget_auto_anon_in_a_bits_address; // @[WidthWidget.scala:27:9]
wire [31:0] widget_anonIn_a_bits_mask = widget_auto_anon_in_a_bits_mask; // @[WidthWidget.scala:27:9]
wire [255:0] widget_anonIn_a_bits_data = widget_auto_anon_in_a_bits_data; // @[WidthWidget.scala:27:9]
wire widget_anonIn_a_bits_corrupt = widget_auto_anon_in_a_bits_corrupt; // @[WidthWidget.scala:27:9]
wire widget_anonIn_d_ready = widget_auto_anon_in_d_ready; // @[WidthWidget.scala:27:9]
wire widget_anonIn_d_valid; // @[MixedNode.scala:551:17]
wire [2:0] widget_anonIn_d_bits_opcode; // @[MixedNode.scala:551:17]
wire [1:0] widget_anonIn_d_bits_param; // @[MixedNode.scala:551:17]
wire [3:0] widget_anonIn_d_bits_size; // @[MixedNode.scala:551:17]
wire [4:0] widget_anonIn_d_bits_source; // @[MixedNode.scala:551:17]
wire [4:0] widget_anonIn_d_bits_sink; // @[MixedNode.scala:551:17]
wire widget_anonIn_d_bits_denied; // @[MixedNode.scala:551:17]
wire [255:0] widget_anonIn_d_bits_data; // @[MixedNode.scala:551:17]
wire widget_anonIn_d_bits_corrupt; // @[MixedNode.scala:551:17]
wire atlNodeIn_a_ready; // @[MixedNode.scala:551:17]
wire widget_anonOut_a_ready = widget_auto_anon_out_a_ready; // @[WidthWidget.scala:27:9]
wire widget_anonOut_a_valid; // @[MixedNode.scala:542:17]
wire [2:0] widget_anonOut_a_bits_opcode; // @[MixedNode.scala:542:17]
wire atlNodeIn_a_valid = widget_auto_anon_out_a_valid; // @[WidthWidget.scala:27:9]
wire [2:0] widget_anonOut_a_bits_param; // @[MixedNode.scala:542:17]
wire [2:0] atlNodeIn_a_bits_opcode = widget_auto_anon_out_a_bits_opcode; // @[WidthWidget.scala:27:9]
wire [3:0] widget_anonOut_a_bits_size; // @[MixedNode.scala:542:17]
wire [2:0] atlNodeIn_a_bits_param = widget_auto_anon_out_a_bits_param; // @[WidthWidget.scala:27:9]
wire [4:0] widget_anonOut_a_bits_source; // @[MixedNode.scala:542:17]
wire [3:0] atlNodeIn_a_bits_size = widget_auto_anon_out_a_bits_size; // @[WidthWidget.scala:27:9]
wire [31:0] widget_anonOut_a_bits_address; // @[MixedNode.scala:542:17]
wire [4:0] atlNodeIn_a_bits_source = widget_auto_anon_out_a_bits_source; // @[WidthWidget.scala:27:9]
wire [31:0] widget_anonOut_a_bits_mask; // @[MixedNode.scala:542:17]
wire [31:0] atlNodeIn_a_bits_address = widget_auto_anon_out_a_bits_address; // @[WidthWidget.scala:27:9]
wire [255:0] widget_anonOut_a_bits_data; // @[MixedNode.scala:542:17]
wire [31:0] atlNodeIn_a_bits_mask = widget_auto_anon_out_a_bits_mask; // @[WidthWidget.scala:27:9]
wire widget_anonOut_a_bits_corrupt; // @[MixedNode.scala:542:17]
wire [255:0] atlNodeIn_a_bits_data = widget_auto_anon_out_a_bits_data; // @[WidthWidget.scala:27:9]
wire widget_anonOut_d_ready; // @[MixedNode.scala:542:17]
wire atlNodeIn_a_bits_corrupt = widget_auto_anon_out_a_bits_corrupt; // @[WidthWidget.scala:27:9]
wire atlNodeIn_d_ready = widget_auto_anon_out_d_ready; // @[WidthWidget.scala:27:9]
wire atlNodeIn_d_valid; // @[MixedNode.scala:551:17]
wire widget_anonOut_d_valid = widget_auto_anon_out_d_valid; // @[WidthWidget.scala:27:9]
wire [2:0] atlNodeIn_d_bits_opcode; // @[MixedNode.scala:551:17]
wire [2:0] widget_anonOut_d_bits_opcode = widget_auto_anon_out_d_bits_opcode; // @[WidthWidget.scala:27:9]
wire [1:0] atlNodeIn_d_bits_param; // @[MixedNode.scala:551:17]
wire [1:0] widget_anonOut_d_bits_param = widget_auto_anon_out_d_bits_param; // @[WidthWidget.scala:27:9]
wire [3:0] atlNodeIn_d_bits_size; // @[MixedNode.scala:551:17]
wire [3:0] widget_anonOut_d_bits_size = widget_auto_anon_out_d_bits_size; // @[WidthWidget.scala:27:9]
wire [4:0] atlNodeIn_d_bits_source; // @[MixedNode.scala:551:17]
wire [4:0] widget_anonOut_d_bits_source = widget_auto_anon_out_d_bits_source; // @[WidthWidget.scala:27:9]
wire [4:0] atlNodeIn_d_bits_sink; // @[MixedNode.scala:551:17]
wire [4:0] widget_anonOut_d_bits_sink = widget_auto_anon_out_d_bits_sink; // @[WidthWidget.scala:27:9]
wire atlNodeIn_d_bits_denied; // @[MixedNode.scala:551:17]
wire widget_anonOut_d_bits_denied = widget_auto_anon_out_d_bits_denied; // @[WidthWidget.scala:27:9]
wire [255:0] atlNodeIn_d_bits_data; // @[MixedNode.scala:551:17]
wire [255:0] widget_anonOut_d_bits_data = widget_auto_anon_out_d_bits_data; // @[WidthWidget.scala:27:9]
wire atlNodeIn_d_bits_corrupt; // @[MixedNode.scala:551:17]
wire widget_anonOut_d_bits_corrupt = widget_auto_anon_out_d_bits_corrupt; // @[WidthWidget.scala:27:9]
wire widget_auto_anon_in_a_ready; // @[WidthWidget.scala:27:9]
wire [2:0] widget_auto_anon_in_d_bits_opcode; // @[WidthWidget.scala:27:9]
wire [1:0] widget_auto_anon_in_d_bits_param; // @[WidthWidget.scala:27:9]
wire [3:0] widget_auto_anon_in_d_bits_size; // @[WidthWidget.scala:27:9]
wire [4:0] widget_auto_anon_in_d_bits_source; // @[WidthWidget.scala:27:9]
wire [4:0] widget_auto_anon_in_d_bits_sink; // @[WidthWidget.scala:27:9]
wire widget_auto_anon_in_d_bits_denied; // @[WidthWidget.scala:27:9]
wire [255:0] widget_auto_anon_in_d_bits_data; // @[WidthWidget.scala:27:9]
wire widget_auto_anon_in_d_bits_corrupt; // @[WidthWidget.scala:27:9]
wire widget_auto_anon_in_d_valid; // @[WidthWidget.scala:27:9]
assign widget_anonIn_a_ready = widget_anonOut_a_ready; // @[MixedNode.scala:542:17, :551:17]
assign widget_auto_anon_out_a_valid = widget_anonOut_a_valid; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_out_a_bits_opcode = widget_anonOut_a_bits_opcode; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_out_a_bits_param = widget_anonOut_a_bits_param; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_out_a_bits_size = widget_anonOut_a_bits_size; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_out_a_bits_source = widget_anonOut_a_bits_source; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_out_a_bits_address = widget_anonOut_a_bits_address; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_out_a_bits_mask = widget_anonOut_a_bits_mask; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_out_a_bits_data = widget_anonOut_a_bits_data; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_out_a_bits_corrupt = widget_anonOut_a_bits_corrupt; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_out_d_ready = widget_anonOut_d_ready; // @[WidthWidget.scala:27:9]
assign widget_anonIn_d_valid = widget_anonOut_d_valid; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonIn_d_bits_opcode = widget_anonOut_d_bits_opcode; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonIn_d_bits_param = widget_anonOut_d_bits_param; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonIn_d_bits_size = widget_anonOut_d_bits_size; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonIn_d_bits_source = widget_anonOut_d_bits_source; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonIn_d_bits_sink = widget_anonOut_d_bits_sink; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonIn_d_bits_denied = widget_anonOut_d_bits_denied; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonIn_d_bits_data = widget_anonOut_d_bits_data; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonIn_d_bits_corrupt = widget_anonOut_d_bits_corrupt; // @[MixedNode.scala:542:17, :551:17]
assign widget_auto_anon_in_a_ready = widget_anonIn_a_ready; // @[WidthWidget.scala:27:9]
assign widget_anonOut_a_valid = widget_anonIn_a_valid; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonOut_a_bits_opcode = widget_anonIn_a_bits_opcode; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonOut_a_bits_param = widget_anonIn_a_bits_param; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonOut_a_bits_size = widget_anonIn_a_bits_size; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonOut_a_bits_source = widget_anonIn_a_bits_source; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonOut_a_bits_address = widget_anonIn_a_bits_address; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonOut_a_bits_mask = widget_anonIn_a_bits_mask; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonOut_a_bits_data = widget_anonIn_a_bits_data; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonOut_a_bits_corrupt = widget_anonIn_a_bits_corrupt; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonOut_d_ready = widget_anonIn_d_ready; // @[MixedNode.scala:542:17, :551:17]
assign widget_auto_anon_in_d_valid = widget_anonIn_d_valid; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_in_d_bits_opcode = widget_anonIn_d_bits_opcode; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_in_d_bits_param = widget_anonIn_d_bits_param; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_in_d_bits_size = widget_anonIn_d_bits_size; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_in_d_bits_source = widget_anonIn_d_bits_source; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_in_d_bits_sink = widget_anonIn_d_bits_sink; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_in_d_bits_denied = widget_anonIn_d_bits_denied; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_in_d_bits_data = widget_anonIn_d_bits_data; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_in_d_bits_corrupt = widget_anonIn_d_bits_corrupt; // @[WidthWidget.scala:27:9]
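  // Width-widget #1: same pass-through structure as widget #0, instantiated for the second
  // TileLink master edge (x1_atlNode, presumably the memwriter path).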
wire widget_1_anonIn_a_ready; // @[MixedNode.scala:551:17]
wire widget_1_anonIn_a_valid = widget_1_auto_anon_in_a_valid; // @[WidthWidget.scala:27:9]
wire [2:0] widget_1_anonIn_a_bits_opcode = widget_1_auto_anon_in_a_bits_opcode; // @[WidthWidget.scala:27:9]
wire [2:0] widget_1_anonIn_a_bits_param = widget_1_auto_anon_in_a_bits_param; // @[WidthWidget.scala:27:9]
wire [3:0] widget_1_anonIn_a_bits_size = widget_1_auto_anon_in_a_bits_size; // @[WidthWidget.scala:27:9]
wire [4:0] widget_1_anonIn_a_bits_source = widget_1_auto_anon_in_a_bits_source; // @[WidthWidget.scala:27:9]
wire [31:0] widget_1_anonIn_a_bits_address = widget_1_auto_anon_in_a_bits_address; // @[WidthWidget.scala:27:9]
wire [31:0] widget_1_anonIn_a_bits_mask = widget_1_auto_anon_in_a_bits_mask; // @[WidthWidget.scala:27:9]
wire [255:0] widget_1_anonIn_a_bits_data = widget_1_auto_anon_in_a_bits_data; // @[WidthWidget.scala:27:9]
wire widget_1_anonIn_a_bits_corrupt = widget_1_auto_anon_in_a_bits_corrupt; // @[WidthWidget.scala:27:9]
wire widget_1_anonIn_d_ready = widget_1_auto_anon_in_d_ready; // @[WidthWidget.scala:27:9]
wire widget_1_anonIn_d_valid; // @[MixedNode.scala:551:17]
wire [2:0] widget_1_anonIn_d_bits_opcode; // @[MixedNode.scala:551:17]
wire [1:0] widget_1_anonIn_d_bits_param; // @[MixedNode.scala:551:17]
wire [3:0] widget_1_anonIn_d_bits_size; // @[MixedNode.scala:551:17]
wire [4:0] widget_1_anonIn_d_bits_source; // @[MixedNode.scala:551:17]
wire [4:0] widget_1_anonIn_d_bits_sink; // @[MixedNode.scala:551:17]
wire widget_1_anonIn_d_bits_denied; // @[MixedNode.scala:551:17]
wire [255:0] widget_1_anonIn_d_bits_data; // @[MixedNode.scala:551:17]
wire widget_1_anonIn_d_bits_corrupt; // @[MixedNode.scala:551:17]
wire x1_atlNodeIn_a_ready; // @[MixedNode.scala:551:17]
wire widget_1_anonOut_a_ready = widget_1_auto_anon_out_a_ready; // @[WidthWidget.scala:27:9]
wire widget_1_anonOut_a_valid; // @[MixedNode.scala:542:17]
wire [2:0] widget_1_anonOut_a_bits_opcode; // @[MixedNode.scala:542:17]
wire x1_atlNodeIn_a_valid = widget_1_auto_anon_out_a_valid; // @[WidthWidget.scala:27:9]
wire [2:0] widget_1_anonOut_a_bits_param; // @[MixedNode.scala:542:17]
wire [2:0] x1_atlNodeIn_a_bits_opcode = widget_1_auto_anon_out_a_bits_opcode; // @[WidthWidget.scala:27:9]
wire [3:0] widget_1_anonOut_a_bits_size; // @[MixedNode.scala:542:17]
wire [2:0] x1_atlNodeIn_a_bits_param = widget_1_auto_anon_out_a_bits_param; // @[WidthWidget.scala:27:9]
wire [4:0] widget_1_anonOut_a_bits_source; // @[MixedNode.scala:542:17]
wire [3:0] x1_atlNodeIn_a_bits_size = widget_1_auto_anon_out_a_bits_size; // @[WidthWidget.scala:27:9]
wire [31:0] widget_1_anonOut_a_bits_address; // @[MixedNode.scala:542:17]
wire [4:0] x1_atlNodeIn_a_bits_source = widget_1_auto_anon_out_a_bits_source; // @[WidthWidget.scala:27:9]
wire [31:0] widget_1_anonOut_a_bits_mask; // @[MixedNode.scala:542:17]
wire [31:0] x1_atlNodeIn_a_bits_address = widget_1_auto_anon_out_a_bits_address; // @[WidthWidget.scala:27:9]
wire [255:0] widget_1_anonOut_a_bits_data; // @[MixedNode.scala:542:17]
wire [31:0] x1_atlNodeIn_a_bits_mask = widget_1_auto_anon_out_a_bits_mask; // @[WidthWidget.scala:27:9]
wire widget_1_anonOut_a_bits_corrupt; // @[MixedNode.scala:542:17]
wire [255:0] x1_atlNodeIn_a_bits_data = widget_1_auto_anon_out_a_bits_data; // @[WidthWidget.scala:27:9]
wire widget_1_anonOut_d_ready; // @[MixedNode.scala:542:17]
wire x1_atlNodeIn_a_bits_corrupt = widget_1_auto_anon_out_a_bits_corrupt; // @[WidthWidget.scala:27:9]
wire x1_atlNodeIn_d_ready = widget_1_auto_anon_out_d_ready; // @[WidthWidget.scala:27:9]
wire x1_atlNodeIn_d_valid; // @[MixedNode.scala:551:17]
wire widget_1_anonOut_d_valid = widget_1_auto_anon_out_d_valid; // @[WidthWidget.scala:27:9]
wire [2:0] x1_atlNodeIn_d_bits_opcode; // @[MixedNode.scala:551:17]
wire [2:0] widget_1_anonOut_d_bits_opcode = widget_1_auto_anon_out_d_bits_opcode; // @[WidthWidget.scala:27:9]
wire [1:0] x1_atlNodeIn_d_bits_param; // @[MixedNode.scala:551:17]
wire [1:0] widget_1_anonOut_d_bits_param = widget_1_auto_anon_out_d_bits_param; // @[WidthWidget.scala:27:9]
wire [3:0] x1_atlNodeIn_d_bits_size; // @[MixedNode.scala:551:17]
wire [3:0] widget_1_anonOut_d_bits_size = widget_1_auto_anon_out_d_bits_size; // @[WidthWidget.scala:27:9]
wire [4:0] x1_atlNodeIn_d_bits_source; // @[MixedNode.scala:551:17]
wire [4:0] widget_1_anonOut_d_bits_source = widget_1_auto_anon_out_d_bits_source; // @[WidthWidget.scala:27:9]
wire [4:0] x1_atlNodeIn_d_bits_sink; // @[MixedNode.scala:551:17]
wire [4:0] widget_1_anonOut_d_bits_sink = widget_1_auto_anon_out_d_bits_sink; // @[WidthWidget.scala:27:9]
wire x1_atlNodeIn_d_bits_denied; // @[MixedNode.scala:551:17]
wire widget_1_anonOut_d_bits_denied = widget_1_auto_anon_out_d_bits_denied; // @[WidthWidget.scala:27:9]
wire [255:0] x1_atlNodeIn_d_bits_data; // @[MixedNode.scala:551:17]
wire [255:0] widget_1_anonOut_d_bits_data = widget_1_auto_anon_out_d_bits_data; // @[WidthWidget.scala:27:9]
wire x1_atlNodeIn_d_bits_corrupt; // @[MixedNode.scala:551:17]
wire widget_1_anonOut_d_bits_corrupt = widget_1_auto_anon_out_d_bits_corrupt; // @[WidthWidget.scala:27:9]
wire widget_1_auto_anon_in_a_ready; // @[WidthWidget.scala:27:9]
wire [2:0] widget_1_auto_anon_in_d_bits_opcode; // @[WidthWidget.scala:27:9]
wire [1:0] widget_1_auto_anon_in_d_bits_param; // @[WidthWidget.scala:27:9]
wire [3:0] widget_1_auto_anon_in_d_bits_size; // @[WidthWidget.scala:27:9]
wire [4:0] widget_1_auto_anon_in_d_bits_source; // @[WidthWidget.scala:27:9]
wire [4:0] widget_1_auto_anon_in_d_bits_sink; // @[WidthWidget.scala:27:9]
wire widget_1_auto_anon_in_d_bits_denied; // @[WidthWidget.scala:27:9]
wire [255:0] widget_1_auto_anon_in_d_bits_data; // @[WidthWidget.scala:27:9]
wire widget_1_auto_anon_in_d_bits_corrupt; // @[WidthWidget.scala:27:9]
wire widget_1_auto_anon_in_d_valid; // @[WidthWidget.scala:27:9]
assign widget_1_anonIn_a_ready = widget_1_anonOut_a_ready; // @[MixedNode.scala:542:17, :551:17]
assign widget_1_auto_anon_out_a_valid = widget_1_anonOut_a_valid; // @[WidthWidget.scala:27:9]
assign widget_1_auto_anon_out_a_bits_opcode = widget_1_anonOut_a_bits_opcode; // @[WidthWidget.scala:27:9]
assign widget_1_auto_anon_out_a_bits_param = widget_1_anonOut_a_bits_param; // @[WidthWidget.scala:27:9]
assign widget_1_auto_anon_out_a_bits_size = widget_1_anonOut_a_bits_size; // @[WidthWidget.scala:27:9]
assign widget_1_auto_anon_out_a_bits_source = widget_1_anonOut_a_bits_source; // @[WidthWidget.scala:27:9]
assign widget_1_auto_anon_out_a_bits_address = widget_1_anonOut_a_bits_address; // @[WidthWidget.scala:27:9]
assign widget_1_auto_anon_out_a_bits_mask = widget_1_anonOut_a_bits_mask; // @[WidthWidget.scala:27:9]
assign widget_1_auto_anon_out_a_bits_data = widget_1_anonOut_a_bits_data; // @[WidthWidget.scala:27:9]
assign widget_1_auto_anon_out_a_bits_corrupt = widget_1_anonOut_a_bits_corrupt; // @[WidthWidget.scala:27:9]
assign widget_1_auto_anon_out_d_ready = widget_1_anonOut_d_ready; // @[WidthWidget.scala:27:9]
assign widget_1_anonIn_d_valid = widget_1_anonOut_d_valid; // @[MixedNode.scala:542:17, :551:17]
assign widget_1_anonIn_d_bits_opcode = widget_1_anonOut_d_bits_opcode; // @[MixedNode.scala:542:17, :551:17]
assign widget_1_anonIn_d_bits_param = widget_1_anonOut_d_bits_param; // @[MixedNode.scala:542:17, :551:17]
assign widget_1_anonIn_d_bits_size = widget_1_anonOut_d_bits_size; // @[MixedNode.scala:542:17, :551:17]
assign widget_1_anonIn_d_bits_source = widget_1_anonOut_d_bits_source; // @[MixedNode.scala:542:17, :551:17]
assign widget_1_anonIn_d_bits_sink = widget_1_anonOut_d_bits_sink; // @[MixedNode.scala:542:17, :551:17]
assign widget_1_anonIn_d_bits_denied = widget_1_anonOut_d_bits_denied; // @[MixedNode.scala:542:17, :551:17]
assign widget_1_anonIn_d_bits_data = widget_1_anonOut_d_bits_data; // @[MixedNode.scala:542:17, :551:17]
assign widget_1_anonIn_d_bits_corrupt = widget_1_anonOut_d_bits_corrupt; // @[MixedNode.scala:542:17, :551:17]
assign widget_1_auto_anon_in_a_ready = widget_1_anonIn_a_ready; // @[WidthWidget.scala:27:9]
assign widget_1_anonOut_a_valid = widget_1_anonIn_a_valid; // @[MixedNode.scala:542:17, :551:17]
assign widget_1_anonOut_a_bits_opcode = widget_1_anonIn_a_bits_opcode; // @[MixedNode.scala:542:17, :551:17]
assign widget_1_anonOut_a_bits_param = widget_1_anonIn_a_bits_param; // @[MixedNode.scala:542:17, :551:17]
assign widget_1_anonOut_a_bits_size = widget_1_anonIn_a_bits_size; // @[MixedNode.scala:542:17, :551:17]
assign widget_1_anonOut_a_bits_source = widget_1_anonIn_a_bits_source; // @[MixedNode.scala:542:17, :551:17]
assign widget_1_anonOut_a_bits_address = widget_1_anonIn_a_bits_address; // @[MixedNode.scala:542:17, :551:17]
assign widget_1_anonOut_a_bits_mask = widget_1_anonIn_a_bits_mask; // @[MixedNode.scala:542:17, :551:17]
assign widget_1_anonOut_a_bits_data = widget_1_anonIn_a_bits_data; // @[MixedNode.scala:542:17, :551:17]
assign widget_1_anonOut_a_bits_corrupt = widget_1_anonIn_a_bits_corrupt; // @[MixedNode.scala:542:17, :551:17]
assign widget_1_anonOut_d_ready = widget_1_anonIn_d_ready; // @[MixedNode.scala:542:17, :551:17]
assign widget_1_auto_anon_in_d_valid = widget_1_anonIn_d_valid; // @[WidthWidget.scala:27:9]
assign widget_1_auto_anon_in_d_bits_opcode = widget_1_anonIn_d_bits_opcode; // @[WidthWidget.scala:27:9]
assign widget_1_auto_anon_in_d_bits_param = widget_1_anonIn_d_bits_param; // @[WidthWidget.scala:27:9]
assign widget_1_auto_anon_in_d_bits_size = widget_1_anonIn_d_bits_size; // @[WidthWidget.scala:27:9]
assign widget_1_auto_anon_in_d_bits_source = widget_1_anonIn_d_bits_source; // @[WidthWidget.scala:27:9]
assign widget_1_auto_anon_in_d_bits_sink = widget_1_anonIn_d_bits_sink; // @[WidthWidget.scala:27:9]
assign widget_1_auto_anon_in_d_bits_denied = widget_1_anonIn_d_bits_denied; // @[WidthWidget.scala:27:9]
assign widget_1_auto_anon_in_d_bits_data = widget_1_anonIn_d_bits_data; // @[WidthWidget.scala:27:9]
assign widget_1_auto_anon_in_d_bits_corrupt = widget_1_anonIn_d_bits_corrupt; // @[WidthWidget.scala:27:9]
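  // Edge glue: forward the width-widget master sides onto atlNodeIn/atlNodeOut and
  // x1_atlNodeIn/x1_atlNodeOut, and from there to the auto_atl_out_0 / auto_atl_out_1 module
  // ports (A-channel outbound, D-channel inbound).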
assign atlNodeIn_a_ready = atlNodeOut_a_ready; // @[MixedNode.scala:542:17, :551:17]
assign auto_atl_out_0_a_valid_0 = atlNodeOut_a_valid; // @[MixedNode.scala:542:17]
assign auto_atl_out_0_a_bits_opcode_0 = atlNodeOut_a_bits_opcode; // @[MixedNode.scala:542:17]
assign auto_atl_out_0_a_bits_param_0 = atlNodeOut_a_bits_param; // @[MixedNode.scala:542:17]
assign auto_atl_out_0_a_bits_size_0 = atlNodeOut_a_bits_size; // @[MixedNode.scala:542:17]
assign auto_atl_out_0_a_bits_source_0 = atlNodeOut_a_bits_source; // @[MixedNode.scala:542:17]
assign auto_atl_out_0_a_bits_address_0 = atlNodeOut_a_bits_address; // @[MixedNode.scala:542:17]
assign auto_atl_out_0_a_bits_mask_0 = atlNodeOut_a_bits_mask; // @[MixedNode.scala:542:17]
assign auto_atl_out_0_a_bits_data_0 = atlNodeOut_a_bits_data; // @[MixedNode.scala:542:17]
assign auto_atl_out_0_a_bits_corrupt_0 = atlNodeOut_a_bits_corrupt; // @[MixedNode.scala:542:17]
assign auto_atl_out_0_d_ready_0 = atlNodeOut_d_ready; // @[MixedNode.scala:542:17]
assign atlNodeIn_d_valid = atlNodeOut_d_valid; // @[MixedNode.scala:542:17, :551:17]
assign atlNodeIn_d_bits_opcode = atlNodeOut_d_bits_opcode; // @[MixedNode.scala:542:17, :551:17]
assign atlNodeIn_d_bits_param = atlNodeOut_d_bits_param; // @[MixedNode.scala:542:17, :551:17]
assign atlNodeIn_d_bits_size = atlNodeOut_d_bits_size; // @[MixedNode.scala:542:17, :551:17]
assign atlNodeIn_d_bits_source = atlNodeOut_d_bits_source; // @[MixedNode.scala:542:17, :551:17]
assign atlNodeIn_d_bits_sink = atlNodeOut_d_bits_sink; // @[MixedNode.scala:542:17, :551:17]
assign atlNodeIn_d_bits_denied = atlNodeOut_d_bits_denied; // @[MixedNode.scala:542:17, :551:17]
assign atlNodeIn_d_bits_data = atlNodeOut_d_bits_data; // @[MixedNode.scala:542:17, :551:17]
assign atlNodeIn_d_bits_corrupt = atlNodeOut_d_bits_corrupt; // @[MixedNode.scala:542:17, :551:17]
assign x1_atlNodeIn_a_ready = x1_atlNodeOut_a_ready; // @[MixedNode.scala:542:17, :551:17]
assign auto_atl_out_1_a_valid_0 = x1_atlNodeOut_a_valid; // @[MixedNode.scala:542:17]
assign auto_atl_out_1_a_bits_opcode_0 = x1_atlNodeOut_a_bits_opcode; // @[MixedNode.scala:542:17]
assign auto_atl_out_1_a_bits_param_0 = x1_atlNodeOut_a_bits_param; // @[MixedNode.scala:542:17]
assign auto_atl_out_1_a_bits_size_0 = x1_atlNodeOut_a_bits_size; // @[MixedNode.scala:542:17]
assign auto_atl_out_1_a_bits_source_0 = x1_atlNodeOut_a_bits_source; // @[MixedNode.scala:542:17]
assign auto_atl_out_1_a_bits_address_0 = x1_atlNodeOut_a_bits_address; // @[MixedNode.scala:542:17]
assign auto_atl_out_1_a_bits_mask_0 = x1_atlNodeOut_a_bits_mask; // @[MixedNode.scala:542:17]
assign auto_atl_out_1_a_bits_data_0 = x1_atlNodeOut_a_bits_data; // @[MixedNode.scala:542:17]
assign auto_atl_out_1_a_bits_corrupt_0 = x1_atlNodeOut_a_bits_corrupt; // @[MixedNode.scala:542:17]
assign auto_atl_out_1_d_ready_0 = x1_atlNodeOut_d_ready; // @[MixedNode.scala:542:17]
assign x1_atlNodeIn_d_valid = x1_atlNodeOut_d_valid; // @[MixedNode.scala:542:17, :551:17]
assign x1_atlNodeIn_d_bits_opcode = x1_atlNodeOut_d_bits_opcode; // @[MixedNode.scala:542:17, :551:17]
assign x1_atlNodeIn_d_bits_param = x1_atlNodeOut_d_bits_param; // @[MixedNode.scala:542:17, :551:17]
assign x1_atlNodeIn_d_bits_size = x1_atlNodeOut_d_bits_size; // @[MixedNode.scala:542:17, :551:17]
assign x1_atlNodeIn_d_bits_source = x1_atlNodeOut_d_bits_source; // @[MixedNode.scala:542:17, :551:17]
assign x1_atlNodeIn_d_bits_sink = x1_atlNodeOut_d_bits_sink; // @[MixedNode.scala:542:17, :551:17]
assign x1_atlNodeIn_d_bits_denied = x1_atlNodeOut_d_bits_denied; // @[MixedNode.scala:542:17, :551:17]
assign x1_atlNodeIn_d_bits_data = x1_atlNodeOut_d_bits_data; // @[MixedNode.scala:542:17, :551:17]
assign x1_atlNodeIn_d_bits_corrupt = x1_atlNodeOut_d_bits_corrupt; // @[MixedNode.scala:542:17, :551:17]
assign widget_auto_anon_out_a_ready = atlNodeIn_a_ready; // @[WidthWidget.scala:27:9]
assign atlNodeOut_a_valid = atlNodeIn_a_valid; // @[MixedNode.scala:542:17, :551:17]
assign atlNodeOut_a_bits_opcode = atlNodeIn_a_bits_opcode; // @[MixedNode.scala:542:17, :551:17]
assign atlNodeOut_a_bits_param = atlNodeIn_a_bits_param; // @[MixedNode.scala:542:17, :551:17]
assign atlNodeOut_a_bits_size = atlNodeIn_a_bits_size; // @[MixedNode.scala:542:17, :551:17]
assign atlNodeOut_a_bits_source = atlNodeIn_a_bits_source; // @[MixedNode.scala:542:17, :551:17]
assign atlNodeOut_a_bits_address = atlNodeIn_a_bits_address; // @[MixedNode.scala:542:17, :551:17]
assign atlNodeOut_a_bits_mask = atlNodeIn_a_bits_mask; // @[MixedNode.scala:542:17, :551:17]
assign atlNodeOut_a_bits_data = atlNodeIn_a_bits_data; // @[MixedNode.scala:542:17, :551:17]
assign atlNodeOut_a_bits_corrupt = atlNodeIn_a_bits_corrupt; // @[MixedNode.scala:542:17, :551:17]
assign atlNodeOut_d_ready = atlNodeIn_d_ready; // @[MixedNode.scala:542:17, :551:17]
assign widget_auto_anon_out_d_valid = atlNodeIn_d_valid; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_out_d_bits_opcode = atlNodeIn_d_bits_opcode; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_out_d_bits_param = atlNodeIn_d_bits_param; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_out_d_bits_size = atlNodeIn_d_bits_size; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_out_d_bits_source = atlNodeIn_d_bits_source; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_out_d_bits_sink = atlNodeIn_d_bits_sink; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_out_d_bits_denied = atlNodeIn_d_bits_denied; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_out_d_bits_data = atlNodeIn_d_bits_data; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_out_d_bits_corrupt = atlNodeIn_d_bits_corrupt; // @[WidthWidget.scala:27:9]
assign widget_1_auto_anon_out_a_ready = x1_atlNodeIn_a_ready; // @[WidthWidget.scala:27:9]
assign x1_atlNodeOut_a_valid = x1_atlNodeIn_a_valid; // @[MixedNode.scala:542:17, :551:17]
assign x1_atlNodeOut_a_bits_opcode = x1_atlNodeIn_a_bits_opcode; // @[MixedNode.scala:542:17, :551:17]
assign x1_atlNodeOut_a_bits_param = x1_atlNodeIn_a_bits_param; // @[MixedNode.scala:542:17, :551:17]
assign x1_atlNodeOut_a_bits_size = x1_atlNodeIn_a_bits_size; // @[MixedNode.scala:542:17, :551:17]
assign x1_atlNodeOut_a_bits_source = x1_atlNodeIn_a_bits_source; // @[MixedNode.scala:542:17, :551:17]
assign x1_atlNodeOut_a_bits_address = x1_atlNodeIn_a_bits_address; // @[MixedNode.scala:542:17, :551:17]
assign x1_atlNodeOut_a_bits_mask = x1_atlNodeIn_a_bits_mask; // @[MixedNode.scala:542:17, :551:17]
assign x1_atlNodeOut_a_bits_data = x1_atlNodeIn_a_bits_data; // @[MixedNode.scala:542:17, :551:17]
assign x1_atlNodeOut_a_bits_corrupt = x1_atlNodeIn_a_bits_corrupt; // @[MixedNode.scala:542:17, :551:17]
assign x1_atlNodeOut_d_ready = x1_atlNodeIn_d_ready; // @[MixedNode.scala:542:17, :551:17]
assign widget_1_auto_anon_out_d_valid = x1_atlNodeIn_d_valid; // @[WidthWidget.scala:27:9]
assign widget_1_auto_anon_out_d_bits_opcode = x1_atlNodeIn_d_bits_opcode; // @[WidthWidget.scala:27:9]
assign widget_1_auto_anon_out_d_bits_param = x1_atlNodeIn_d_bits_param; // @[WidthWidget.scala:27:9]
assign widget_1_auto_anon_out_d_bits_size = x1_atlNodeIn_d_bits_size; // @[WidthWidget.scala:27:9]
assign widget_1_auto_anon_out_d_bits_source = x1_atlNodeIn_d_bits_source; // @[WidthWidget.scala:27:9]
assign widget_1_auto_anon_out_d_bits_sink = x1_atlNodeIn_d_bits_sink; // @[WidthWidget.scala:27:9]
assign widget_1_auto_anon_out_d_bits_denied = x1_atlNodeIn_d_bits_denied; // @[WidthWidget.scala:27:9]
assign widget_1_auto_anon_out_d_bits_data = x1_atlNodeIn_d_bits_data; // @[WidthWidget.scala:27:9]
assign widget_1_auto_anon_out_d_bits_corrupt = x1_atlNodeIn_d_bits_corrupt; // @[WidthWidget.scala:27:9]
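  // L2MemHelper instance for the load path: requests from the memloader arrive on io_userif,
  // the io_ptw port connects to PTW #0 (io_ptw_0_*), and TileLink traffic leaves through the
  // buffer / width-widget chain wired above.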
L2MemHelper l2_memloader ( // @[MemStreamerAccel.scala:34:36]
.clock (clock),
.reset (reset),
.auto_master_out_a_ready (_buffers_auto_in_a_ready), // @[Buffer.scala:80:47]
.auto_master_out_a_valid (_l2_memloader_auto_master_out_a_valid),
.auto_master_out_a_bits_opcode (_l2_memloader_auto_master_out_a_bits_opcode),
.auto_master_out_a_bits_size (_l2_memloader_auto_master_out_a_bits_size),
.auto_master_out_a_bits_source (_l2_memloader_auto_master_out_a_bits_source),
.auto_master_out_a_bits_address (_l2_memloader_auto_master_out_a_bits_address),
.auto_master_out_a_bits_mask (_l2_memloader_auto_master_out_a_bits_mask),
.auto_master_out_a_bits_data (_l2_memloader_auto_master_out_a_bits_data),
.auto_master_out_d_ready (_l2_memloader_auto_master_out_d_ready),
.auto_master_out_d_valid (_buffers_auto_in_d_valid), // @[Buffer.scala:80:47]
.auto_master_out_d_bits_opcode (_buffers_auto_in_d_bits_opcode), // @[Buffer.scala:80:47]
.auto_master_out_d_bits_param (_buffers_auto_in_d_bits_param), // @[Buffer.scala:80:47]
.auto_master_out_d_bits_size (_buffers_auto_in_d_bits_size), // @[Buffer.scala:80:47]
.auto_master_out_d_bits_source (_buffers_auto_in_d_bits_source), // @[Buffer.scala:80:47]
.auto_master_out_d_bits_sink (_buffers_auto_in_d_bits_sink), // @[Buffer.scala:80:47]
.auto_master_out_d_bits_denied (_buffers_auto_in_d_bits_denied), // @[Buffer.scala:80:47]
.auto_master_out_d_bits_data (_buffers_auto_in_d_bits_data), // @[Buffer.scala:80:47]
.auto_master_out_d_bits_corrupt (_buffers_auto_in_d_bits_corrupt), // @[Buffer.scala:80:47]
.io_userif_req_ready (_l2_memloader_io_userif_req_ready),
.io_userif_req_valid (_memloader_io_l2helperUser_req_valid), // @[MemStreamerAccel.scala:63:25]
.io_userif_req_bits_addr (_memloader_io_l2helperUser_req_bits_addr), // @[MemStreamerAccel.scala:63:25]
.io_userif_resp_ready (_memloader_io_l2helperUser_resp_ready), // @[MemStreamerAccel.scala:63:25]
.io_userif_resp_valid (_l2_memloader_io_userif_resp_valid),
.io_userif_resp_bits_data (_l2_memloader_io_userif_resp_bits_data),
.io_userif_no_memops_inflight (_l2_memloader_io_userif_no_memops_inflight),
.io_sfence (_memloader_io_src_info_cmd_router_cur_funct_io_sfence_out), // @[Top.scala:35:31]
.io_ptw_req_ready (io_ptw_0_req_ready_0), // @[Top.scala:30:7]
.io_ptw_req_valid (io_ptw_0_req_valid_0),
.io_ptw_req_bits_bits_addr (io_ptw_0_req_bits_bits_addr_0),
.io_ptw_req_bits_bits_need_gpa (io_ptw_0_req_bits_bits_need_gpa_0),
.io_ptw_resp_valid (io_ptw_0_resp_valid_0), // @[Top.scala:30:7]
.io_ptw_resp_bits_ae_ptw (io_ptw_0_resp_bits_ae_ptw_0), // @[Top.scala:30:7]
.io_ptw_resp_bits_ae_final (io_ptw_0_resp_bits_ae_final_0), // @[Top.scala:30:7]
.io_ptw_resp_bits_pf (io_ptw_0_resp_bits_pf_0), // @[Top.scala:30:7]
.io_ptw_resp_bits_gf (io_ptw_0_resp_bits_gf_0), // @[Top.scala:30:7]
.io_ptw_resp_bits_hr (io_ptw_0_resp_bits_hr_0), // @[Top.scala:30:7]
.io_ptw_resp_bits_hw (io_ptw_0_resp_bits_hw_0), // @[Top.scala:30:7]
.io_ptw_resp_bits_hx (io_ptw_0_resp_bits_hx_0), // @[Top.scala:30:7]
.io_ptw_resp_bits_pte_reserved_for_future (io_ptw_0_resp_bits_pte_reserved_for_future_0), // @[Top.scala:30:7]
.io_ptw_resp_bits_pte_ppn (io_ptw_0_resp_bits_pte_ppn_0), // @[Top.scala:30:7]
.io_ptw_resp_bits_pte_reserved_for_software (io_ptw_0_resp_bits_pte_reserved_for_software_0), // @[Top.scala:30:7]
.io_ptw_resp_bits_pte_d (io_ptw_0_resp_bits_pte_d_0), // @[Top.scala:30:7]
.io_ptw_resp_bits_pte_a (io_ptw_0_resp_bits_pte_a_0), // @[Top.scala:30:7]
.io_ptw_resp_bits_pte_g (io_ptw_0_resp_bits_pte_g_0), // @[Top.scala:30:7]
.io_ptw_resp_bits_pte_u (io_ptw_0_resp_bits_pte_u_0), // @[Top.scala:30:7]
.io_ptw_resp_bits_pte_x (io_ptw_0_resp_bits_pte_x_0), // @[Top.scala:30:7]
.io_ptw_resp_bits_pte_w (io_ptw_0_resp_bits_pte_w_0), // @[Top.scala:30:7]
.io_ptw_resp_bits_pte_r (io_ptw_0_resp_bits_pte_r_0), // @[Top.scala:30:7]
.io_ptw_resp_bits_pte_v (io_ptw_0_resp_bits_pte_v_0), // @[Top.scala:30:7]
.io_ptw_resp_bits_level (io_ptw_0_resp_bits_level_0), // @[Top.scala:30:7]
.io_ptw_resp_bits_homogeneous (io_ptw_0_resp_bits_homogeneous_0), // @[Top.scala:30:7]
.io_ptw_resp_bits_gpa_valid (io_ptw_0_resp_bits_gpa_valid_0), // @[Top.scala:30:7]
.io_ptw_resp_bits_gpa_bits (io_ptw_0_resp_bits_gpa_bits_0), // @[Top.scala:30:7]
.io_ptw_resp_bits_gpa_is_pte (io_ptw_0_resp_bits_gpa_is_pte_0), // @[Top.scala:30:7]
.io_ptw_ptbr_mode (io_ptw_0_ptbr_mode_0), // @[Top.scala:30:7]
.io_ptw_ptbr_ppn (io_ptw_0_ptbr_ppn_0), // @[Top.scala:30:7]
.io_ptw_status_debug (io_ptw_0_status_debug_0), // @[Top.scala:30:7]
.io_ptw_status_cease (io_ptw_0_status_cease_0), // @[Top.scala:30:7]
.io_ptw_status_wfi (io_ptw_0_status_wfi_0), // @[Top.scala:30:7]
.io_ptw_status_isa (io_ptw_0_status_isa_0), // @[Top.scala:30:7]
.io_ptw_status_dprv (io_ptw_0_status_dprv_0), // @[Top.scala:30:7]
.io_ptw_status_dv (io_ptw_0_status_dv_0), // @[Top.scala:30:7]
.io_ptw_status_prv (io_ptw_0_status_prv_0), // @[Top.scala:30:7]
.io_ptw_status_v (io_ptw_0_status_v_0), // @[Top.scala:30:7]
.io_ptw_status_mpv (io_ptw_0_status_mpv_0), // @[Top.scala:30:7]
.io_ptw_status_gva (io_ptw_0_status_gva_0), // @[Top.scala:30:7]
.io_ptw_status_tsr (io_ptw_0_status_tsr_0), // @[Top.scala:30:7]
.io_ptw_status_tw (io_ptw_0_status_tw_0), // @[Top.scala:30:7]
.io_ptw_status_tvm (io_ptw_0_status_tvm_0), // @[Top.scala:30:7]
.io_ptw_status_mxr (io_ptw_0_status_mxr_0), // @[Top.scala:30:7]
.io_ptw_status_sum (io_ptw_0_status_sum_0), // @[Top.scala:30:7]
.io_ptw_status_mprv (io_ptw_0_status_mprv_0), // @[Top.scala:30:7]
.io_ptw_status_fs (io_ptw_0_status_fs_0), // @[Top.scala:30:7]
.io_ptw_status_mpp (io_ptw_0_status_mpp_0), // @[Top.scala:30:7]
.io_ptw_status_spp (io_ptw_0_status_spp_0), // @[Top.scala:30:7]
.io_ptw_status_mpie (io_ptw_0_status_mpie_0), // @[Top.scala:30:7]
.io_ptw_status_spie (io_ptw_0_status_spie_0), // @[Top.scala:30:7]
.io_ptw_status_mie (io_ptw_0_status_mie_0), // @[Top.scala:30:7]
.io_ptw_status_sie (io_ptw_0_status_sie_0), // @[Top.scala:30:7]
.io_ptw_hstatus_spvp (io_ptw_0_hstatus_spvp_0), // @[Top.scala:30:7]
.io_ptw_hstatus_spv (io_ptw_0_hstatus_spv_0), // @[Top.scala:30:7]
.io_ptw_hstatus_gva (io_ptw_0_hstatus_gva_0), // @[Top.scala:30:7]
.io_ptw_gstatus_debug (io_ptw_0_gstatus_debug_0), // @[Top.scala:30:7]
.io_ptw_gstatus_cease (io_ptw_0_gstatus_cease_0), // @[Top.scala:30:7]
.io_ptw_gstatus_wfi (io_ptw_0_gstatus_wfi_0), // @[Top.scala:30:7]
.io_ptw_gstatus_isa (io_ptw_0_gstatus_isa_0), // @[Top.scala:30:7]
.io_ptw_gstatus_dprv (io_ptw_0_gstatus_dprv_0), // @[Top.scala:30:7]
.io_ptw_gstatus_dv (io_ptw_0_gstatus_dv_0), // @[Top.scala:30:7]
.io_ptw_gstatus_prv (io_ptw_0_gstatus_prv_0), // @[Top.scala:30:7]
.io_ptw_gstatus_v (io_ptw_0_gstatus_v_0), // @[Top.scala:30:7]
.io_ptw_gstatus_zero2 (io_ptw_0_gstatus_zero2_0), // @[Top.scala:30:7]
.io_ptw_gstatus_mpv (io_ptw_0_gstatus_mpv_0), // @[Top.scala:30:7]
.io_ptw_gstatus_gva (io_ptw_0_gstatus_gva_0), // @[Top.scala:30:7]
.io_ptw_gstatus_mbe (io_ptw_0_gstatus_mbe_0), // @[Top.scala:30:7]
.io_ptw_gstatus_sbe (io_ptw_0_gstatus_sbe_0), // @[Top.scala:30:7]
.io_ptw_gstatus_sxl (io_ptw_0_gstatus_sxl_0), // @[Top.scala:30:7]
.io_ptw_gstatus_zero1 (io_ptw_0_gstatus_zero1_0), // @[Top.scala:30:7]
.io_ptw_gstatus_tsr (io_ptw_0_gstatus_tsr_0), // @[Top.scala:30:7]
.io_ptw_gstatus_tw (io_ptw_0_gstatus_tw_0), // @[Top.scala:30:7]
.io_ptw_gstatus_tvm (io_ptw_0_gstatus_tvm_0), // @[Top.scala:30:7]
.io_ptw_gstatus_mxr (io_ptw_0_gstatus_mxr_0), // @[Top.scala:30:7]
.io_ptw_gstatus_sum (io_ptw_0_gstatus_sum_0), // @[Top.scala:30:7]
.io_ptw_gstatus_mprv (io_ptw_0_gstatus_mprv_0), // @[Top.scala:30:7]
.io_ptw_gstatus_fs (io_ptw_0_gstatus_fs_0), // @[Top.scala:30:7]
.io_ptw_gstatus_mpp (io_ptw_0_gstatus_mpp_0), // @[Top.scala:30:7]
.io_ptw_gstatus_vs (io_ptw_0_gstatus_vs_0), // @[Top.scala:30:7]
.io_ptw_gstatus_spp (io_ptw_0_gstatus_spp_0), // @[Top.scala:30:7]
.io_ptw_gstatus_mpie (io_ptw_0_gstatus_mpie_0), // @[Top.scala:30:7]
.io_ptw_gstatus_ube (io_ptw_0_gstatus_ube_0), // @[Top.scala:30:7]
.io_ptw_gstatus_spie (io_ptw_0_gstatus_spie_0), // @[Top.scala:30:7]
.io_ptw_gstatus_upie (io_ptw_0_gstatus_upie_0), // @[Top.scala:30:7]
.io_ptw_gstatus_mie (io_ptw_0_gstatus_mie_0), // @[Top.scala:30:7]
.io_ptw_gstatus_hie (io_ptw_0_gstatus_hie_0), // @[Top.scala:30:7]
.io_ptw_gstatus_sie (io_ptw_0_gstatus_sie_0), // @[Top.scala:30:7]
.io_ptw_gstatus_uie (io_ptw_0_gstatus_uie_0), // @[Top.scala:30:7]
.io_ptw_pmp_0_cfg_l (io_ptw_0_pmp_0_cfg_l_0), // @[Top.scala:30:7]
.io_ptw_pmp_0_cfg_a (io_ptw_0_pmp_0_cfg_a_0), // @[Top.scala:30:7]
.io_ptw_pmp_0_cfg_x (io_ptw_0_pmp_0_cfg_x_0), // @[Top.scala:30:7]
.io_ptw_pmp_0_cfg_w (io_ptw_0_pmp_0_cfg_w_0), // @[Top.scala:30:7]
.io_ptw_pmp_0_cfg_r (io_ptw_0_pmp_0_cfg_r_0), // @[Top.scala:30:7]
.io_ptw_pmp_0_addr (io_ptw_0_pmp_0_addr_0), // @[Top.scala:30:7]
.io_ptw_pmp_0_mask (io_ptw_0_pmp_0_mask_0), // @[Top.scala:30:7]
.io_ptw_pmp_1_cfg_l (io_ptw_0_pmp_1_cfg_l_0), // @[Top.scala:30:7]
.io_ptw_pmp_1_cfg_a (io_ptw_0_pmp_1_cfg_a_0), // @[Top.scala:30:7]
.io_ptw_pmp_1_cfg_x (io_ptw_0_pmp_1_cfg_x_0), // @[Top.scala:30:7]
.io_ptw_pmp_1_cfg_w (io_ptw_0_pmp_1_cfg_w_0), // @[Top.scala:30:7]
.io_ptw_pmp_1_cfg_r (io_ptw_0_pmp_1_cfg_r_0), // @[Top.scala:30:7]
.io_ptw_pmp_1_addr (io_ptw_0_pmp_1_addr_0), // @[Top.scala:30:7]
.io_ptw_pmp_1_mask (io_ptw_0_pmp_1_mask_0), // @[Top.scala:30:7]
.io_ptw_pmp_2_cfg_l (io_ptw_0_pmp_2_cfg_l_0), // @[Top.scala:30:7]
.io_ptw_pmp_2_cfg_a (io_ptw_0_pmp_2_cfg_a_0), // @[Top.scala:30:7]
.io_ptw_pmp_2_cfg_x (io_ptw_0_pmp_2_cfg_x_0), // @[Top.scala:30:7]
.io_ptw_pmp_2_cfg_w (io_ptw_0_pmp_2_cfg_w_0), // @[Top.scala:30:7]
.io_ptw_pmp_2_cfg_r (io_ptw_0_pmp_2_cfg_r_0), // @[Top.scala:30:7]
.io_ptw_pmp_2_addr (io_ptw_0_pmp_2_addr_0), // @[Top.scala:30:7]
.io_ptw_pmp_2_mask (io_ptw_0_pmp_2_mask_0), // @[Top.scala:30:7]
.io_ptw_pmp_3_cfg_l (io_ptw_0_pmp_3_cfg_l_0), // @[Top.scala:30:7]
.io_ptw_pmp_3_cfg_a (io_ptw_0_pmp_3_cfg_a_0), // @[Top.scala:30:7]
.io_ptw_pmp_3_cfg_x (io_ptw_0_pmp_3_cfg_x_0), // @[Top.scala:30:7]
.io_ptw_pmp_3_cfg_w (io_ptw_0_pmp_3_cfg_w_0), // @[Top.scala:30:7]
.io_ptw_pmp_3_cfg_r (io_ptw_0_pmp_3_cfg_r_0), // @[Top.scala:30:7]
.io_ptw_pmp_3_addr (io_ptw_0_pmp_3_addr_0), // @[Top.scala:30:7]
.io_ptw_pmp_3_mask (io_ptw_0_pmp_3_mask_0), // @[Top.scala:30:7]
.io_ptw_pmp_4_cfg_l (io_ptw_0_pmp_4_cfg_l_0), // @[Top.scala:30:7]
.io_ptw_pmp_4_cfg_a (io_ptw_0_pmp_4_cfg_a_0), // @[Top.scala:30:7]
.io_ptw_pmp_4_cfg_x (io_ptw_0_pmp_4_cfg_x_0), // @[Top.scala:30:7]
.io_ptw_pmp_4_cfg_w (io_ptw_0_pmp_4_cfg_w_0), // @[Top.scala:30:7]
.io_ptw_pmp_4_cfg_r (io_ptw_0_pmp_4_cfg_r_0), // @[Top.scala:30:7]
.io_ptw_pmp_4_addr (io_ptw_0_pmp_4_addr_0), // @[Top.scala:30:7]
.io_ptw_pmp_4_mask (io_ptw_0_pmp_4_mask_0), // @[Top.scala:30:7]
.io_ptw_pmp_5_cfg_l (io_ptw_0_pmp_5_cfg_l_0), // @[Top.scala:30:7]
.io_ptw_pmp_5_cfg_a (io_ptw_0_pmp_5_cfg_a_0), // @[Top.scala:30:7]
.io_ptw_pmp_5_cfg_x (io_ptw_0_pmp_5_cfg_x_0), // @[Top.scala:30:7]
.io_ptw_pmp_5_cfg_w (io_ptw_0_pmp_5_cfg_w_0), // @[Top.scala:30:7]
.io_ptw_pmp_5_cfg_r (io_ptw_0_pmp_5_cfg_r_0), // @[Top.scala:30:7]
.io_ptw_pmp_5_addr (io_ptw_0_pmp_5_addr_0), // @[Top.scala:30:7]
.io_ptw_pmp_5_mask (io_ptw_0_pmp_5_mask_0), // @[Top.scala:30:7]
.io_ptw_pmp_6_cfg_l (io_ptw_0_pmp_6_cfg_l_0), // @[Top.scala:30:7]
.io_ptw_pmp_6_cfg_a (io_ptw_0_pmp_6_cfg_a_0), // @[Top.scala:30:7]
.io_ptw_pmp_6_cfg_x (io_ptw_0_pmp_6_cfg_x_0), // @[Top.scala:30:7]
.io_ptw_pmp_6_cfg_w (io_ptw_0_pmp_6_cfg_w_0), // @[Top.scala:30:7]
.io_ptw_pmp_6_cfg_r (io_ptw_0_pmp_6_cfg_r_0), // @[Top.scala:30:7]
.io_ptw_pmp_6_addr (io_ptw_0_pmp_6_addr_0), // @[Top.scala:30:7]
.io_ptw_pmp_6_mask (io_ptw_0_pmp_6_mask_0), // @[Top.scala:30:7]
.io_ptw_pmp_7_cfg_l (io_ptw_0_pmp_7_cfg_l_0), // @[Top.scala:30:7]
.io_ptw_pmp_7_cfg_a (io_ptw_0_pmp_7_cfg_a_0), // @[Top.scala:30:7]
.io_ptw_pmp_7_cfg_x (io_ptw_0_pmp_7_cfg_x_0), // @[Top.scala:30:7]
.io_ptw_pmp_7_cfg_w (io_ptw_0_pmp_7_cfg_w_0), // @[Top.scala:30:7]
.io_ptw_pmp_7_cfg_r (io_ptw_0_pmp_7_cfg_r_0), // @[Top.scala:30:7]
.io_ptw_pmp_7_addr (io_ptw_0_pmp_7_addr_0), // @[Top.scala:30:7]
.io_ptw_pmp_7_mask (io_ptw_0_pmp_7_mask_0), // @[Top.scala:30:7]
.io_ptw_customCSRs_csrs_0_ren (io_ptw_0_customCSRs_csrs_0_ren_0), // @[Top.scala:30:7]
.io_ptw_customCSRs_csrs_0_wen (io_ptw_0_customCSRs_csrs_0_wen_0), // @[Top.scala:30:7]
.io_ptw_customCSRs_csrs_0_wdata (io_ptw_0_customCSRs_csrs_0_wdata_0), // @[Top.scala:30:7]
.io_ptw_customCSRs_csrs_0_value (io_ptw_0_customCSRs_csrs_0_value_0), // @[Top.scala:30:7]
.io_ptw_customCSRs_csrs_1_ren (io_ptw_0_customCSRs_csrs_1_ren_0), // @[Top.scala:30:7]
.io_ptw_customCSRs_csrs_1_wen (io_ptw_0_customCSRs_csrs_1_wen_0), // @[Top.scala:30:7]
.io_ptw_customCSRs_csrs_1_wdata (io_ptw_0_customCSRs_csrs_1_wdata_0), // @[Top.scala:30:7]
.io_ptw_customCSRs_csrs_1_value (io_ptw_0_customCSRs_csrs_1_value_0), // @[Top.scala:30:7]
.io_ptw_customCSRs_csrs_2_ren (io_ptw_0_customCSRs_csrs_2_ren_0), // @[Top.scala:30:7]
.io_ptw_customCSRs_csrs_2_wen (io_ptw_0_customCSRs_csrs_2_wen_0), // @[Top.scala:30:7]
.io_ptw_customCSRs_csrs_2_wdata (io_ptw_0_customCSRs_csrs_2_wdata_0), // @[Top.scala:30:7]
.io_ptw_customCSRs_csrs_2_value (io_ptw_0_customCSRs_csrs_2_value_0), // @[Top.scala:30:7]
.io_ptw_customCSRs_csrs_3_ren (io_ptw_0_customCSRs_csrs_3_ren_0), // @[Top.scala:30:7]
.io_ptw_customCSRs_csrs_3_wen (io_ptw_0_customCSRs_csrs_3_wen_0), // @[Top.scala:30:7]
.io_ptw_customCSRs_csrs_3_wdata (io_ptw_0_customCSRs_csrs_3_wdata_0), // @[Top.scala:30:7]
.io_ptw_customCSRs_csrs_3_value (io_ptw_0_customCSRs_csrs_3_value_0), // @[Top.scala:30:7]
.io_status_valid (_memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_valid), // @[Top.scala:35:31]
.io_status_bits_debug (_memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_debug), // @[Top.scala:35:31]
.io_status_bits_cease (_memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_cease), // @[Top.scala:35:31]
.io_status_bits_wfi (_memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_wfi), // @[Top.scala:35:31]
.io_status_bits_isa (_memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_isa), // @[Top.scala:35:31]
.io_status_bits_dprv (_memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_dprv), // @[Top.scala:35:31]
.io_status_bits_dv (_memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_dv), // @[Top.scala:35:31]
.io_status_bits_prv (_memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_prv), // @[Top.scala:35:31]
.io_status_bits_v (_memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_v), // @[Top.scala:35:31]
.io_status_bits_sd (_memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_sd), // @[Top.scala:35:31]
.io_status_bits_zero2 (_memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_zero2), // @[Top.scala:35:31]
.io_status_bits_mpv (_memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_mpv), // @[Top.scala:35:31]
.io_status_bits_gva (_memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_gva), // @[Top.scala:35:31]
.io_status_bits_mbe (_memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_mbe), // @[Top.scala:35:31]
.io_status_bits_sbe (_memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_sbe), // @[Top.scala:35:31]
.io_status_bits_sxl (_memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_sxl), // @[Top.scala:35:31]
.io_status_bits_uxl (_memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_uxl), // @[Top.scala:35:31]
.io_status_bits_sd_rv32 (_memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_sd_rv32), // @[Top.scala:35:31]
.io_status_bits_zero1 (_memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_zero1), // @[Top.scala:35:31]
.io_status_bits_tsr (_memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_tsr), // @[Top.scala:35:31]
.io_status_bits_tw (_memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_tw), // @[Top.scala:35:31]
.io_status_bits_tvm (_memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_tvm), // @[Top.scala:35:31]
.io_status_bits_mxr (_memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_mxr), // @[Top.scala:35:31]
.io_status_bits_sum (_memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_sum), // @[Top.scala:35:31]
.io_status_bits_mprv (_memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_mprv), // @[Top.scala:35:31]
.io_status_bits_xs (_memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_xs), // @[Top.scala:35:31]
.io_status_bits_fs (_memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_fs), // @[Top.scala:35:31]
.io_status_bits_mpp (_memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_mpp), // @[Top.scala:35:31]
.io_status_bits_vs (_memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_vs), // @[Top.scala:35:31]
.io_status_bits_spp (_memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_spp), // @[Top.scala:35:31]
.io_status_bits_mpie (_memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_mpie), // @[Top.scala:35:31]
.io_status_bits_ube (_memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_ube), // @[Top.scala:35:31]
.io_status_bits_spie (_memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_spie), // @[Top.scala:35:31]
.io_status_bits_upie (_memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_upie), // @[Top.scala:35:31]
.io_status_bits_mie (_memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_mie), // @[Top.scala:35:31]
.io_status_bits_hie (_memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_hie), // @[Top.scala:35:31]
.io_status_bits_sie (_memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_sie), // @[Top.scala:35:31]
.io_status_bits_uie (_memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_uie) // @[Top.scala:35:31]
); // @[MemStreamerAccel.scala:34:36]
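  // TLBuffer between l2_memloader's master port and width-widget #0, adding a buffer stage on
  // the A and D channels of the load path.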
TLBuffer_a32d256s5k5z4u buffers ( // @[Buffer.scala:80:47]
.clock (clock),
.reset (reset),
.auto_in_a_ready (_buffers_auto_in_a_ready),
.auto_in_a_valid (_l2_memloader_auto_master_out_a_valid), // @[MemStreamerAccel.scala:34:36]
.auto_in_a_bits_opcode (_l2_memloader_auto_master_out_a_bits_opcode), // @[MemStreamerAccel.scala:34:36]
.auto_in_a_bits_size (_l2_memloader_auto_master_out_a_bits_size), // @[MemStreamerAccel.scala:34:36]
.auto_in_a_bits_source (_l2_memloader_auto_master_out_a_bits_source), // @[MemStreamerAccel.scala:34:36]
.auto_in_a_bits_address (_l2_memloader_auto_master_out_a_bits_address), // @[MemStreamerAccel.scala:34:36]
.auto_in_a_bits_mask (_l2_memloader_auto_master_out_a_bits_mask), // @[MemStreamerAccel.scala:34:36]
.auto_in_a_bits_data (_l2_memloader_auto_master_out_a_bits_data), // @[MemStreamerAccel.scala:34:36]
.auto_in_d_ready (_l2_memloader_auto_master_out_d_ready), // @[MemStreamerAccel.scala:34:36]
.auto_in_d_valid (_buffers_auto_in_d_valid),
.auto_in_d_bits_opcode (_buffers_auto_in_d_bits_opcode),
.auto_in_d_bits_param (_buffers_auto_in_d_bits_param),
.auto_in_d_bits_size (_buffers_auto_in_d_bits_size),
.auto_in_d_bits_source (_buffers_auto_in_d_bits_source),
.auto_in_d_bits_sink (_buffers_auto_in_d_bits_sink),
.auto_in_d_bits_denied (_buffers_auto_in_d_bits_denied),
.auto_in_d_bits_data (_buffers_auto_in_d_bits_data),
.auto_in_d_bits_corrupt (_buffers_auto_in_d_bits_corrupt),
.auto_out_a_ready (widget_auto_anon_in_a_ready), // @[WidthWidget.scala:27:9]
.auto_out_a_valid (widget_auto_anon_in_a_valid),
.auto_out_a_bits_opcode (widget_auto_anon_in_a_bits_opcode),
.auto_out_a_bits_param (widget_auto_anon_in_a_bits_param),
.auto_out_a_bits_size (widget_auto_anon_in_a_bits_size),
.auto_out_a_bits_source (widget_auto_anon_in_a_bits_source),
.auto_out_a_bits_address (widget_auto_anon_in_a_bits_address),
.auto_out_a_bits_mask (widget_auto_anon_in_a_bits_mask),
.auto_out_a_bits_data (widget_auto_anon_in_a_bits_data),
.auto_out_a_bits_corrupt (widget_auto_anon_in_a_bits_corrupt),
.auto_out_d_ready (widget_auto_anon_in_d_ready),
.auto_out_d_valid (widget_auto_anon_in_d_valid), // @[WidthWidget.scala:27:9]
.auto_out_d_bits_opcode (widget_auto_anon_in_d_bits_opcode), // @[WidthWidget.scala:27:9]
.auto_out_d_bits_param (widget_auto_anon_in_d_bits_param), // @[WidthWidget.scala:27:9]
.auto_out_d_bits_size (widget_auto_anon_in_d_bits_size), // @[WidthWidget.scala:27:9]
.auto_out_d_bits_source (widget_auto_anon_in_d_bits_source), // @[WidthWidget.scala:27:9]
.auto_out_d_bits_sink (widget_auto_anon_in_d_bits_sink), // @[WidthWidget.scala:27:9]
.auto_out_d_bits_denied (widget_auto_anon_in_d_bits_denied), // @[WidthWidget.scala:27:9]
.auto_out_d_bits_data (widget_auto_anon_in_d_bits_data), // @[WidthWidget.scala:27:9]
.auto_out_d_bits_corrupt (widget_auto_anon_in_d_bits_corrupt) // @[WidthWidget.scala:27:9]
); // @[Buffer.scala:80:47]
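  // Second L2MemHelper instance for the store path: the memwriter supplies address, size and
  // data on io_userif, io_ptw connects to PTW #1 (io_ptw_1_*), and TileLink traffic goes out
  // via the buffers_1 TLBuffer (presumably on toward auto_atl_out_1 through width-widget #1).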
L2MemHelper_1 l2_memwriter ( // @[MemStreamerAccel.scala:37:36]
.clock (clock),
.reset (reset),
.auto_master_out_a_ready (_buffers_1_auto_in_a_ready), // @[Buffer.scala:80:47]
.auto_master_out_a_valid (_l2_memwriter_auto_master_out_a_valid),
.auto_master_out_a_bits_opcode (_l2_memwriter_auto_master_out_a_bits_opcode),
.auto_master_out_a_bits_size (_l2_memwriter_auto_master_out_a_bits_size),
.auto_master_out_a_bits_source (_l2_memwriter_auto_master_out_a_bits_source),
.auto_master_out_a_bits_address (_l2_memwriter_auto_master_out_a_bits_address),
.auto_master_out_a_bits_mask (_l2_memwriter_auto_master_out_a_bits_mask),
.auto_master_out_a_bits_data (_l2_memwriter_auto_master_out_a_bits_data),
.auto_master_out_d_ready (_l2_memwriter_auto_master_out_d_ready),
.auto_master_out_d_valid (_buffers_1_auto_in_d_valid), // @[Buffer.scala:80:47]
.auto_master_out_d_bits_opcode (_buffers_1_auto_in_d_bits_opcode), // @[Buffer.scala:80:47]
.auto_master_out_d_bits_param (_buffers_1_auto_in_d_bits_param), // @[Buffer.scala:80:47]
.auto_master_out_d_bits_size (_buffers_1_auto_in_d_bits_size), // @[Buffer.scala:80:47]
.auto_master_out_d_bits_source (_buffers_1_auto_in_d_bits_source), // @[Buffer.scala:80:47]
.auto_master_out_d_bits_sink (_buffers_1_auto_in_d_bits_sink), // @[Buffer.scala:80:47]
.auto_master_out_d_bits_denied (_buffers_1_auto_in_d_bits_denied), // @[Buffer.scala:80:47]
.auto_master_out_d_bits_data (_buffers_1_auto_in_d_bits_data), // @[Buffer.scala:80:47]
.auto_master_out_d_bits_corrupt (_buffers_1_auto_in_d_bits_corrupt), // @[Buffer.scala:80:47]
.io_userif_req_ready (_l2_memwriter_io_userif_req_ready),
.io_userif_req_valid (_memwriter_io_l2io_req_valid), // @[MemStreamerAccel.scala:67:25]
.io_userif_req_bits_addr (_memwriter_io_l2io_req_bits_addr), // @[MemStreamerAccel.scala:67:25]
.io_userif_req_bits_size (_memwriter_io_l2io_req_bits_size), // @[MemStreamerAccel.scala:67:25]
.io_userif_req_bits_data (_memwriter_io_l2io_req_bits_data), // @[MemStreamerAccel.scala:67:25]
.io_userif_resp_valid (_l2_memwriter_io_userif_resp_valid),
.io_userif_resp_bits_data (_l2_memwriter_io_userif_resp_bits_data),
.io_userif_no_memops_inflight (_l2_memwriter_io_userif_no_memops_inflight),
.io_sfence (_memloader_io_src_info_cmd_router_cur_funct_io_sfence_out), // @[Top.scala:35:31]
.io_ptw_req_ready (io_ptw_1_req_ready_0), // @[Top.scala:30:7]
.io_ptw_req_valid (io_ptw_1_req_valid_0),
.io_ptw_req_bits_bits_addr (io_ptw_1_req_bits_bits_addr_0),
.io_ptw_req_bits_bits_need_gpa (io_ptw_1_req_bits_bits_need_gpa_0),
.io_ptw_resp_valid (io_ptw_1_resp_valid_0), // @[Top.scala:30:7]
.io_ptw_resp_bits_ae_ptw (io_ptw_1_resp_bits_ae_ptw_0), // @[Top.scala:30:7]
.io_ptw_resp_bits_ae_final (io_ptw_1_resp_bits_ae_final_0), // @[Top.scala:30:7]
.io_ptw_resp_bits_pf (io_ptw_1_resp_bits_pf_0), // @[Top.scala:30:7]
.io_ptw_resp_bits_gf (io_ptw_1_resp_bits_gf_0), // @[Top.scala:30:7]
.io_ptw_resp_bits_hr (io_ptw_1_resp_bits_hr_0), // @[Top.scala:30:7]
.io_ptw_resp_bits_hw (io_ptw_1_resp_bits_hw_0), // @[Top.scala:30:7]
.io_ptw_resp_bits_hx (io_ptw_1_resp_bits_hx_0), // @[Top.scala:30:7]
.io_ptw_resp_bits_pte_reserved_for_future (io_ptw_1_resp_bits_pte_reserved_for_future_0), // @[Top.scala:30:7]
.io_ptw_resp_bits_pte_ppn (io_ptw_1_resp_bits_pte_ppn_0), // @[Top.scala:30:7]
.io_ptw_resp_bits_pte_reserved_for_software (io_ptw_1_resp_bits_pte_reserved_for_software_0), // @[Top.scala:30:7]
.io_ptw_resp_bits_pte_d (io_ptw_1_resp_bits_pte_d_0), // @[Top.scala:30:7]
.io_ptw_resp_bits_pte_a (io_ptw_1_resp_bits_pte_a_0), // @[Top.scala:30:7]
.io_ptw_resp_bits_pte_g (io_ptw_1_resp_bits_pte_g_0), // @[Top.scala:30:7]
.io_ptw_resp_bits_pte_u (io_ptw_1_resp_bits_pte_u_0), // @[Top.scala:30:7]
.io_ptw_resp_bits_pte_x (io_ptw_1_resp_bits_pte_x_0), // @[Top.scala:30:7]
.io_ptw_resp_bits_pte_w (io_ptw_1_resp_bits_pte_w_0), // @[Top.scala:30:7]
.io_ptw_resp_bits_pte_r (io_ptw_1_resp_bits_pte_r_0), // @[Top.scala:30:7]
.io_ptw_resp_bits_pte_v (io_ptw_1_resp_bits_pte_v_0), // @[Top.scala:30:7]
.io_ptw_resp_bits_level (io_ptw_1_resp_bits_level_0), // @[Top.scala:30:7]
.io_ptw_resp_bits_homogeneous (io_ptw_1_resp_bits_homogeneous_0), // @[Top.scala:30:7]
.io_ptw_resp_bits_gpa_valid (io_ptw_1_resp_bits_gpa_valid_0), // @[Top.scala:30:7]
.io_ptw_resp_bits_gpa_bits (io_ptw_1_resp_bits_gpa_bits_0), // @[Top.scala:30:7]
.io_ptw_resp_bits_gpa_is_pte (io_ptw_1_resp_bits_gpa_is_pte_0), // @[Top.scala:30:7]
.io_ptw_ptbr_mode (io_ptw_1_ptbr_mode_0), // @[Top.scala:30:7]
.io_ptw_ptbr_ppn (io_ptw_1_ptbr_ppn_0), // @[Top.scala:30:7]
.io_ptw_status_debug (io_ptw_1_status_debug_0), // @[Top.scala:30:7]
.io_ptw_status_cease (io_ptw_1_status_cease_0), // @[Top.scala:30:7]
.io_ptw_status_wfi (io_ptw_1_status_wfi_0), // @[Top.scala:30:7]
.io_ptw_status_isa (io_ptw_1_status_isa_0), // @[Top.scala:30:7]
.io_ptw_status_dprv (io_ptw_1_status_dprv_0), // @[Top.scala:30:7]
.io_ptw_status_dv (io_ptw_1_status_dv_0), // @[Top.scala:30:7]
.io_ptw_status_prv (io_ptw_1_status_prv_0), // @[Top.scala:30:7]
.io_ptw_status_v (io_ptw_1_status_v_0), // @[Top.scala:30:7]
.io_ptw_status_mpv (io_ptw_1_status_mpv_0), // @[Top.scala:30:7]
.io_ptw_status_gva (io_ptw_1_status_gva_0), // @[Top.scala:30:7]
.io_ptw_status_tsr (io_ptw_1_status_tsr_0), // @[Top.scala:30:7]
.io_ptw_status_tw (io_ptw_1_status_tw_0), // @[Top.scala:30:7]
.io_ptw_status_tvm (io_ptw_1_status_tvm_0), // @[Top.scala:30:7]
.io_ptw_status_mxr (io_ptw_1_status_mxr_0), // @[Top.scala:30:7]
.io_ptw_status_sum (io_ptw_1_status_sum_0), // @[Top.scala:30:7]
.io_ptw_status_mprv (io_ptw_1_status_mprv_0), // @[Top.scala:30:7]
.io_ptw_status_fs (io_ptw_1_status_fs_0), // @[Top.scala:30:7]
.io_ptw_status_mpp (io_ptw_1_status_mpp_0), // @[Top.scala:30:7]
.io_ptw_status_spp (io_ptw_1_status_spp_0), // @[Top.scala:30:7]
.io_ptw_status_mpie (io_ptw_1_status_mpie_0), // @[Top.scala:30:7]
.io_ptw_status_spie (io_ptw_1_status_spie_0), // @[Top.scala:30:7]
.io_ptw_status_mie (io_ptw_1_status_mie_0), // @[Top.scala:30:7]
.io_ptw_status_sie (io_ptw_1_status_sie_0), // @[Top.scala:30:7]
.io_ptw_hstatus_spvp (io_ptw_1_hstatus_spvp_0), // @[Top.scala:30:7]
.io_ptw_hstatus_spv (io_ptw_1_hstatus_spv_0), // @[Top.scala:30:7]
.io_ptw_hstatus_gva (io_ptw_1_hstatus_gva_0), // @[Top.scala:30:7]
.io_ptw_gstatus_debug (io_ptw_1_gstatus_debug_0), // @[Top.scala:30:7]
.io_ptw_gstatus_cease (io_ptw_1_gstatus_cease_0), // @[Top.scala:30:7]
.io_ptw_gstatus_wfi (io_ptw_1_gstatus_wfi_0), // @[Top.scala:30:7]
.io_ptw_gstatus_isa (io_ptw_1_gstatus_isa_0), // @[Top.scala:30:7]
.io_ptw_gstatus_dprv (io_ptw_1_gstatus_dprv_0), // @[Top.scala:30:7]
.io_ptw_gstatus_dv (io_ptw_1_gstatus_dv_0), // @[Top.scala:30:7]
.io_ptw_gstatus_prv (io_ptw_1_gstatus_prv_0), // @[Top.scala:30:7]
.io_ptw_gstatus_v (io_ptw_1_gstatus_v_0), // @[Top.scala:30:7]
.io_ptw_gstatus_zero2 (io_ptw_1_gstatus_zero2_0), // @[Top.scala:30:7]
.io_ptw_gstatus_mpv (io_ptw_1_gstatus_mpv_0), // @[Top.scala:30:7]
.io_ptw_gstatus_gva (io_ptw_1_gstatus_gva_0), // @[Top.scala:30:7]
.io_ptw_gstatus_mbe (io_ptw_1_gstatus_mbe_0), // @[Top.scala:30:7]
.io_ptw_gstatus_sbe (io_ptw_1_gstatus_sbe_0), // @[Top.scala:30:7]
.io_ptw_gstatus_sxl (io_ptw_1_gstatus_sxl_0), // @[Top.scala:30:7]
.io_ptw_gstatus_zero1 (io_ptw_1_gstatus_zero1_0), // @[Top.scala:30:7]
.io_ptw_gstatus_tsr (io_ptw_1_gstatus_tsr_0), // @[Top.scala:30:7]
.io_ptw_gstatus_tw (io_ptw_1_gstatus_tw_0), // @[Top.scala:30:7]
.io_ptw_gstatus_tvm (io_ptw_1_gstatus_tvm_0), // @[Top.scala:30:7]
.io_ptw_gstatus_mxr (io_ptw_1_gstatus_mxr_0), // @[Top.scala:30:7]
.io_ptw_gstatus_sum (io_ptw_1_gstatus_sum_0), // @[Top.scala:30:7]
.io_ptw_gstatus_mprv (io_ptw_1_gstatus_mprv_0), // @[Top.scala:30:7]
.io_ptw_gstatus_fs (io_ptw_1_gstatus_fs_0), // @[Top.scala:30:7]
.io_ptw_gstatus_mpp (io_ptw_1_gstatus_mpp_0), // @[Top.scala:30:7]
.io_ptw_gstatus_vs (io_ptw_1_gstatus_vs_0), // @[Top.scala:30:7]
.io_ptw_gstatus_spp (io_ptw_1_gstatus_spp_0), // @[Top.scala:30:7]
.io_ptw_gstatus_mpie (io_ptw_1_gstatus_mpie_0), // @[Top.scala:30:7]
.io_ptw_gstatus_ube (io_ptw_1_gstatus_ube_0), // @[Top.scala:30:7]
.io_ptw_gstatus_spie (io_ptw_1_gstatus_spie_0), // @[Top.scala:30:7]
.io_ptw_gstatus_upie (io_ptw_1_gstatus_upie_0), // @[Top.scala:30:7]
.io_ptw_gstatus_mie (io_ptw_1_gstatus_mie_0), // @[Top.scala:30:7]
.io_ptw_gstatus_hie (io_ptw_1_gstatus_hie_0), // @[Top.scala:30:7]
.io_ptw_gstatus_sie (io_ptw_1_gstatus_sie_0), // @[Top.scala:30:7]
.io_ptw_gstatus_uie (io_ptw_1_gstatus_uie_0), // @[Top.scala:30:7]
.io_ptw_pmp_0_cfg_l (io_ptw_1_pmp_0_cfg_l_0), // @[Top.scala:30:7]
.io_ptw_pmp_0_cfg_a (io_ptw_1_pmp_0_cfg_a_0), // @[Top.scala:30:7]
.io_ptw_pmp_0_cfg_x (io_ptw_1_pmp_0_cfg_x_0), // @[Top.scala:30:7]
.io_ptw_pmp_0_cfg_w (io_ptw_1_pmp_0_cfg_w_0), // @[Top.scala:30:7]
.io_ptw_pmp_0_cfg_r (io_ptw_1_pmp_0_cfg_r_0), // @[Top.scala:30:7]
.io_ptw_pmp_0_addr (io_ptw_1_pmp_0_addr_0), // @[Top.scala:30:7]
.io_ptw_pmp_0_mask (io_ptw_1_pmp_0_mask_0), // @[Top.scala:30:7]
.io_ptw_pmp_1_cfg_l (io_ptw_1_pmp_1_cfg_l_0), // @[Top.scala:30:7]
.io_ptw_pmp_1_cfg_a (io_ptw_1_pmp_1_cfg_a_0), // @[Top.scala:30:7]
.io_ptw_pmp_1_cfg_x (io_ptw_1_pmp_1_cfg_x_0), // @[Top.scala:30:7]
.io_ptw_pmp_1_cfg_w (io_ptw_1_pmp_1_cfg_w_0), // @[Top.scala:30:7]
.io_ptw_pmp_1_cfg_r (io_ptw_1_pmp_1_cfg_r_0), // @[Top.scala:30:7]
.io_ptw_pmp_1_addr (io_ptw_1_pmp_1_addr_0), // @[Top.scala:30:7]
.io_ptw_pmp_1_mask (io_ptw_1_pmp_1_mask_0), // @[Top.scala:30:7]
.io_ptw_pmp_2_cfg_l (io_ptw_1_pmp_2_cfg_l_0), // @[Top.scala:30:7]
.io_ptw_pmp_2_cfg_a (io_ptw_1_pmp_2_cfg_a_0), // @[Top.scala:30:7]
.io_ptw_pmp_2_cfg_x (io_ptw_1_pmp_2_cfg_x_0), // @[Top.scala:30:7]
.io_ptw_pmp_2_cfg_w (io_ptw_1_pmp_2_cfg_w_0), // @[Top.scala:30:7]
.io_ptw_pmp_2_cfg_r (io_ptw_1_pmp_2_cfg_r_0), // @[Top.scala:30:7]
.io_ptw_pmp_2_addr (io_ptw_1_pmp_2_addr_0), // @[Top.scala:30:7]
.io_ptw_pmp_2_mask (io_ptw_1_pmp_2_mask_0), // @[Top.scala:30:7]
.io_ptw_pmp_3_cfg_l (io_ptw_1_pmp_3_cfg_l_0), // @[Top.scala:30:7]
.io_ptw_pmp_3_cfg_a (io_ptw_1_pmp_3_cfg_a_0), // @[Top.scala:30:7]
.io_ptw_pmp_3_cfg_x (io_ptw_1_pmp_3_cfg_x_0), // @[Top.scala:30:7]
.io_ptw_pmp_3_cfg_w (io_ptw_1_pmp_3_cfg_w_0), // @[Top.scala:30:7]
.io_ptw_pmp_3_cfg_r (io_ptw_1_pmp_3_cfg_r_0), // @[Top.scala:30:7]
.io_ptw_pmp_3_addr (io_ptw_1_pmp_3_addr_0), // @[Top.scala:30:7]
.io_ptw_pmp_3_mask (io_ptw_1_pmp_3_mask_0), // @[Top.scala:30:7]
.io_ptw_pmp_4_cfg_l (io_ptw_1_pmp_4_cfg_l_0), // @[Top.scala:30:7]
.io_ptw_pmp_4_cfg_a (io_ptw_1_pmp_4_cfg_a_0), // @[Top.scala:30:7]
.io_ptw_pmp_4_cfg_x (io_ptw_1_pmp_4_cfg_x_0), // @[Top.scala:30:7]
.io_ptw_pmp_4_cfg_w (io_ptw_1_pmp_4_cfg_w_0), // @[Top.scala:30:7]
.io_ptw_pmp_4_cfg_r (io_ptw_1_pmp_4_cfg_r_0), // @[Top.scala:30:7]
.io_ptw_pmp_4_addr (io_ptw_1_pmp_4_addr_0), // @[Top.scala:30:7]
.io_ptw_pmp_4_mask (io_ptw_1_pmp_4_mask_0), // @[Top.scala:30:7]
.io_ptw_pmp_5_cfg_l (io_ptw_1_pmp_5_cfg_l_0), // @[Top.scala:30:7]
.io_ptw_pmp_5_cfg_a (io_ptw_1_pmp_5_cfg_a_0), // @[Top.scala:30:7]
.io_ptw_pmp_5_cfg_x (io_ptw_1_pmp_5_cfg_x_0), // @[Top.scala:30:7]
.io_ptw_pmp_5_cfg_w (io_ptw_1_pmp_5_cfg_w_0), // @[Top.scala:30:7]
.io_ptw_pmp_5_cfg_r (io_ptw_1_pmp_5_cfg_r_0), // @[Top.scala:30:7]
.io_ptw_pmp_5_addr (io_ptw_1_pmp_5_addr_0), // @[Top.scala:30:7]
.io_ptw_pmp_5_mask (io_ptw_1_pmp_5_mask_0), // @[Top.scala:30:7]
.io_ptw_pmp_6_cfg_l (io_ptw_1_pmp_6_cfg_l_0), // @[Top.scala:30:7]
.io_ptw_pmp_6_cfg_a (io_ptw_1_pmp_6_cfg_a_0), // @[Top.scala:30:7]
.io_ptw_pmp_6_cfg_x (io_ptw_1_pmp_6_cfg_x_0), // @[Top.scala:30:7]
.io_ptw_pmp_6_cfg_w (io_ptw_1_pmp_6_cfg_w_0), // @[Top.scala:30:7]
.io_ptw_pmp_6_cfg_r (io_ptw_1_pmp_6_cfg_r_0), // @[Top.scala:30:7]
.io_ptw_pmp_6_addr (io_ptw_1_pmp_6_addr_0), // @[Top.scala:30:7]
.io_ptw_pmp_6_mask (io_ptw_1_pmp_6_mask_0), // @[Top.scala:30:7]
.io_ptw_pmp_7_cfg_l (io_ptw_1_pmp_7_cfg_l_0), // @[Top.scala:30:7]
.io_ptw_pmp_7_cfg_a (io_ptw_1_pmp_7_cfg_a_0), // @[Top.scala:30:7]
.io_ptw_pmp_7_cfg_x (io_ptw_1_pmp_7_cfg_x_0), // @[Top.scala:30:7]
.io_ptw_pmp_7_cfg_w (io_ptw_1_pmp_7_cfg_w_0), // @[Top.scala:30:7]
.io_ptw_pmp_7_cfg_r (io_ptw_1_pmp_7_cfg_r_0), // @[Top.scala:30:7]
.io_ptw_pmp_7_addr (io_ptw_1_pmp_7_addr_0), // @[Top.scala:30:7]
.io_ptw_pmp_7_mask (io_ptw_1_pmp_7_mask_0), // @[Top.scala:30:7]
.io_ptw_customCSRs_csrs_0_ren (io_ptw_1_customCSRs_csrs_0_ren_0), // @[Top.scala:30:7]
.io_ptw_customCSRs_csrs_0_wen (io_ptw_1_customCSRs_csrs_0_wen_0), // @[Top.scala:30:7]
.io_ptw_customCSRs_csrs_0_wdata (io_ptw_1_customCSRs_csrs_0_wdata_0), // @[Top.scala:30:7]
.io_ptw_customCSRs_csrs_0_value (io_ptw_1_customCSRs_csrs_0_value_0), // @[Top.scala:30:7]
.io_ptw_customCSRs_csrs_1_ren (io_ptw_1_customCSRs_csrs_1_ren_0), // @[Top.scala:30:7]
.io_ptw_customCSRs_csrs_1_wen (io_ptw_1_customCSRs_csrs_1_wen_0), // @[Top.scala:30:7]
.io_ptw_customCSRs_csrs_1_wdata (io_ptw_1_customCSRs_csrs_1_wdata_0), // @[Top.scala:30:7]
.io_ptw_customCSRs_csrs_1_value (io_ptw_1_customCSRs_csrs_1_value_0), // @[Top.scala:30:7]
.io_ptw_customCSRs_csrs_2_ren (io_ptw_1_customCSRs_csrs_2_ren_0), // @[Top.scala:30:7]
.io_ptw_customCSRs_csrs_2_wen (io_ptw_1_customCSRs_csrs_2_wen_0), // @[Top.scala:30:7]
.io_ptw_customCSRs_csrs_2_wdata (io_ptw_1_customCSRs_csrs_2_wdata_0), // @[Top.scala:30:7]
.io_ptw_customCSRs_csrs_2_value (io_ptw_1_customCSRs_csrs_2_value_0), // @[Top.scala:30:7]
.io_ptw_customCSRs_csrs_3_ren (io_ptw_1_customCSRs_csrs_3_ren_0), // @[Top.scala:30:7]
.io_ptw_customCSRs_csrs_3_wen (io_ptw_1_customCSRs_csrs_3_wen_0), // @[Top.scala:30:7]
.io_ptw_customCSRs_csrs_3_wdata (io_ptw_1_customCSRs_csrs_3_wdata_0), // @[Top.scala:30:7]
.io_ptw_customCSRs_csrs_3_value (io_ptw_1_customCSRs_csrs_3_value_0), // @[Top.scala:30:7]
.io_status_valid (_memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_valid), // @[Top.scala:35:31]
.io_status_bits_debug (_memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_debug), // @[Top.scala:35:31]
.io_status_bits_cease (_memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_cease), // @[Top.scala:35:31]
.io_status_bits_wfi (_memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_wfi), // @[Top.scala:35:31]
.io_status_bits_isa (_memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_isa), // @[Top.scala:35:31]
.io_status_bits_dprv (_memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_dprv), // @[Top.scala:35:31]
.io_status_bits_dv (_memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_dv), // @[Top.scala:35:31]
.io_status_bits_prv (_memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_prv), // @[Top.scala:35:31]
.io_status_bits_v (_memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_v), // @[Top.scala:35:31]
.io_status_bits_sd (_memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_sd), // @[Top.scala:35:31]
.io_status_bits_zero2 (_memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_zero2), // @[Top.scala:35:31]
.io_status_bits_mpv (_memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_mpv), // @[Top.scala:35:31]
.io_status_bits_gva (_memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_gva), // @[Top.scala:35:31]
.io_status_bits_mbe (_memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_mbe), // @[Top.scala:35:31]
.io_status_bits_sbe (_memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_sbe), // @[Top.scala:35:31]
.io_status_bits_sxl (_memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_sxl), // @[Top.scala:35:31]
.io_status_bits_uxl (_memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_uxl), // @[Top.scala:35:31]
.io_status_bits_sd_rv32 (_memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_sd_rv32), // @[Top.scala:35:31]
.io_status_bits_zero1 (_memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_zero1), // @[Top.scala:35:31]
.io_status_bits_tsr (_memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_tsr), // @[Top.scala:35:31]
.io_status_bits_tw (_memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_tw), // @[Top.scala:35:31]
.io_status_bits_tvm (_memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_tvm), // @[Top.scala:35:31]
.io_status_bits_mxr (_memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_mxr), // @[Top.scala:35:31]
.io_status_bits_sum (_memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_sum), // @[Top.scala:35:31]
.io_status_bits_mprv (_memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_mprv), // @[Top.scala:35:31]
.io_status_bits_xs (_memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_xs), // @[Top.scala:35:31]
.io_status_bits_fs (_memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_fs), // @[Top.scala:35:31]
.io_status_bits_mpp (_memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_mpp), // @[Top.scala:35:31]
.io_status_bits_vs (_memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_vs), // @[Top.scala:35:31]
.io_status_bits_spp (_memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_spp), // @[Top.scala:35:31]
.io_status_bits_mpie (_memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_mpie), // @[Top.scala:35:31]
.io_status_bits_ube (_memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_ube), // @[Top.scala:35:31]
.io_status_bits_spie (_memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_spie), // @[Top.scala:35:31]
.io_status_bits_upie (_memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_upie), // @[Top.scala:35:31]
.io_status_bits_mie (_memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_mie), // @[Top.scala:35:31]
.io_status_bits_hie (_memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_hie), // @[Top.scala:35:31]
.io_status_bits_sie (_memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_sie), // @[Top.scala:35:31]
.io_status_bits_uie (_memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_uie) // @[Top.scala:35:31]
); // @[MemStreamerAccel.scala:37:36]
TLBuffer_a32d256s5k5z4u_1 buffers_1 ( // @[Buffer.scala:80:47]
.clock (clock),
.reset (reset),
.auto_in_a_ready (_buffers_1_auto_in_a_ready),
.auto_in_a_valid (_l2_memwriter_auto_master_out_a_valid), // @[MemStreamerAccel.scala:37:36]
.auto_in_a_bits_opcode (_l2_memwriter_auto_master_out_a_bits_opcode), // @[MemStreamerAccel.scala:37:36]
.auto_in_a_bits_size (_l2_memwriter_auto_master_out_a_bits_size), // @[MemStreamerAccel.scala:37:36]
.auto_in_a_bits_source (_l2_memwriter_auto_master_out_a_bits_source), // @[MemStreamerAccel.scala:37:36]
.auto_in_a_bits_address (_l2_memwriter_auto_master_out_a_bits_address), // @[MemStreamerAccel.scala:37:36]
.auto_in_a_bits_mask (_l2_memwriter_auto_master_out_a_bits_mask), // @[MemStreamerAccel.scala:37:36]
.auto_in_a_bits_data (_l2_memwriter_auto_master_out_a_bits_data), // @[MemStreamerAccel.scala:37:36]
.auto_in_d_ready (_l2_memwriter_auto_master_out_d_ready), // @[MemStreamerAccel.scala:37:36]
.auto_in_d_valid (_buffers_1_auto_in_d_valid),
.auto_in_d_bits_opcode (_buffers_1_auto_in_d_bits_opcode),
.auto_in_d_bits_param (_buffers_1_auto_in_d_bits_param),
.auto_in_d_bits_size (_buffers_1_auto_in_d_bits_size),
.auto_in_d_bits_source (_buffers_1_auto_in_d_bits_source),
.auto_in_d_bits_sink (_buffers_1_auto_in_d_bits_sink),
.auto_in_d_bits_denied (_buffers_1_auto_in_d_bits_denied),
.auto_in_d_bits_data (_buffers_1_auto_in_d_bits_data),
.auto_in_d_bits_corrupt (_buffers_1_auto_in_d_bits_corrupt),
.auto_out_a_ready (widget_1_auto_anon_in_a_ready), // @[WidthWidget.scala:27:9]
.auto_out_a_valid (widget_1_auto_anon_in_a_valid),
.auto_out_a_bits_opcode (widget_1_auto_anon_in_a_bits_opcode),
.auto_out_a_bits_param (widget_1_auto_anon_in_a_bits_param),
.auto_out_a_bits_size (widget_1_auto_anon_in_a_bits_size),
.auto_out_a_bits_source (widget_1_auto_anon_in_a_bits_source),
.auto_out_a_bits_address (widget_1_auto_anon_in_a_bits_address),
.auto_out_a_bits_mask (widget_1_auto_anon_in_a_bits_mask),
.auto_out_a_bits_data (widget_1_auto_anon_in_a_bits_data),
.auto_out_a_bits_corrupt (widget_1_auto_anon_in_a_bits_corrupt),
.auto_out_d_ready (widget_1_auto_anon_in_d_ready),
.auto_out_d_valid (widget_1_auto_anon_in_d_valid), // @[WidthWidget.scala:27:9]
.auto_out_d_bits_opcode (widget_1_auto_anon_in_d_bits_opcode), // @[WidthWidget.scala:27:9]
.auto_out_d_bits_param (widget_1_auto_anon_in_d_bits_param), // @[WidthWidget.scala:27:9]
.auto_out_d_bits_size (widget_1_auto_anon_in_d_bits_size), // @[WidthWidget.scala:27:9]
.auto_out_d_bits_source (widget_1_auto_anon_in_d_bits_source), // @[WidthWidget.scala:27:9]
.auto_out_d_bits_sink (widget_1_auto_anon_in_d_bits_sink), // @[WidthWidget.scala:27:9]
.auto_out_d_bits_denied (widget_1_auto_anon_in_d_bits_denied), // @[WidthWidget.scala:27:9]
.auto_out_d_bits_data (widget_1_auto_anon_in_d_bits_data), // @[WidthWidget.scala:27:9]
.auto_out_d_bits_corrupt (widget_1_auto_anon_in_d_bits_corrupt) // @[WidthWidget.scala:27:9]
); // @[Buffer.scala:80:47]
MemLoader memloader ( // @[MemStreamerAccel.scala:63:25]
.clock (clock),
.reset (reset),
.io_l2helperUser_req_ready (_l2_memloader_io_userif_req_ready), // @[MemStreamerAccel.scala:34:36]
.io_l2helperUser_req_valid (_memloader_io_l2helperUser_req_valid),
.io_l2helperUser_req_bits_addr (_memloader_io_l2helperUser_req_bits_addr),
.io_l2helperUser_resp_ready (_memloader_io_l2helperUser_resp_ready),
.io_l2helperUser_resp_valid (_l2_memloader_io_userif_resp_valid), // @[MemStreamerAccel.scala:34:36]
.io_l2helperUser_resp_bits_data (_l2_memloader_io_userif_resp_bits_data), // @[MemStreamerAccel.scala:34:36]
.io_l2helperUser_no_memops_inflight (_l2_memloader_io_userif_no_memops_inflight), // @[MemStreamerAccel.scala:34:36]
.io_src_info_ready (_memloader_io_src_info_ready),
.io_src_info_valid (_memloader_io_src_info_cmd_router_cur_funct_io_src_info_valid), // @[Top.scala:35:31]
.io_src_info_bits_ip (_memloader_io_src_info_cmd_router_cur_funct_io_src_info_bits_ip), // @[Top.scala:35:31]
.io_src_info_bits_isize (_memloader_io_src_info_cmd_router_cur_funct_io_src_info_bits_isize), // @[Top.scala:35:31]
.io_consumer_user_consumed_bytes (_streamer_load_data_queue_io_enq_bits_chunk_data_io_mem_stream_user_consumed_bytes), // @[Top.scala:36:29]
.io_consumer_available_output_bytes (_memloader_io_consumer_available_output_bytes),
.io_consumer_output_valid (_memloader_io_consumer_output_valid),
.io_consumer_output_ready (_streamer_load_data_queue_io_enq_bits_chunk_data_io_mem_stream_output_ready), // @[Top.scala:36:29]
.io_consumer_output_data (_memloader_io_consumer_output_data),
.io_consumer_output_last_chunk (_memloader_io_consumer_output_last_chunk)
); // @[MemStreamerAccel.scala:63:25]
CommandRouter memloader_io_src_info_cmd_router ( // @[Top.scala:35:31]
.clock (clock),
.reset (reset),
.cur_funct_io_rocc_in_ready (io_cmd_ready_0),
.cur_funct_io_rocc_in_valid (io_cmd_valid_0), // @[Top.scala:30:7]
.cur_funct_io_rocc_in_bits_inst_funct (io_cmd_bits_inst_funct_0), // @[Top.scala:30:7]
.cur_funct_io_rocc_in_bits_inst_rs2 (io_cmd_bits_inst_rs2_0), // @[Top.scala:30:7]
.cur_funct_io_rocc_in_bits_inst_rs1 (io_cmd_bits_inst_rs1_0), // @[Top.scala:30:7]
.cur_funct_io_rocc_in_bits_inst_xd (io_cmd_bits_inst_xd_0), // @[Top.scala:30:7]
.cur_funct_io_rocc_in_bits_inst_xs1 (io_cmd_bits_inst_xs1_0), // @[Top.scala:30:7]
.cur_funct_io_rocc_in_bits_inst_xs2 (io_cmd_bits_inst_xs2_0), // @[Top.scala:30:7]
.cur_funct_io_rocc_in_bits_inst_rd (io_cmd_bits_inst_rd_0), // @[Top.scala:30:7]
.cur_funct_io_rocc_in_bits_inst_opcode (io_cmd_bits_inst_opcode_0), // @[Top.scala:30:7]
.cur_funct_io_rocc_in_bits_rs1 (io_cmd_bits_rs1_0), // @[Top.scala:30:7]
.cur_funct_io_rocc_in_bits_rs2 (io_cmd_bits_rs2_0), // @[Top.scala:30:7]
.cur_funct_io_rocc_in_bits_status_debug (io_cmd_bits_status_debug_0), // @[Top.scala:30:7]
.cur_funct_io_rocc_in_bits_status_cease (io_cmd_bits_status_cease_0), // @[Top.scala:30:7]
.cur_funct_io_rocc_in_bits_status_wfi (io_cmd_bits_status_wfi_0), // @[Top.scala:30:7]
.cur_funct_io_rocc_in_bits_status_isa (io_cmd_bits_status_isa_0), // @[Top.scala:30:7]
.cur_funct_io_rocc_in_bits_status_dprv (io_cmd_bits_status_dprv_0), // @[Top.scala:30:7]
.cur_funct_io_rocc_in_bits_status_dv (io_cmd_bits_status_dv_0), // @[Top.scala:30:7]
.cur_funct_io_rocc_in_bits_status_prv (io_cmd_bits_status_prv_0), // @[Top.scala:30:7]
.cur_funct_io_rocc_in_bits_status_v (io_cmd_bits_status_v_0), // @[Top.scala:30:7]
.cur_funct_io_rocc_in_bits_status_sd (io_cmd_bits_status_sd_0), // @[Top.scala:30:7]
.cur_funct_io_rocc_in_bits_status_zero2 (io_cmd_bits_status_zero2_0), // @[Top.scala:30:7]
.cur_funct_io_rocc_in_bits_status_mpv (io_cmd_bits_status_mpv_0), // @[Top.scala:30:7]
.cur_funct_io_rocc_in_bits_status_gva (io_cmd_bits_status_gva_0), // @[Top.scala:30:7]
.cur_funct_io_rocc_in_bits_status_mbe (io_cmd_bits_status_mbe_0), // @[Top.scala:30:7]
.cur_funct_io_rocc_in_bits_status_sbe (io_cmd_bits_status_sbe_0), // @[Top.scala:30:7]
.cur_funct_io_rocc_in_bits_status_sxl (io_cmd_bits_status_sxl_0), // @[Top.scala:30:7]
.cur_funct_io_rocc_in_bits_status_uxl (io_cmd_bits_status_uxl_0), // @[Top.scala:30:7]
.cur_funct_io_rocc_in_bits_status_sd_rv32 (io_cmd_bits_status_sd_rv32_0), // @[Top.scala:30:7]
.cur_funct_io_rocc_in_bits_status_zero1 (io_cmd_bits_status_zero1_0), // @[Top.scala:30:7]
.cur_funct_io_rocc_in_bits_status_tsr (io_cmd_bits_status_tsr_0), // @[Top.scala:30:7]
.cur_funct_io_rocc_in_bits_status_tw (io_cmd_bits_status_tw_0), // @[Top.scala:30:7]
.cur_funct_io_rocc_in_bits_status_tvm (io_cmd_bits_status_tvm_0), // @[Top.scala:30:7]
.cur_funct_io_rocc_in_bits_status_mxr (io_cmd_bits_status_mxr_0), // @[Top.scala:30:7]
.cur_funct_io_rocc_in_bits_status_sum (io_cmd_bits_status_sum_0), // @[Top.scala:30:7]
.cur_funct_io_rocc_in_bits_status_mprv (io_cmd_bits_status_mprv_0), // @[Top.scala:30:7]
.cur_funct_io_rocc_in_bits_status_xs (io_cmd_bits_status_xs_0), // @[Top.scala:30:7]
.cur_funct_io_rocc_in_bits_status_fs (io_cmd_bits_status_fs_0), // @[Top.scala:30:7]
.cur_funct_io_rocc_in_bits_status_mpp (io_cmd_bits_status_mpp_0), // @[Top.scala:30:7]
.cur_funct_io_rocc_in_bits_status_vs (io_cmd_bits_status_vs_0), // @[Top.scala:30:7]
.cur_funct_io_rocc_in_bits_status_spp (io_cmd_bits_status_spp_0), // @[Top.scala:30:7]
.cur_funct_io_rocc_in_bits_status_mpie (io_cmd_bits_status_mpie_0), // @[Top.scala:30:7]
.cur_funct_io_rocc_in_bits_status_ube (io_cmd_bits_status_ube_0), // @[Top.scala:30:7]
.cur_funct_io_rocc_in_bits_status_spie (io_cmd_bits_status_spie_0), // @[Top.scala:30:7]
.cur_funct_io_rocc_in_bits_status_upie (io_cmd_bits_status_upie_0), // @[Top.scala:30:7]
.cur_funct_io_rocc_in_bits_status_mie (io_cmd_bits_status_mie_0), // @[Top.scala:30:7]
.cur_funct_io_rocc_in_bits_status_hie (io_cmd_bits_status_hie_0), // @[Top.scala:30:7]
.cur_funct_io_rocc_in_bits_status_sie (io_cmd_bits_status_sie_0), // @[Top.scala:30:7]
.cur_funct_io_rocc_in_bits_status_uie (io_cmd_bits_status_uie_0), // @[Top.scala:30:7]
.cur_funct_io_rocc_out_ready (io_resp_ready_0), // @[Top.scala:30:7]
.cur_funct_io_rocc_out_valid (io_resp_valid_0),
.cur_funct_io_rocc_out_bits_rd (io_resp_bits_rd_0),
.cur_funct_io_rocc_out_bits_data (io_resp_bits_data_0),
.cur_funct_io_dmem_status_out_valid (_memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_valid),
.cur_funct_io_dmem_status_out_bits_status_debug (_memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_debug),
.cur_funct_io_dmem_status_out_bits_status_cease (_memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_cease),
.cur_funct_io_dmem_status_out_bits_status_wfi (_memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_wfi),
.cur_funct_io_dmem_status_out_bits_status_isa (_memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_isa),
.cur_funct_io_dmem_status_out_bits_status_dprv (_memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_dprv),
.cur_funct_io_dmem_status_out_bits_status_dv (_memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_dv),
.cur_funct_io_dmem_status_out_bits_status_prv (_memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_prv),
.cur_funct_io_dmem_status_out_bits_status_v (_memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_v),
.cur_funct_io_dmem_status_out_bits_status_sd (_memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_sd),
.cur_funct_io_dmem_status_out_bits_status_zero2 (_memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_zero2),
.cur_funct_io_dmem_status_out_bits_status_mpv (_memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_mpv),
.cur_funct_io_dmem_status_out_bits_status_gva (_memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_gva),
.cur_funct_io_dmem_status_out_bits_status_mbe (_memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_mbe),
.cur_funct_io_dmem_status_out_bits_status_sbe (_memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_sbe),
.cur_funct_io_dmem_status_out_bits_status_sxl (_memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_sxl),
.cur_funct_io_dmem_status_out_bits_status_uxl (_memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_uxl),
.cur_funct_io_dmem_status_out_bits_status_sd_rv32 (_memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_sd_rv32),
.cur_funct_io_dmem_status_out_bits_status_zero1 (_memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_zero1),
.cur_funct_io_dmem_status_out_bits_status_tsr (_memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_tsr),
.cur_funct_io_dmem_status_out_bits_status_tw (_memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_tw),
.cur_funct_io_dmem_status_out_bits_status_tvm (_memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_tvm),
.cur_funct_io_dmem_status_out_bits_status_mxr (_memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_mxr),
.cur_funct_io_dmem_status_out_bits_status_sum (_memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_sum),
.cur_funct_io_dmem_status_out_bits_status_mprv (_memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_mprv),
.cur_funct_io_dmem_status_out_bits_status_xs (_memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_xs),
.cur_funct_io_dmem_status_out_bits_status_fs (_memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_fs),
.cur_funct_io_dmem_status_out_bits_status_mpp (_memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_mpp),
.cur_funct_io_dmem_status_out_bits_status_vs (_memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_vs),
.cur_funct_io_dmem_status_out_bits_status_spp (_memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_spp),
.cur_funct_io_dmem_status_out_bits_status_mpie (_memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_mpie),
.cur_funct_io_dmem_status_out_bits_status_ube (_memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_ube),
.cur_funct_io_dmem_status_out_bits_status_spie (_memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_spie),
.cur_funct_io_dmem_status_out_bits_status_upie (_memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_upie),
.cur_funct_io_dmem_status_out_bits_status_mie (_memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_mie),
.cur_funct_io_dmem_status_out_bits_status_hie (_memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_hie),
.cur_funct_io_dmem_status_out_bits_status_sie (_memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_sie),
.cur_funct_io_dmem_status_out_bits_status_uie (_memloader_io_src_info_cmd_router_cur_funct_io_dmem_status_out_bits_status_uie),
.cur_funct_io_sfence_out (_memloader_io_src_info_cmd_router_cur_funct_io_sfence_out),
.cur_funct_io_src_info_ready (_memloader_io_src_info_ready), // @[MemStreamerAccel.scala:63:25]
.cur_funct_io_src_info_valid (_memloader_io_src_info_cmd_router_cur_funct_io_src_info_valid),
.cur_funct_io_src_info_bits_ip (_memloader_io_src_info_cmd_router_cur_funct_io_src_info_bits_ip),
.cur_funct_io_src_info_bits_isize (_memloader_io_src_info_cmd_router_cur_funct_io_src_info_bits_isize),
.cur_funct_io_dest_info_ready (_memwriter_io_decompress_dest_info_ready), // @[MemStreamerAccel.scala:67:25]
.cur_funct_io_dest_info_valid (_memloader_io_src_info_cmd_router_cur_funct_io_dest_info_valid),
.cur_funct_io_dest_info_bits_op (_memloader_io_src_info_cmd_router_cur_funct_io_dest_info_bits_op),
.cur_funct_io_dest_info_bits_cmpflag (_memloader_io_src_info_cmd_router_cur_funct_io_dest_info_bits_cmpflag),
.cur_funct_io_bufs_completed (_memwriter_io_bufs_completed), // @[MemStreamerAccel.scala:67:25]
.cur_funct_io_no_writes_inflight (_memwriter_io_no_writes_inflight), // @[MemStreamerAccel.scala:67:25]
.cur_funct_io_key_valid (_memloader_io_src_info_cmd_router_cur_funct_io_key_valid),
.cur_funct_io_key_bits (_memloader_io_src_info_cmd_router_cur_funct_io_key_bits),
.cur_funct_io_mode_valid (_memloader_io_src_info_cmd_router_cur_funct_io_mode_valid),
.cur_funct_io_mode_bits (_memloader_io_src_info_cmd_router_cur_funct_io_mode_bits)
); // @[Top.scala:35:31]
MemWriter32 memwriter ( // @[MemStreamerAccel.scala:67:25]
.clock (clock),
.reset (reset),
.io_memwrites_in_ready (_memwriter_io_memwrites_in_ready),
.io_memwrites_in_valid (_streamer_load_data_queue_io_enq_bits_chunk_data_io_memwrites_in_valid), // @[Top.scala:36:29]
.io_memwrites_in_bits_data (_streamer_load_data_queue_io_enq_bits_chunk_data_io_memwrites_in_bits_data), // @[Top.scala:36:29]
.io_memwrites_in_bits_validbytes (_streamer_load_data_queue_io_enq_bits_chunk_data_io_memwrites_in_bits_validbytes), // @[Top.scala:36:29]
.io_memwrites_in_bits_end_of_message (_streamer_load_data_queue_io_enq_bits_chunk_data_io_memwrites_in_bits_end_of_message), // @[Top.scala:36:29]
.io_l2io_req_ready (_l2_memwriter_io_userif_req_ready), // @[MemStreamerAccel.scala:37:36]
.io_l2io_req_valid (_memwriter_io_l2io_req_valid),
.io_l2io_req_bits_addr (_memwriter_io_l2io_req_bits_addr),
.io_l2io_req_bits_size (_memwriter_io_l2io_req_bits_size),
.io_l2io_req_bits_data (_memwriter_io_l2io_req_bits_data),
.io_l2io_resp_valid (_l2_memwriter_io_userif_resp_valid), // @[MemStreamerAccel.scala:37:36]
.io_l2io_resp_bits_data (_l2_memwriter_io_userif_resp_bits_data), // @[MemStreamerAccel.scala:37:36]
.io_l2io_no_memops_inflight (_l2_memwriter_io_userif_no_memops_inflight), // @[MemStreamerAccel.scala:37:36]
.io_decompress_dest_info_ready (_memwriter_io_decompress_dest_info_ready),
.io_decompress_dest_info_valid (_memloader_io_src_info_cmd_router_cur_funct_io_dest_info_valid), // @[Top.scala:35:31]
.io_decompress_dest_info_bits_op (_memloader_io_src_info_cmd_router_cur_funct_io_dest_info_bits_op), // @[Top.scala:35:31]
.io_decompress_dest_info_bits_cmpflag (_memloader_io_src_info_cmd_router_cur_funct_io_dest_info_bits_cmpflag), // @[Top.scala:35:31]
.io_bufs_completed (_memwriter_io_bufs_completed),
.io_no_writes_inflight (_memwriter_io_no_writes_inflight)
); // @[MemStreamerAccel.scala:67:25]
AES256ECB streamer ( // @[Top.scala:36:29]
.clock (clock),
.reset (reset),
.load_data_queue_io_enq_bits_chunk_data_io_mem_stream_user_consumed_bytes (_streamer_load_data_queue_io_enq_bits_chunk_data_io_mem_stream_user_consumed_bytes),
.load_data_queue_io_enq_bits_chunk_data_io_mem_stream_available_output_bytes (_memloader_io_consumer_available_output_bytes), // @[MemStreamerAccel.scala:63:25]
.load_data_queue_io_enq_bits_chunk_data_io_mem_stream_output_valid (_memloader_io_consumer_output_valid), // @[MemStreamerAccel.scala:63:25]
.load_data_queue_io_enq_bits_chunk_data_io_mem_stream_output_ready (_streamer_load_data_queue_io_enq_bits_chunk_data_io_mem_stream_output_ready),
.load_data_queue_io_enq_bits_chunk_data_io_mem_stream_output_data (_memloader_io_consumer_output_data), // @[MemStreamerAccel.scala:63:25]
.load_data_queue_io_enq_bits_chunk_data_io_mem_stream_output_last_chunk (_memloader_io_consumer_output_last_chunk), // @[MemStreamerAccel.scala:63:25]
.load_data_queue_io_enq_bits_chunk_data_io_memwrites_in_ready (_memwriter_io_memwrites_in_ready), // @[MemStreamerAccel.scala:67:25]
.load_data_queue_io_enq_bits_chunk_data_io_memwrites_in_valid (_streamer_load_data_queue_io_enq_bits_chunk_data_io_memwrites_in_valid),
.load_data_queue_io_enq_bits_chunk_data_io_memwrites_in_bits_data (_streamer_load_data_queue_io_enq_bits_chunk_data_io_memwrites_in_bits_data),
.load_data_queue_io_enq_bits_chunk_data_io_memwrites_in_bits_validbytes (_streamer_load_data_queue_io_enq_bits_chunk_data_io_memwrites_in_bits_validbytes),
.load_data_queue_io_enq_bits_chunk_data_io_memwrites_in_bits_end_of_message (_streamer_load_data_queue_io_enq_bits_chunk_data_io_memwrites_in_bits_end_of_message),
.load_data_queue_io_enq_bits_chunk_data_io_key_valid (_memloader_io_src_info_cmd_router_cur_funct_io_key_valid), // @[Top.scala:35:31]
.load_data_queue_io_enq_bits_chunk_data_io_key_bits (_memloader_io_src_info_cmd_router_cur_funct_io_key_bits), // @[Top.scala:35:31]
.load_data_queue_io_enq_bits_chunk_data_io_mode_valid (_memloader_io_src_info_cmd_router_cur_funct_io_mode_valid), // @[Top.scala:35:31]
.load_data_queue_io_enq_bits_chunk_data_io_mode_bits (_memloader_io_src_info_cmd_router_cur_funct_io_mode_bits) // @[Top.scala:35:31]
); // @[Top.scala:36:29]
assign auto_atl_out_1_a_valid = auto_atl_out_1_a_valid_0; // @[Top.scala:30:7]
assign auto_atl_out_1_a_bits_opcode = auto_atl_out_1_a_bits_opcode_0; // @[Top.scala:30:7]
assign auto_atl_out_1_a_bits_param = auto_atl_out_1_a_bits_param_0; // @[Top.scala:30:7]
assign auto_atl_out_1_a_bits_size = auto_atl_out_1_a_bits_size_0; // @[Top.scala:30:7]
assign auto_atl_out_1_a_bits_source = auto_atl_out_1_a_bits_source_0; // @[Top.scala:30:7]
assign auto_atl_out_1_a_bits_address = auto_atl_out_1_a_bits_address_0; // @[Top.scala:30:7]
assign auto_atl_out_1_a_bits_mask = auto_atl_out_1_a_bits_mask_0; // @[Top.scala:30:7]
assign auto_atl_out_1_a_bits_data = auto_atl_out_1_a_bits_data_0; // @[Top.scala:30:7]
assign auto_atl_out_1_a_bits_corrupt = auto_atl_out_1_a_bits_corrupt_0; // @[Top.scala:30:7]
assign auto_atl_out_1_d_ready = auto_atl_out_1_d_ready_0; // @[Top.scala:30:7]
assign auto_atl_out_0_a_valid = auto_atl_out_0_a_valid_0; // @[Top.scala:30:7]
assign auto_atl_out_0_a_bits_opcode = auto_atl_out_0_a_bits_opcode_0; // @[Top.scala:30:7]
assign auto_atl_out_0_a_bits_param = auto_atl_out_0_a_bits_param_0; // @[Top.scala:30:7]
assign auto_atl_out_0_a_bits_size = auto_atl_out_0_a_bits_size_0; // @[Top.scala:30:7]
assign auto_atl_out_0_a_bits_source = auto_atl_out_0_a_bits_source_0; // @[Top.scala:30:7]
assign auto_atl_out_0_a_bits_address = auto_atl_out_0_a_bits_address_0; // @[Top.scala:30:7]
assign auto_atl_out_0_a_bits_mask = auto_atl_out_0_a_bits_mask_0; // @[Top.scala:30:7]
assign auto_atl_out_0_a_bits_data = auto_atl_out_0_a_bits_data_0; // @[Top.scala:30:7]
assign auto_atl_out_0_a_bits_corrupt = auto_atl_out_0_a_bits_corrupt_0; // @[Top.scala:30:7]
assign auto_atl_out_0_d_ready = auto_atl_out_0_d_ready_0; // @[Top.scala:30:7]
assign io_cmd_ready = io_cmd_ready_0; // @[Top.scala:30:7]
assign io_resp_valid = io_resp_valid_0; // @[Top.scala:30:7]
assign io_resp_bits_rd = io_resp_bits_rd_0; // @[Top.scala:30:7]
assign io_resp_bits_data = io_resp_bits_data_0; // @[Top.scala:30:7]
assign io_ptw_0_req_valid = io_ptw_0_req_valid_0; // @[Top.scala:30:7]
assign io_ptw_0_req_bits_bits_addr = io_ptw_0_req_bits_bits_addr_0; // @[Top.scala:30:7]
assign io_ptw_0_req_bits_bits_need_gpa = io_ptw_0_req_bits_bits_need_gpa_0; // @[Top.scala:30:7]
assign io_ptw_1_req_valid = io_ptw_1_req_valid_0; // @[Top.scala:30:7]
assign io_ptw_1_req_bits_bits_addr = io_ptw_1_req_bits_bits_addr_0; // @[Top.scala:30:7]
assign io_ptw_1_req_bits_bits_need_gpa = io_ptw_1_req_bits_bits_need_gpa_0; // @[Top.scala:30:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File ShiftReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
// Similar to the Chisel ShiftRegister but allows the user to suggest a
// name to the registers that get instantiated, and
// to provide a reset value.
object ShiftRegInit {
def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T =
(0 until n).foldRight(in) {
case (i, next) => {
val r = RegNext(next, init)
name.foreach { na => r.suggestName(s"${na}_${i}") }
r
}
}
}
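// Illustrative sketch (not part of the original file): a minimal use of ShiftRegInit that
// delays a valid flag by two cycles with a defined reset value. The module and signal
// names here are assumptions chosen only for the example.
class ShiftRegInitExample extends Module {
  val io = IO(new Bundle {
    val in  = Input(Bool())
    val out = Output(Bool())
  })
  // Builds two RegNext stages reset to false.B, suggested names "vld_pipe_1" and "vld_pipe_0".
  io.out := ShiftRegInit(io.in, n = 2, init = false.B, name = Some("vld_pipe"))
}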
/** These wrap behavioral
* shift registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
* The different types vary in their reset behavior:
* AsyncResetShiftReg -- Asynchronously reset register array
* A W(width) x D(depth) sized array is constructed from D instantiations of a
 *  W-wide register vector. Functionally identical to AsyncResetSynchronizerShiftReg,
* but only used for timing applications
*/
abstract class AbstractPipelineReg(w: Int = 1) extends Module {
val io = IO(new Bundle {
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
}
)
}
object AbstractPipelineReg {
def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = {
val chain = Module(gen)
name.foreach{ chain.suggestName(_) }
chain.io.d := in.asUInt
chain.io.q.asTypeOf(in)
}
}
class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) {
require(depth > 0, "Depth must be greater than 0.")
override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}"
val chain = List.tabulate(depth) { i =>
Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}")
}
chain.last.io.d := io.d
chain.last.io.en := true.B
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink.io.d := source.io.q
sink.io.en := true.B
}
io.q := chain.head.io.q
}
object AsyncResetShiftReg {
def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name)
def apply [T <: Data](in: T, depth: Int, name: Option[String]): T =
apply(in, depth, 0, name)
def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T =
apply(in, depth, init.litValue.toInt, name)
def apply [T <: Data](in: T, depth: Int, init: T): T =
apply (in, depth, init.litValue.toInt, None)
}
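// Illustrative sketch (not part of the original file): pipelining a single status bit through
// a 3-deep asynchronously reset register chain, e.g. to retime a long path. Module and signal
// names are assumptions; the call below instantiates a module whose desiredName works out to
// AsyncResetShiftReg_w1_d3_i0.
class AsyncResetShiftRegExample extends Module {
  val io = IO(new Bundle {
    val d = Input(Bool())
    val q = Output(Bool())
  })
  io.q := AsyncResetShiftReg(io.d, depth = 3, init = 0, name = Some("status_pipe"))
}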
File AsyncQueue.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util._
case class AsyncQueueParams(
depth: Int = 8,
sync: Int = 3,
safe: Boolean = true,
// If safe is true, then effort is made to resynchronize the crossing indices when either side is reset.
// This makes it safe/possible to reset one side of the crossing (but not the other) when the queue is empty.
narrow: Boolean = false)
// If narrow is true then the read mux is moved to the source side of the crossing.
// This reduces the number of level shifters in the case where the clock crossing is also a voltage crossing,
// at the expense of a combinational path from the sink to the source and back to the sink.
{
require (depth > 0 && isPow2(depth))
require (sync >= 2)
val bits = log2Ceil(depth)
val wires = if (narrow) 1 else depth
}
object AsyncQueueParams {
// When there is only one entry, we don't need narrow.
def singleton(sync: Int = 3, safe: Boolean = true) = AsyncQueueParams(1, sync, safe, false)
}
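// Illustrative sketch (not part of the original file): the sizes derived from the default
// parameters, which line up with the generated AsyncQueueSource Verilog further below
// (8 data entries and 4-bit Gray-coded widx/ridx pointers).
object AsyncQueueParamsExample {
  val params = AsyncQueueParams()  // depth = 8, sync = 3, safe = true, narrow = false
  require(params.bits == 3)        // log2Ceil(8); the pointers are bits+1 = 4 bits wide
  require(params.wires == 8)       // narrow = false, so all depth entries cross as wires
}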
class AsyncBundleSafety extends Bundle {
val ridx_valid = Input (Bool())
val widx_valid = Output(Bool())
val source_reset_n = Output(Bool())
val sink_reset_n = Input (Bool())
}
class AsyncBundle[T <: Data](private val gen: T, val params: AsyncQueueParams = AsyncQueueParams()) extends Bundle {
// Data-path synchronization
val mem = Output(Vec(params.wires, gen))
val ridx = Input (UInt((params.bits+1).W))
val widx = Output(UInt((params.bits+1).W))
val index = params.narrow.option(Input(UInt(params.bits.W)))
// Signals used to self-stabilize a safe AsyncQueue
val safe = params.safe.option(new AsyncBundleSafety)
}
object GrayCounter {
def apply(bits: Int, increment: Bool = true.B, clear: Bool = false.B, name: String = "binary"): UInt = {
val incremented = Wire(UInt(bits.W))
val binary = RegNext(next=incremented, init=0.U).suggestName(name)
incremented := Mux(clear, 0.U, binary + increment.asUInt)
incremented ^ (incremented >> 1)
}
}
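// Illustrative sketch (not part of the original file): the Gray mapping used above,
// g = b ^ (b >> 1), flips exactly one bit between consecutive counter values, so a pointer
// synchronized bit-by-bit across clock domains is always sampled as either its old or its
// new value, never as a spurious intermediate.
object GrayCounterExample {
  def toGray(b: Int): Int = b ^ (b >> 1)
  // 0,1,2,3,4 map to 000,001,011,010,110 -- one bit changes per increment.
  require((0 until 15).forall(b => Integer.bitCount(toGray(b) ^ toGray(b + 1)) == 1))
}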
class AsyncValidSync(sync: Int, desc: String) extends RawModule {
val io = IO(new Bundle {
val in = Input(Bool())
val out = Output(Bool())
})
val clock = IO(Input(Clock()))
val reset = IO(Input(AsyncReset()))
withClockAndReset(clock, reset){
io.out := AsyncResetSynchronizerShiftReg(io.in, sync, Some(desc))
}
}
class AsyncQueueSource[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module {
override def desiredName = s"AsyncQueueSource_${gen.typeName}"
val io = IO(new Bundle {
// These come from the source domain
val enq = Flipped(Decoupled(gen))
// These cross to the sink clock domain
val async = new AsyncBundle(gen, params)
})
val bits = params.bits
val sink_ready = WireInit(true.B)
val mem = Reg(Vec(params.depth, gen)) // This does NOT need to be reset at all.
val widx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.enq.fire, !sink_ready, "widx_bin"))
val ridx = AsyncResetSynchronizerShiftReg(io.async.ridx, params.sync, Some("ridx_gray"))
val ready = sink_ready && widx =/= (ridx ^ (params.depth | params.depth >> 1).U)
val index = if (bits == 0) 0.U else io.async.widx(bits-1, 0) ^ (io.async.widx(bits, bits) << (bits-1))
when (io.enq.fire) { mem(index) := io.enq.bits }
val ready_reg = withReset(reset.asAsyncReset)(RegNext(next=ready, init=false.B).suggestName("ready_reg"))
io.enq.ready := ready_reg && sink_ready
val widx_reg = withReset(reset.asAsyncReset)(RegNext(next=widx, init=0.U).suggestName("widx_gray"))
io.async.widx := widx_reg
io.async.index match {
case Some(index) => io.async.mem(0) := mem(index)
case None => io.async.mem := mem
}
io.async.safe.foreach { sio =>
val source_valid_0 = Module(new AsyncValidSync(params.sync, "source_valid_0"))
val source_valid_1 = Module(new AsyncValidSync(params.sync, "source_valid_1"))
val sink_extend = Module(new AsyncValidSync(params.sync, "sink_extend"))
val sink_valid = Module(new AsyncValidSync(params.sync, "sink_valid"))
source_valid_0.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset
source_valid_1.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset
sink_extend .reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset
sink_valid .reset := reset.asAsyncReset
source_valid_0.clock := clock
source_valid_1.clock := clock
sink_extend .clock := clock
sink_valid .clock := clock
source_valid_0.io.in := true.B
source_valid_1.io.in := source_valid_0.io.out
sio.widx_valid := source_valid_1.io.out
sink_extend.io.in := sio.ridx_valid
sink_valid.io.in := sink_extend.io.out
sink_ready := sink_valid.io.out
sio.source_reset_n := !reset.asBool
// Assert that if there is stuff in the queue, then reset cannot happen
// Impossible to write because dequeue can occur on the receiving side,
// then reset allowed to happen, but write side cannot know that dequeue
// occurred.
// TODO: write some sort of sanity check assertion for users
// that denote don't reset when there is activity
// assert (!(reset || !sio.sink_reset_n) || !io.enq.valid, "Enqueue while sink is reset and AsyncQueueSource is unprotected")
// assert (!reset_rise || prev_idx_match.asBool, "Sink reset while AsyncQueueSource not empty")
}
}
class AsyncQueueSink[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module {
override def desiredName = s"AsyncQueueSink_${gen.typeName}"
val io = IO(new Bundle {
// These come from the sink domain
val deq = Decoupled(gen)
// These cross to the source clock domain
val async = Flipped(new AsyncBundle(gen, params))
})
val bits = params.bits
val source_ready = WireInit(true.B)
val ridx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.deq.fire, !source_ready, "ridx_bin"))
val widx = AsyncResetSynchronizerShiftReg(io.async.widx, params.sync, Some("widx_gray"))
val valid = source_ready && ridx =/= widx
// The mux is safe because timing analysis ensures ridx has reached the register
// On an ASIC, changes to the unread location cannot affect the selected value
// On an FPGA, only one input changes at a time => mem updates don't cause glitches
// The register only latches when the selected valued is not being written
val index = if (bits == 0) 0.U else ridx(bits-1, 0) ^ (ridx(bits, bits) << (bits-1))
io.async.index.foreach { _ := index }
// This register does not NEED to be reset, as its contents will not
// be considered unless the asynchronously reset deq valid register is set.
// It is possible that bits latches when the source domain is reset / has power cut
// This is safe, because isolation gates brought mem low before the zeroed widx reached us
val deq_bits_nxt = io.async.mem(if (params.narrow) 0.U else index)
io.deq.bits := ClockCrossingReg(deq_bits_nxt, en = valid, doInit = false, name = Some("deq_bits_reg"))
val valid_reg = withReset(reset.asAsyncReset)(RegNext(next=valid, init=false.B).suggestName("valid_reg"))
io.deq.valid := valid_reg && source_ready
val ridx_reg = withReset(reset.asAsyncReset)(RegNext(next=ridx, init=0.U).suggestName("ridx_gray"))
io.async.ridx := ridx_reg
io.async.safe.foreach { sio =>
val sink_valid_0 = Module(new AsyncValidSync(params.sync, "sink_valid_0"))
val sink_valid_1 = Module(new AsyncValidSync(params.sync, "sink_valid_1"))
val source_extend = Module(new AsyncValidSync(params.sync, "source_extend"))
val source_valid = Module(new AsyncValidSync(params.sync, "source_valid"))
sink_valid_0 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset
sink_valid_1 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset
source_extend.reset := (reset.asBool || !sio.source_reset_n).asAsyncReset
source_valid .reset := reset.asAsyncReset
sink_valid_0 .clock := clock
sink_valid_1 .clock := clock
source_extend.clock := clock
source_valid .clock := clock
sink_valid_0.io.in := true.B
sink_valid_1.io.in := sink_valid_0.io.out
sio.ridx_valid := sink_valid_1.io.out
source_extend.io.in := sio.widx_valid
source_valid.io.in := source_extend.io.out
source_ready := source_valid.io.out
sio.sink_reset_n := !reset.asBool
// TODO: write some sort of sanity check assertion for users
// that denote don't reset when there is activity
//
// val reset_and_extend = !source_ready || !sio.source_reset_n || reset.asBool
// val reset_and_extend_prev = RegNext(reset_and_extend, true.B)
// val reset_rise = !reset_and_extend_prev && reset_and_extend
// val prev_idx_match = AsyncResetReg(updateData=(io.async.widx===io.async.ridx), resetData=0)
// assert (!reset_rise || prev_idx_match.asBool, "Source reset while AsyncQueueSink not empty")
}
}
object FromAsyncBundle
{
// Sometimes it makes sense for the sink to have different sync than the source
def apply[T <: Data](x: AsyncBundle[T]): DecoupledIO[T] = apply(x, x.params.sync)
def apply[T <: Data](x: AsyncBundle[T], sync: Int): DecoupledIO[T] = {
val sink = Module(new AsyncQueueSink(chiselTypeOf(x.mem(0)), x.params.copy(sync = sync)))
sink.io.async <> x
sink.io.deq
}
}
object ToAsyncBundle
{
def apply[T <: Data](x: ReadyValidIO[T], params: AsyncQueueParams = AsyncQueueParams()): AsyncBundle[T] = {
val source = Module(new AsyncQueueSource(chiselTypeOf(x.bits), params))
source.io.enq <> x
source.io.async
}
}
class AsyncQueue[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Crossing[T] {
val io = IO(new CrossingIO(gen))
val source = withClockAndReset(io.enq_clock, io.enq_reset) { Module(new AsyncQueueSource(gen, params)) }
val sink = withClockAndReset(io.deq_clock, io.deq_reset) { Module(new AsyncQueueSink (gen, params)) }
source.io.enq <> io.enq
io.deq <> sink.io.deq
sink.io.async <> source.io.async
}
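// Illustrative sketch (not part of the original file): carrying a 32-bit "phit" payload from
// a tx clock domain to an rx clock domain with an AsyncQueue, roughly the situation behind the
// generated AsyncQueueSource_Phit module below. The PhitExample bundle and CrossingExample
// wrapper are assumptions, not rocket-chip code.
class PhitExample extends Bundle { val phit = UInt(32.W) }
class CrossingExample extends RawModule {
  val tx_clock = IO(Input(Clock()))
  val tx_reset = IO(Input(Bool()))
  val rx_clock = IO(Input(Clock()))
  val rx_reset = IO(Input(Bool()))
  val enq = IO(Flipped(Decoupled(new PhitExample)))
  val deq = IO(Decoupled(new PhitExample))
  val q = withClockAndReset(tx_clock, tx_reset) {
    Module(new AsyncQueue(new PhitExample, AsyncQueueParams(depth = 8, sync = 3)))
  }
  q.io.enq_clock := tx_clock
  q.io.enq_reset := tx_reset
  q.io.deq_clock := rx_clock
  q.io.deq_reset := rx_reset
  q.io.enq <> enq
  deq <> q.io.deq
}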
| module AsyncQueueSource_Phit_15( // @[AsyncQueue.scala:70:7]
input clock, // @[AsyncQueue.scala:70:7]
input reset, // @[AsyncQueue.scala:70:7]
output io_enq_ready, // @[AsyncQueue.scala:73:14]
input io_enq_valid, // @[AsyncQueue.scala:73:14]
input [31:0] io_enq_bits_phit, // @[AsyncQueue.scala:73:14]
output [31:0] io_async_mem_0_phit, // @[AsyncQueue.scala:73:14]
output [31:0] io_async_mem_1_phit, // @[AsyncQueue.scala:73:14]
output [31:0] io_async_mem_2_phit, // @[AsyncQueue.scala:73:14]
output [31:0] io_async_mem_3_phit, // @[AsyncQueue.scala:73:14]
output [31:0] io_async_mem_4_phit, // @[AsyncQueue.scala:73:14]
output [31:0] io_async_mem_5_phit, // @[AsyncQueue.scala:73:14]
output [31:0] io_async_mem_6_phit, // @[AsyncQueue.scala:73:14]
output [31:0] io_async_mem_7_phit, // @[AsyncQueue.scala:73:14]
input [3:0] io_async_ridx, // @[AsyncQueue.scala:73:14]
output [3:0] io_async_widx, // @[AsyncQueue.scala:73:14]
input io_async_safe_ridx_valid, // @[AsyncQueue.scala:73:14]
output io_async_safe_widx_valid, // @[AsyncQueue.scala:73:14]
output io_async_safe_source_reset_n, // @[AsyncQueue.scala:73:14]
input io_async_safe_sink_reset_n // @[AsyncQueue.scala:73:14]
);
wire _sink_extend_io_out; // @[AsyncQueue.scala:105:30]
wire _source_valid_0_io_out; // @[AsyncQueue.scala:102:32]
wire io_enq_valid_0 = io_enq_valid; // @[AsyncQueue.scala:70:7]
wire [31:0] io_enq_bits_phit_0 = io_enq_bits_phit; // @[AsyncQueue.scala:70:7]
wire [3:0] io_async_ridx_0 = io_async_ridx; // @[AsyncQueue.scala:70:7]
wire io_async_safe_ridx_valid_0 = io_async_safe_ridx_valid; // @[AsyncQueue.scala:70:7]
wire io_async_safe_sink_reset_n_0 = io_async_safe_sink_reset_n; // @[AsyncQueue.scala:70:7]
wire _widx_T = reset; // @[AsyncQueue.scala:83:30]
wire _ready_reg_T = reset; // @[AsyncQueue.scala:90:35]
wire _widx_reg_T = reset; // @[AsyncQueue.scala:93:34]
wire _source_valid_0_reset_T = reset; // @[AsyncQueue.scala:107:36]
wire _source_valid_1_reset_T = reset; // @[AsyncQueue.scala:108:36]
wire _sink_extend_reset_T = reset; // @[AsyncQueue.scala:109:36]
wire _sink_valid_reset_T = reset; // @[AsyncQueue.scala:110:35]
wire _io_async_safe_source_reset_n_T = reset; // @[AsyncQueue.scala:123:34]
wire _io_enq_ready_T; // @[AsyncQueue.scala:91:29]
wire _io_async_safe_source_reset_n_T_1; // @[AsyncQueue.scala:123:27]
wire io_enq_ready_0; // @[AsyncQueue.scala:70:7]
wire [31:0] io_async_mem_0_phit_0; // @[AsyncQueue.scala:70:7]
wire [31:0] io_async_mem_1_phit_0; // @[AsyncQueue.scala:70:7]
wire [31:0] io_async_mem_2_phit_0; // @[AsyncQueue.scala:70:7]
wire [31:0] io_async_mem_3_phit_0; // @[AsyncQueue.scala:70:7]
wire [31:0] io_async_mem_4_phit_0; // @[AsyncQueue.scala:70:7]
wire [31:0] io_async_mem_5_phit_0; // @[AsyncQueue.scala:70:7]
wire [31:0] io_async_mem_6_phit_0; // @[AsyncQueue.scala:70:7]
wire [31:0] io_async_mem_7_phit_0; // @[AsyncQueue.scala:70:7]
wire io_async_safe_widx_valid_0; // @[AsyncQueue.scala:70:7]
wire io_async_safe_source_reset_n_0; // @[AsyncQueue.scala:70:7]
wire [3:0] io_async_widx_0; // @[AsyncQueue.scala:70:7]
wire sink_ready; // @[AsyncQueue.scala:81:28]
reg [31:0] mem_0_phit; // @[AsyncQueue.scala:82:16]
assign io_async_mem_0_phit_0 = mem_0_phit; // @[AsyncQueue.scala:70:7, :82:16]
reg [31:0] mem_1_phit; // @[AsyncQueue.scala:82:16]
assign io_async_mem_1_phit_0 = mem_1_phit; // @[AsyncQueue.scala:70:7, :82:16]
reg [31:0] mem_2_phit; // @[AsyncQueue.scala:82:16]
assign io_async_mem_2_phit_0 = mem_2_phit; // @[AsyncQueue.scala:70:7, :82:16]
reg [31:0] mem_3_phit; // @[AsyncQueue.scala:82:16]
assign io_async_mem_3_phit_0 = mem_3_phit; // @[AsyncQueue.scala:70:7, :82:16]
reg [31:0] mem_4_phit; // @[AsyncQueue.scala:82:16]
assign io_async_mem_4_phit_0 = mem_4_phit; // @[AsyncQueue.scala:70:7, :82:16]
reg [31:0] mem_5_phit; // @[AsyncQueue.scala:82:16]
assign io_async_mem_5_phit_0 = mem_5_phit; // @[AsyncQueue.scala:70:7, :82:16]
reg [31:0] mem_6_phit; // @[AsyncQueue.scala:82:16]
assign io_async_mem_6_phit_0 = mem_6_phit; // @[AsyncQueue.scala:70:7, :82:16]
reg [31:0] mem_7_phit; // @[AsyncQueue.scala:82:16]
assign io_async_mem_7_phit_0 = mem_7_phit; // @[AsyncQueue.scala:70:7, :82:16]
wire _widx_T_1 = io_enq_ready_0 & io_enq_valid_0; // @[Decoupled.scala:51:35]
wire _widx_T_2 = ~sink_ready; // @[AsyncQueue.scala:81:28, :83:77]
wire [3:0] _widx_incremented_T_2; // @[AsyncQueue.scala:53:23]
wire [3:0] widx_incremented; // @[AsyncQueue.scala:51:27]
reg [3:0] widx_widx_bin; // @[AsyncQueue.scala:52:25]
wire [4:0] _widx_incremented_T = {1'h0, widx_widx_bin} + {4'h0, _widx_T_1}; // @[Decoupled.scala:51:35]
wire [3:0] _widx_incremented_T_1 = _widx_incremented_T[3:0]; // @[AsyncQueue.scala:53:43]
assign _widx_incremented_T_2 = _widx_T_2 ? 4'h0 : _widx_incremented_T_1; // @[AsyncQueue.scala:52:25, :53:{23,43}, :83:77]
assign widx_incremented = _widx_incremented_T_2; // @[AsyncQueue.scala:51:27, :53:23]
wire [2:0] _widx_T_3 = widx_incremented[3:1]; // @[AsyncQueue.scala:51:27, :54:32]
wire [3:0] widx = {widx_incremented[3], widx_incremented[2:0] ^ _widx_T_3}; // @[AsyncQueue.scala:51:27, :54:{17,32}]
wire [3:0] ridx; // @[ShiftReg.scala:48:24]
wire [3:0] _ready_T = ridx ^ 4'hC; // @[ShiftReg.scala:48:24]
wire _ready_T_1 = widx != _ready_T; // @[AsyncQueue.scala:54:17, :85:{34,44}]
wire ready = sink_ready & _ready_T_1; // @[AsyncQueue.scala:81:28, :85:{26,34}]
wire [2:0] _index_T = io_async_widx_0[2:0]; // @[AsyncQueue.scala:70:7, :87:52]
wire _index_T_1 = io_async_widx_0[3]; // @[AsyncQueue.scala:70:7, :87:80]
wire [2:0] _index_T_2 = {_index_T_1, 2'h0}; // @[AsyncQueue.scala:87:{80,93}]
wire [2:0] index = _index_T ^ _index_T_2; // @[AsyncQueue.scala:87:{52,64,93}]
reg ready_reg; // @[AsyncQueue.scala:90:56]
assign _io_enq_ready_T = ready_reg & sink_ready; // @[AsyncQueue.scala:81:28, :90:56, :91:29]
assign io_enq_ready_0 = _io_enq_ready_T; // @[AsyncQueue.scala:70:7, :91:29]
reg [3:0] widx_gray; // @[AsyncQueue.scala:93:55]
assign io_async_widx_0 = widx_gray; // @[AsyncQueue.scala:70:7, :93:55]
wire _source_valid_0_reset_T_1 = ~io_async_safe_sink_reset_n_0; // @[AsyncQueue.scala:70:7, :107:46]
wire _source_valid_0_reset_T_2 = _source_valid_0_reset_T | _source_valid_0_reset_T_1; // @[AsyncQueue.scala:107:{36,43,46}]
wire _source_valid_0_reset_T_3 = _source_valid_0_reset_T_2; // @[AsyncQueue.scala:107:{43,65}]
wire _source_valid_1_reset_T_1 = ~io_async_safe_sink_reset_n_0; // @[AsyncQueue.scala:70:7, :107:46, :108:46]
wire _source_valid_1_reset_T_2 = _source_valid_1_reset_T | _source_valid_1_reset_T_1; // @[AsyncQueue.scala:108:{36,43,46}]
wire _source_valid_1_reset_T_3 = _source_valid_1_reset_T_2; // @[AsyncQueue.scala:108:{43,65}]
wire _sink_extend_reset_T_1 = ~io_async_safe_sink_reset_n_0; // @[AsyncQueue.scala:70:7, :107:46, :109:46]
wire _sink_extend_reset_T_2 = _sink_extend_reset_T | _sink_extend_reset_T_1; // @[AsyncQueue.scala:109:{36,43,46}]
wire _sink_extend_reset_T_3 = _sink_extend_reset_T_2; // @[AsyncQueue.scala:109:{43,65}]
assign _io_async_safe_source_reset_n_T_1 = ~_io_async_safe_source_reset_n_T; // @[AsyncQueue.scala:123:{27,34}]
assign io_async_safe_source_reset_n_0 = _io_async_safe_source_reset_n_T_1; // @[AsyncQueue.scala:70:7, :123:27]
always @(posedge clock) begin // @[AsyncQueue.scala:70:7]
if (_widx_T_1 & index == 3'h0) // @[Decoupled.scala:51:35]
mem_0_phit <= io_enq_bits_phit_0; // @[AsyncQueue.scala:70:7, :82:16]
if (_widx_T_1 & index == 3'h1) // @[Decoupled.scala:51:35]
mem_1_phit <= io_enq_bits_phit_0; // @[AsyncQueue.scala:70:7, :82:16]
if (_widx_T_1 & index == 3'h2) // @[Decoupled.scala:51:35]
mem_2_phit <= io_enq_bits_phit_0; // @[AsyncQueue.scala:70:7, :82:16]
if (_widx_T_1 & index == 3'h3) // @[Decoupled.scala:51:35]
mem_3_phit <= io_enq_bits_phit_0; // @[AsyncQueue.scala:70:7, :82:16]
if (_widx_T_1 & index == 3'h4) // @[Decoupled.scala:51:35]
mem_4_phit <= io_enq_bits_phit_0; // @[AsyncQueue.scala:70:7, :82:16]
if (_widx_T_1 & index == 3'h5) // @[Decoupled.scala:51:35]
mem_5_phit <= io_enq_bits_phit_0; // @[AsyncQueue.scala:70:7, :82:16]
if (_widx_T_1 & index == 3'h6) // @[Decoupled.scala:51:35]
mem_6_phit <= io_enq_bits_phit_0; // @[AsyncQueue.scala:70:7, :82:16]
if (_widx_T_1 & (&index)) // @[Decoupled.scala:51:35]
mem_7_phit <= io_enq_bits_phit_0; // @[AsyncQueue.scala:70:7, :82:16]
  end // always @(posedge)
always @(posedge clock or posedge _widx_T) begin // @[AsyncQueue.scala:70:7, :83:30]
if (_widx_T) // @[AsyncQueue.scala:70:7, :83:30]
widx_widx_bin <= 4'h0; // @[AsyncQueue.scala:52:25]
else // @[AsyncQueue.scala:70:7]
widx_widx_bin <= widx_incremented; // @[AsyncQueue.scala:51:27, :52:25]
  end // always @(posedge, posedge)
always @(posedge clock or posedge _ready_reg_T) begin // @[AsyncQueue.scala:70:7, :90:35]
if (_ready_reg_T) // @[AsyncQueue.scala:70:7, :90:35]
ready_reg <= 1'h0; // @[AsyncQueue.scala:90:56]
else // @[AsyncQueue.scala:70:7]
ready_reg <= ready; // @[AsyncQueue.scala:85:26, :90:56]
end // always @(posedge, posedge)
always @(posedge clock or posedge _widx_reg_T) begin // @[AsyncQueue.scala:70:7, :93:34]
if (_widx_reg_T) // @[AsyncQueue.scala:70:7, :93:34]
widx_gray <= 4'h0; // @[AsyncQueue.scala:52:25, :93:55]
else // @[AsyncQueue.scala:70:7]
widx_gray <= widx; // @[AsyncQueue.scala:54:17, :93:55]
end // always @(posedge, posedge) |
Generate the Verilog code corresponding to the following Chisel files.
File TilelinkAdapters.scala:
package constellation.protocol
import chisel3._
import chisel3.util._
import constellation.channel._
import constellation.noc._
import constellation.soc.{CanAttachToGlobalNoC}
import org.chipsalliance.cde.config._
import freechips.rocketchip.diplomacy._
import freechips.rocketchip.util._
import freechips.rocketchip.tilelink._
import scala.collection.immutable.{ListMap}
abstract class TLChannelToNoC[T <: TLChannel](gen: => T, edge: TLEdge, idToEgress: Int => Int)(implicit val p: Parameters) extends Module with TLFieldHelper {
val flitWidth = minTLPayloadWidth(gen)
val io = IO(new Bundle {
val protocol = Flipped(Decoupled(gen))
val flit = Decoupled(new IngressFlit(flitWidth))
})
def unique(x: Vector[Boolean]): Bool = (x.filter(x=>x).size <= 1).B
// convert decoupled to irrevocable
val q = Module(new Queue(gen, 1, pipe=true, flow=true))
val protocol = q.io.deq
val has_body = Wire(Bool())
val body_fields = getBodyFields(protocol.bits)
val const_fields = getConstFields(protocol.bits)
val head = edge.first(protocol.bits, protocol.fire)
val tail = edge.last(protocol.bits, protocol.fire)
def requestOH: Seq[Bool]
val body = Cat( body_fields.filter(_.getWidth > 0).map(_.asUInt))
val const = Cat(const_fields.filter(_.getWidth > 0).map(_.asUInt))
val is_body = RegInit(false.B)
io.flit.valid := protocol.valid
protocol.ready := io.flit.ready && (is_body || !has_body)
io.flit.bits.head := head && !is_body
io.flit.bits.tail := tail && (is_body || !has_body)
io.flit.bits.egress_id := Mux1H(requestOH.zipWithIndex.map { case (r, i) =>
r -> idToEgress(i).U
})
io.flit.bits.payload := Mux(is_body, body, const)
when (io.flit.fire && io.flit.bits.head) { is_body := true.B }
when (io.flit.fire && io.flit.bits.tail) { is_body := false.B }
}
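// Illustrative framing sketch (hypothetical 2-beat PutFullData, not part of the original
// file): the serializer above emits one head flit carrying the const fields, then one
// body flit per protocol beat, so the flit stream is
//   [head | const], [body | beat 0], [body + tail | beat 1].
// Messages with no body (e.g. TLBundleE, or a Get whose mask is all ones) collapse to a
// single flit with both head and tail set.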
abstract class TLChannelFromNoC[T <: TLChannel](gen: => T)(implicit val p: Parameters) extends Module with TLFieldHelper {
val flitWidth = minTLPayloadWidth(gen)
val io = IO(new Bundle {
val protocol = Decoupled(gen)
val flit = Flipped(Decoupled(new EgressFlit(flitWidth)))
})
// Handle size = 1 gracefully (Chisel3 empty range is broken)
def trim(id: UInt, size: Int): UInt = if (size <= 1) 0.U else id(log2Ceil(size)-1, 0)
val protocol = Wire(Decoupled(gen))
val body_fields = getBodyFields(protocol.bits)
val const_fields = getConstFields(protocol.bits)
val is_const = RegInit(true.B)
val const_reg = Reg(UInt(const_fields.map(_.getWidth).sum.W))
val const = Mux(io.flit.bits.head, io.flit.bits.payload, const_reg)
io.flit.ready := (is_const && !io.flit.bits.tail) || protocol.ready
protocol.valid := (!is_const || io.flit.bits.tail) && io.flit.valid
def assign(i: UInt, sigs: Seq[Data]) = {
var t = i
for (s <- sigs.reverse) {
s := t.asTypeOf(s.cloneType)
t = t >> s.getWidth
}
}
assign(const, const_fields)
assign(io.flit.bits.payload, body_fields)
when (io.flit.fire && io.flit.bits.head) { is_const := false.B; const_reg := io.flit.bits.payload }
when (io.flit.fire && io.flit.bits.tail) { is_const := true.B }
}
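// Worked sketch of `assign` (hypothetical field widths): with sigs = Seq(a, b) where a is
// 3 bits and b is 5 bits, the reversed loop peels fields off the low end of the word, so
//   assign("b101_01010".U(8.W), Seq(a, b))
// drives b := "b01010".U and a := "b101".U, i.e. the first field sits in the most
// significant bits, matching the Cat(...) packing done on the TLChannelToNoC side.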
trait HasAddressDecoder {
// Filter a list to only those elements selected
def filter[T](data: Seq[T], mask: Seq[Boolean]) = (data zip mask).filter(_._2).map(_._1)
val edgeIn: TLEdge
val edgesOut: Seq[TLEdge]
lazy val reacheableIO = edgesOut.map { mp =>
edgeIn.client.clients.exists { c => mp.manager.managers.exists { m =>
c.visibility.exists { ca => m.address.exists { ma =>
ca.overlaps(ma)
}}
}}
}.toVector
lazy val releaseIO = (edgesOut zip reacheableIO).map { case (mp, reachable) =>
reachable && edgeIn.client.anySupportProbe && mp.manager.anySupportAcquireB
}.toVector
def outputPortFn(connectIO: Seq[Boolean]) = {
val port_addrs = edgesOut.map(_.manager.managers.flatMap(_.address))
val routingMask = AddressDecoder(filter(port_addrs, connectIO))
val route_addrs = port_addrs.map(seq => AddressSet.unify(seq.map(_.widen(~routingMask)).distinct))
route_addrs.map(seq => (addr: UInt) => seq.map(_.contains(addr)).reduce(_||_))
}
}
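// Example of outputPortFn (hypothetical address map): with two reachable slaves covering
// 0x0000-0x0fff and 0x1000-0x1fff, AddressDecoder selects a minimal routing mask (here a
// single address bit) and the returned Seq holds one predicate per output port; predicate
// j is true exactly when a request address falls in slave j's widened region, which
// requestOH then turns into the egress id via Mux1H.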
class TLAToNoC(
val edgeIn: TLEdge,
val edgesOut: Seq[TLEdge],
bundle: TLBundleParameters,
slaveToAEgress: Int => Int,
sourceStart: Int
)(implicit p: Parameters) extends TLChannelToNoC(new TLBundleA(bundle), edgeIn, slaveToAEgress)(p) with HasAddressDecoder {
has_body := edgeIn.hasData(protocol.bits) || (~protocol.bits.mask =/= 0.U)
lazy val connectAIO = reacheableIO
lazy val requestOH = outputPortFn(connectAIO).zipWithIndex.map { case (o, j) =>
connectAIO(j).B && (unique(connectAIO) || o(protocol.bits.address))
}
q.io.enq <> io.protocol
q.io.enq.bits.source := io.protocol.bits.source | sourceStart.U
}
class TLAFromNoC(edgeOut: TLEdge, bundle: TLBundleParameters)(implicit p: Parameters) extends TLChannelFromNoC(new TLBundleA(bundle))(p) {
io.protocol <> protocol
when (io.flit.bits.head) { io.protocol.bits.mask := ~(0.U(io.protocol.bits.mask.getWidth.W)) }
}
class TLBToNoC(
edgeOut: TLEdge,
edgesIn: Seq[TLEdge],
bundle: TLBundleParameters,
masterToBIngress: Int => Int
)(implicit p: Parameters) extends TLChannelToNoC(new TLBundleB(bundle), edgeOut, masterToBIngress)(p) {
has_body := edgeOut.hasData(protocol.bits) || (~protocol.bits.mask =/= 0.U)
lazy val inputIdRanges = TLXbar.mapInputIds(edgesIn.map(_.client))
lazy val requestOH = inputIdRanges.map { i => i.contains(protocol.bits.source) }
q.io.enq <> io.protocol
}
class TLBFromNoC(edgeIn: TLEdge, bundle: TLBundleParameters, sourceSize: Int)(implicit p: Parameters) extends TLChannelFromNoC(new TLBundleB(bundle))(p) {
io.protocol <> protocol
io.protocol.bits.source := trim(protocol.bits.source, sourceSize)
when (io.flit.bits.head) { io.protocol.bits.mask := ~(0.U(io.protocol.bits.mask.getWidth.W)) }
}
class TLCToNoC(
val edgeIn: TLEdge,
val edgesOut: Seq[TLEdge],
bundle: TLBundleParameters,
slaveToCEgress: Int => Int,
sourceStart: Int
)(implicit p: Parameters) extends TLChannelToNoC(new TLBundleC(bundle), edgeIn, slaveToCEgress)(p) with HasAddressDecoder {
has_body := edgeIn.hasData(protocol.bits)
lazy val connectCIO = releaseIO
lazy val requestOH = outputPortFn(connectCIO).zipWithIndex.map {
case (o, j) => connectCIO(j).B && (unique(connectCIO) || o(protocol.bits.address))
}
q.io.enq <> io.protocol
q.io.enq.bits.source := io.protocol.bits.source | sourceStart.U
}
class TLCFromNoC(edgeOut: TLEdge, bundle: TLBundleParameters)(implicit p: Parameters) extends TLChannelFromNoC(new TLBundleC(bundle))(p) {
io.protocol <> protocol
}
class TLDToNoC(
edgeOut: TLEdge,
edgesIn: Seq[TLEdge],
bundle: TLBundleParameters,
masterToDIngress: Int => Int,
sourceStart: Int
)(implicit p: Parameters) extends TLChannelToNoC(new TLBundleD(bundle), edgeOut, masterToDIngress)(p) {
has_body := edgeOut.hasData(protocol.bits)
lazy val inputIdRanges = TLXbar.mapInputIds(edgesIn.map(_.client))
lazy val requestOH = inputIdRanges.map { i => i.contains(protocol.bits.source) }
q.io.enq <> io.protocol
q.io.enq.bits.sink := io.protocol.bits.sink | sourceStart.U
}
class TLDFromNoC(edgeIn: TLEdge, bundle: TLBundleParameters, sourceSize: Int)(implicit p: Parameters) extends TLChannelFromNoC(new TLBundleD(bundle))(p)
{
io.protocol <> protocol
io.protocol.bits.source := trim(protocol.bits.source, sourceSize)
}
class TLEToNoC(
val edgeIn: TLEdge,
val edgesOut: Seq[TLEdge],
bundle: TLBundleParameters,
slaveToEEgress: Int => Int
)(implicit p: Parameters) extends TLChannelToNoC(new TLBundleE(bundle), edgeIn, slaveToEEgress)(p) {
has_body := edgeIn.hasData(protocol.bits)
lazy val outputIdRanges = TLXbar.mapOutputIds(edgesOut.map(_.manager))
lazy val requestOH = outputIdRanges.map { o => o.contains(protocol.bits.sink) }
q.io.enq <> io.protocol
}
class TLEFromNoC(edgeOut: TLEdge, bundle: TLBundleParameters, sourceSize: Int)(implicit p: Parameters) extends TLChannelFromNoC(new TLBundleE(bundle))(p) {
io.protocol <> protocol
io.protocol.bits.sink := trim(protocol.bits.sink, sourceSize)
}
| module TLAFromNoC_3( // @[TilelinkAdapters.scala:128:7]
input clock, // @[TilelinkAdapters.scala:128:7]
input reset, // @[TilelinkAdapters.scala:128:7]
input io_protocol_ready, // @[TilelinkAdapters.scala:56:14]
output io_protocol_valid, // @[TilelinkAdapters.scala:56:14]
output [2:0] io_protocol_bits_opcode, // @[TilelinkAdapters.scala:56:14]
output [2:0] io_protocol_bits_param, // @[TilelinkAdapters.scala:56:14]
output [3:0] io_protocol_bits_size, // @[TilelinkAdapters.scala:56:14]
output [5:0] io_protocol_bits_source, // @[TilelinkAdapters.scala:56:14]
output [31:0] io_protocol_bits_address, // @[TilelinkAdapters.scala:56:14]
output [7:0] io_protocol_bits_mask, // @[TilelinkAdapters.scala:56:14]
output [63:0] io_protocol_bits_data, // @[TilelinkAdapters.scala:56:14]
output io_protocol_bits_corrupt, // @[TilelinkAdapters.scala:56:14]
output io_flit_ready, // @[TilelinkAdapters.scala:56:14]
input io_flit_valid, // @[TilelinkAdapters.scala:56:14]
input io_flit_bits_head, // @[TilelinkAdapters.scala:56:14]
input io_flit_bits_tail, // @[TilelinkAdapters.scala:56:14]
input [72:0] io_flit_bits_payload // @[TilelinkAdapters.scala:56:14]
);
reg is_const; // @[TilelinkAdapters.scala:68:25]
reg [47:0] const_reg; // @[TilelinkAdapters.scala:69:22]
wire [47:0] const_0 = io_flit_bits_head ? io_flit_bits_payload[47:0] : const_reg; // @[TilelinkAdapters.scala:56:14, :69:22, :70:18]
wire io_flit_ready_0 = is_const & ~io_flit_bits_tail | io_protocol_ready; // @[TilelinkAdapters.scala:68:25, :71:{30,33,53}]
wire _GEN = io_flit_ready_0 & io_flit_valid; // @[Decoupled.scala:51:35]
wire _GEN_0 = _GEN & io_flit_bits_head; // @[Decoupled.scala:51:35]
always @(posedge clock) begin // @[TilelinkAdapters.scala:128:7]
if (reset) // @[TilelinkAdapters.scala:128:7]
is_const <= 1'h1; // @[TilelinkAdapters.scala:68:25, :128:7]
else // @[TilelinkAdapters.scala:128:7]
is_const <= _GEN & io_flit_bits_tail | ~_GEN_0 & is_const; // @[Decoupled.scala:51:35]
if (_GEN_0) // @[TilelinkAdapters.scala:84:22]
const_reg <= io_flit_bits_payload[47:0]; // @[TilelinkAdapters.scala:56:14, :69:22]
end // always @(posedge) |
Generate the Verilog code corresponding to the following Chisel files.
File package.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip
import chisel3._
import chisel3.util._
import scala.math.min
import scala.collection.{immutable, mutable}
package object util {
implicit class UnzippableOption[S, T](val x: Option[(S, T)]) {
def unzip = (x.map(_._1), x.map(_._2))
}
implicit class UIntIsOneOf(private val x: UInt) extends AnyVal {
def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR
def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq)
}
implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal {
/** Like Vec.apply(idx), but tolerates indices of mismatched width */
def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0))
}
implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal {
def apply(idx: UInt): T = {
if (x.size <= 1) {
x.head
} else if (!isPow2(x.size)) {
// For non-power-of-2 seqs, reflect elements to simplify decoder
(x ++ x.takeRight(x.size & -x.size)).toSeq(idx)
} else {
// Ignore MSBs of idx
val truncIdx =
if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx
else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0)
x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) }
}
}
def extract(idx: UInt): T = VecInit(x).extract(idx)
def asUInt: UInt = Cat(x.map(_.asUInt).reverse)
def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n)
def rotate(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n)
def rotateRight(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
}
// allow bitwise ops on Seq[Bool] just like UInt
implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal {
def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b }
def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b }
def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b }
def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x
def >> (n: Int): Seq[Bool] = x drop n
def unary_~ : Seq[Bool] = x.map(!_)
def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_)
def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_)
def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_)
private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B)
}
implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal {
def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable))
def getElements: Seq[Element] = x match {
case e: Element => Seq(e)
case a: Aggregate => a.getElements.flatMap(_.getElements)
}
}
/** Any Data subtype that has a Bool member named valid. */
type DataCanBeValid = Data { val valid: Bool }
implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal {
def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable)
}
implicit class StringToAugmentedString(private val x: String) extends AnyVal {
/** converts from camel case to underscores, also removing all spaces */
def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") {
case (acc, c) if c.isUpper => acc + "_" + c.toLower
case (acc, c) if c == ' ' => acc
case (acc, c) => acc + c
}
/** converts spaces or underscores to hyphens, also lowering case */
def kebab: String = x.toLowerCase map {
case ' ' => '-'
case '_' => '-'
case c => c
}
def named(name: Option[String]): String = {
x + name.map("_named_" + _ ).getOrElse("_with_no_name")
}
def named(name: String): String = named(Some(name))
}
implicit def uintToBitPat(x: UInt): BitPat = BitPat(x)
implicit def wcToUInt(c: WideCounter): UInt = c.value
implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal {
def sextTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x)
}
def padTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(0.U((n - x.getWidth).W), x)
}
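// Worked examples (illustrative): for x = "b101".U (3 bits), x.sextTo(8) == "b11111101".U
// and x.padTo(8) == "b00000101".U; both require the target width n to be >= x.getWidth.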
// shifts left by n if n >= 0, or right by -n if n < 0
def << (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << n(w-1, 0)
Mux(n(w), shifted >> (1 << w), shifted)
}
// shifts right by n if n >= 0, or left by -n if n < 0
def >> (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << (1 << w) >> n(w-1, 0)
Mux(n(w), shifted, shifted >> (1 << w))
}
// Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts
def extract(hi: Int, lo: Int): UInt = {
require(hi >= lo-1)
if (hi == lo-1) 0.U
else x(hi, lo)
}
// Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts
def extractOption(hi: Int, lo: Int): Option[UInt] = {
require(hi >= lo-1)
if (hi == lo-1) None
else Some(x(hi, lo))
}
// like x & ~y, but first truncate or zero-extend y to x's width
def andNot(y: UInt): UInt = x & ~(y | (x & 0.U))
def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n)
def rotateRight(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r))
}
}
def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n))
def rotateLeft(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r))
}
}
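// Quick sanity examples (illustrative): "b0011".U(4.W).rotateRight(1) == "b1001".U and
// "b0011".U(4.W).rotateLeft(1) == "b0110".U; the UInt-amount variants build a log2-depth
// ladder of Muxes selecting among the fixed power-of-two rotations.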
// compute (this + y) % n, given (this < n) and (y < n)
def addWrap(y: UInt, n: Int): UInt = {
val z = x +& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0)
}
// compute (this - y) % n, given (this < n) and (y < n)
def subWrap(y: UInt, n: Int): UInt = {
val z = x -& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0)
}
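// Worked example (illustrative, n = 5): (3.U).addWrap(4.U, 5) gives z = 7 >= 5, so the
// result is 2; (1.U).subWrap(3.U, 5) gives z = -2 (sign bit set), so the result is 3.
// Both depend on the stated preconditions x < n and y < n.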
def grouped(width: Int): Seq[UInt] =
(0 until x.getWidth by width).map(base => x(base + width - 1, base))
def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds
def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x)
// Like >=, but prevents x-prop for ('x >= 0)
def >== (y: UInt): Bool = x >= y || y === 0.U
}
implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal {
def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y)
def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y)
}
implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal {
def toInt: Int = if (x) 1 else 0
// this one's snagged from scalaz
def option[T](z: => T): Option[T] = if (x) Some(z) else None
}
implicit class IntToAugmentedInt(private val x: Int) extends AnyVal {
// exact log2
def log2: Int = {
require(isPow2(x))
log2Ceil(x)
}
}
def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x)
def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x))
def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0)
def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1)
def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None
// Fill 1s from low bits to high bits
def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth)
def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0))
helper(1, x)(width-1, 0)
}
// Fill 1s from high bits to low bits
def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth)
def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x >> s))
helper(1, x)(width-1, 0)
}
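// Worked example (illustrative): leftOR("b00100".U(5.W)) == "b11100".U (ones propagate
// toward the MSB) and rightOR("b00100".U(5.W)) == "b00111".U (ones propagate toward the
// LSB); the optional cap bounds the shift depth when only nearby bits are needed.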
def OptimizationBarrier[T <: Data](in: T): T = {
val barrier = Module(new Module {
val io = IO(new Bundle {
val x = Input(chiselTypeOf(in))
val y = Output(chiselTypeOf(in))
})
io.y := io.x
override def desiredName = s"OptimizationBarrier_${in.typeName}"
})
barrier.io.x := in
barrier.io.y
}
/** Similar to Seq.groupBy except this returns a Seq instead of a Map
* Useful for deterministic code generation
*/
def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = {
val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]]
for (x <- xs) {
val key = f(x)
val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A])
l += x
}
map.view.map({ case (k, vs) => k -> vs.toList }).toList
}
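// Usage sketch (hypothetical helper name, not part of the original API): unlike
// Seq.groupBy, the result order is deterministic, following first appearance of each key.
private def groupByIntoSeqExample: Seq[(Int, Seq[Int])] =
  groupByIntoSeq(Seq(1, 2, 3, 4))(_ % 2) // == Seq(1 -> List(1, 3), 0 -> List(2, 4))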
def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match {
case 1 => List.fill(n)(in.head)
case x if x == n => in
case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in")
}
// HeterogeneousBag moved to standalone diplomacy
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts)
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag
}
File LazyModuleImp.scala:
package org.chipsalliance.diplomacy.lazymodule
import chisel3.{withClockAndReset, Module, RawModule, Reset, _}
import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo}
import firrtl.passes.InlineAnnotation
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.nodes.Dangle
import scala.collection.immutable.SortedMap
/** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]].
*
* This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy.
*/
sealed trait LazyModuleImpLike extends RawModule {
/** [[LazyModule]] that contains this instance. */
val wrapper: LazyModule
/** IOs that will be automatically "punched" for this instance. */
val auto: AutoBundle
/** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */
protected[diplomacy] val dangles: Seq[Dangle]
// [[wrapper.module]] had better not be accessed while LazyModules are still being built!
require(
LazyModule.scope.isEmpty,
s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}"
)
/** Set module name. Defaults to the containing LazyModule's desiredName. */
override def desiredName: String = wrapper.desiredName
suggestName(wrapper.suggestedName)
/** [[Parameters]] for chisel [[Module]]s. */
implicit val p: Parameters = wrapper.p
/** instantiate this [[LazyModule]], return [[AutoBundle]] and a unconnected [[Dangle]]s from this module and
* submodules.
*/
protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = {
// 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]],
// 2. return [[Dangle]]s from each module.
val childDangles = wrapper.children.reverse.flatMap { c =>
implicit val sourceInfo: SourceInfo = c.info
c.cloneProto.map { cp =>
// If the child is a clone, then recursively set cloneProto of its children as well
def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = {
require(bases.size == clones.size)
(bases.zip(clones)).map { case (l, r) =>
require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}")
l.cloneProto = Some(r)
assignCloneProtos(l.children, r.children)
}
}
assignCloneProtos(c.children, cp.children)
// Clone the child module as a record, and get its [[AutoBundle]]
val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName)
val clonedAuto = clone("auto").asInstanceOf[AutoBundle]
// Get the empty [[Dangle]]'s of the cloned child
val rawDangles = c.cloneDangles()
require(rawDangles.size == clonedAuto.elements.size)
// Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s
val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) }
dangles
}.getOrElse {
// For non-clones, instantiate the child module
val mod = try {
Module(c.module)
} catch {
case e: ChiselException => {
println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}")
throw e
}
}
mod.dangles
}
}
// Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]].
// This will result in a sequence of [[Dangle]] from these [[BaseNode]]s.
val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate())
// Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]]
val allDangles = nodeDangles ++ childDangles
// Group [[allDangles]] by their [[source]].
val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*)
// For each [[source]] set of [[Dangle]]s of size 2, ensure that these
// can be connected as a source-sink pair (have opposite flipped value).
// Make the connection and mark them as [[done]].
val done = Set() ++ pairing.values.filter(_.size == 2).map {
case Seq(a, b) =>
require(a.flipped != b.flipped)
// @todo <> in chisel3 makes directionless connection.
if (a.flipped) {
a.data <> b.data
} else {
b.data <> a.data
}
a.source
case _ => None
}
// Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module.
val forward = allDangles.filter(d => !done(d.source))
// Generate [[AutoBundle]] IO from [[forward]].
val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*))
// Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]]
val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) =>
if (d.flipped) {
d.data <> io
} else {
io <> d.data
}
d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name)
}
// Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]].
wrapper.inModuleBody.reverse.foreach {
_()
}
if (wrapper.shouldBeInlined) {
chisel3.experimental.annotate(new ChiselAnnotation {
def toFirrtl = InlineAnnotation(toNamed)
})
}
// Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]].
(auto, dangles)
}
}
/** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike {
/** Instantiate hardware of this `Module`. */
val (auto, dangles) = instantiate()
}
/** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike {
// These wires are the default clock+reset for all LazyModule children.
// It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the
// [[LazyRawModuleImp]] children.
// Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly.
/** drive clock explicitly. */
val childClock: Clock = Wire(Clock())
/** drive reset explicitly. */
val childReset: Reset = Wire(Reset())
// the default is that these are disabled
childClock := false.B.asClock
childReset := chisel3.DontCare
def provideImplicitClockToLazyChildren: Boolean = false
val (auto, dangles) =
if (provideImplicitClockToLazyChildren) {
withClockAndReset(childClock, childReset) { instantiate() }
} else {
instantiate()
}
}
File MixedNode.scala:
package org.chipsalliance.diplomacy.nodes
import chisel3.{Data, DontCare, Wire}
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.{Field, Parameters}
import org.chipsalliance.diplomacy.ValName
import org.chipsalliance.diplomacy.sourceLine
/** One side metadata of a [[Dangle]].
*
* Describes one side of an edge going into or out of a [[BaseNode]].
*
* @param serial
* the global [[BaseNode.serial]] number of the [[BaseNode]] that this [[HalfEdge]] connects to.
* @param index
* the `index` in the [[BaseNode]]'s input or output port list that this [[HalfEdge]] belongs to.
*/
case class HalfEdge(serial: Int, index: Int) extends Ordered[HalfEdge] {
import scala.math.Ordered.orderingToOrdered
def compare(that: HalfEdge): Int = HalfEdge.unapply(this).compare(HalfEdge.unapply(that))
}
/** [[Dangle]] captures the `IO` information of a [[LazyModule]] and which two [[BaseNode]]s the [[Edges]]/[[Bundle]]
* connects.
*
* [[Dangle]]s are generated by [[BaseNode.instantiate]] using [[MixedNode.danglesOut]] and [[MixedNode.danglesIn]] ,
* [[LazyModuleImp.instantiate]] connects those that go to internal or explicit IO connections in a [[LazyModule]].
*
* @param source
* the source [[HalfEdge]] of this [[Dangle]], which captures the source [[BaseNode]] and the port `index` within
* that [[BaseNode]].
* @param sink
* sink [[HalfEdge]] of this [[Dangle]], which captures the sink [[BaseNode]] and the port `index` within that
* [[BaseNode]].
* @param flipped
* flip or not in [[AutoBundle.makeElements]]. If true this corresponds to `danglesOut`, if false it corresponds to
* `danglesIn`.
* @param dataOpt
* actual [[Data]] for the hardware connection. Can be empty if this belongs to a cloned module
*/
case class Dangle(source: HalfEdge, sink: HalfEdge, flipped: Boolean, name: String, dataOpt: Option[Data]) {
def data = dataOpt.get
}
/** [[Edges]] is a collection of parameters describing the functionality and connection for an interface, which is often
* derived from the interconnection protocol and can inform the parameterization of the hardware bundles that actually
* implement the protocol.
*/
case class Edges[EI, EO](in: Seq[EI], out: Seq[EO])
/** A field available in [[Parameters]] used to determine whether [[InwardNodeImp.monitor]] will be called. */
case object MonitorsEnabled extends Field[Boolean](true)
/** When rendering the edge in a graphical format, flip the order in which the edges' source and sink are presented.
*
* For example, when rendering graphML, yEd by default tries to put the source node vertically above the sink node, but
* [[RenderFlipped]] inverts this relationship. When a particular [[LazyModule]] contains both source nodes and sink
* nodes, flipping the rendering of one node's edge will usually produce a more concise visual layout for the
* [[LazyModule]].
*/
case object RenderFlipped extends Field[Boolean](false)
/** The sealed node class in the package, all node are derived from it.
*
* @param inner
* Sink interface implementation.
* @param outer
* Source interface implementation.
* @param valName
* val name of this node.
* @tparam DI
* Downward-flowing parameters received on the inner side of the node. It is usually a bunch of parameters
* describing the protocol parameters from a source. For an [[InwardNode]], it is determined by the connected
* [[OutwardNode]]. Since it can be connected to multiple sources, this parameter is always a Seq of source port
* parameters.
* @tparam UI
* Upward-flowing parameters generated by the inner side of the node. It is usually a bunch of parameters describing
* the protocol parameters of a sink. For an [[InwardNode]], it is determined by the node itself.
* @tparam EI
* Edge Parameters describing a connection on the inner side of the node. It is usually a bunch of transfers
* specified for a sink according to protocol.
* @tparam BI
* Bundle type used when connecting to the inner side of the node. It is a hardware interface of this sink interface.
* It should extend from [[chisel3.Data]], which represents the real hardware.
* @tparam DO
* Downward-flowing parameters generated on the outer side of the node. It is usually a bunch of parameters
* describing the protocol parameters of a source. For an [[OutwardNode]], it is determined by the node itself.
* @tparam UO
* Upward-flowing parameters received by the outer side of the node. It is usually a bunch of parameters describing
* the protocol parameters from a sink. For an [[OutwardNode]], it is determined by the connected [[InwardNode]].
* Since it can be connected to multiple sinks, this parameter is always a Seq of sink port parameters.
* @tparam EO
* Edge Parameters describing a connection on the outer side of the node. It is usually a bunch of transfers
* specified for a source according to protocol.
* @tparam BO
* Bundle type used when connecting to the outer side of the node. It is a hardware interface of this source
* interface. It should extend from [[chisel3.Data]], which represents the real hardware.
*
* @note
* Call Graph of [[MixedNode]]
* - line `─`: source is processed by a function and the result is passed to others
* - Arrow `→`: target of arrow is generated by source
*
* {{{
* (from the other node)
* ┌─────────────────────────────────────────────────────────[[InwardNode.uiParams]]─────────────┐
* ↓ │
* (binding node when elaboration) [[OutwardNode.uoParams]]────────────────────────[[MixedNode.mapParamsU]]→──────────┐ │
* [[InwardNode.accPI]] │ │ │
* │ │ (based on protocol) │
* │ │ [[MixedNode.inner.edgeI]] │
* │ │ ↓ │
* ↓ │ │ │
* (immobilize after elaboration) (inward port from [[OutwardNode]]) │ ↓ │
* [[InwardNode.iBindings]]──┐ [[MixedNode.iDirectPorts]]────────────────────→[[MixedNode.iPorts]] [[InwardNode.uiParams]] │
* │ │ ↑ │ │ │
* │ │ │ [[OutwardNode.doParams]] │ │
* │ │ │ (from the other node) │ │
* │ │ │ │ │ │
* │ │ │ │ │ │
* │ │ │ └────────┬──────────────┤ │
* │ │ │ │ │ │
* │ │ │ │ (based on protocol) │
* │ │ │ │ [[MixedNode.inner.edgeI]] │
* │ │ │ │ │ │
* │ │ (from the other node) │ ↓ │
* │ └───[[OutwardNode.oPortMapping]] [[OutwardNode.oStar]] │ [[MixedNode.edgesIn]]───┐ │
* │ ↑ ↑ │ │ ↓ │
* │ │ │ │ │ [[MixedNode.in]] │
* │ │ │ │ ↓ ↑ │
* │ (solve star connection) │ │ │ [[MixedNode.bundleIn]]──┘ │
* ├───[[MixedNode.resolveStar]]→─┼─────────────────────────────┤ └────────────────────────────────────┐ │
* │ │ │ [[MixedNode.bundleOut]]─┐ │ │
* │ │ │ ↑ ↓ │ │
* │ │ │ │ [[MixedNode.out]] │ │
* │ ↓ ↓ │ ↑ │ │
* │ ┌─────[[InwardNode.iPortMapping]] [[InwardNode.iStar]] [[MixedNode.edgesOut]]──┘ │ │
* │ │ (from the other node) ↑ │ │
* │ │ │ │ │ │
* │ │ │ [[MixedNode.outer.edgeO]] │ │
* │ │ │ (based on protocol) │ │
* │ │ │ │ │ │
* │ │ │ ┌────────────────────────────────────────┤ │ │
* │ │ │ │ │ │ │
* │ │ │ │ │ │ │
* │ │ │ │ │ │ │
* (immobilize after elaboration)│ ↓ │ │ │ │
* [[OutwardNode.oBindings]]─┘ [[MixedNode.oDirectPorts]]───→[[MixedNode.oPorts]] [[OutwardNode.doParams]] │ │
* ↑ (inward port from [[OutwardNode]]) │ │ │ │
* │ ┌─────────────────────────────────────────┤ │ │ │
* │ │ │ │ │ │
* │ │ │ │ │ │
* [[OutwardNode.accPO]] │ ↓ │ │ │
* (binding node when elaboration) │ [[InwardNode.diParams]]─────→[[MixedNode.mapParamsD]]────────────────────────────┘ │ │
* │ ↑ │ │
* │ └──────────────────────────────────────────────────────────────────────────────────────────┘ │
* └──────────────────────────────────────────────────────────────────────────────────────────────────────────┘
* }}}
*/
abstract class MixedNode[DI, UI, EI, BI <: Data, DO, UO, EO, BO <: Data](
val inner: InwardNodeImp[DI, UI, EI, BI],
val outer: OutwardNodeImp[DO, UO, EO, BO]
)(
implicit valName: ValName)
extends BaseNode
with NodeHandle[DI, UI, EI, BI, DO, UO, EO, BO]
with InwardNode[DI, UI, BI]
with OutwardNode[DO, UO, BO] {
// Generate a [[NodeHandle]] with inward and outward node are both this node.
val inward = this
val outward = this
/** Debug info of nodes binding. */
def bindingInfo: String = s"""$iBindingInfo
|$oBindingInfo
|""".stripMargin
/** Debug info of ports connecting. */
def connectedPortsInfo: String = s"""${oPorts.size} outward ports connected: [${oPorts.map(_._2.name).mkString(",")}]
|${iPorts.size} inward ports connected: [${iPorts.map(_._2.name).mkString(",")}]
|""".stripMargin
/** Debug info of parameters propagations. */
def parametersInfo: String = s"""${doParams.size} downstream outward parameters: [${doParams.mkString(",")}]
|${uoParams.size} upstream outward parameters: [${uoParams.mkString(",")}]
|${diParams.size} downstream inward parameters: [${diParams.mkString(",")}]
|${uiParams.size} upstream inward parameters: [${uiParams.mkString(",")}]
|""".stripMargin
/** For a given node, converts [[OutwardNode.accPO]] and [[InwardNode.accPI]] to [[MixedNode.oPortMapping]] and
* [[MixedNode.iPortMapping]].
*
* Given counts of known inward and outward binding and inward and outward star bindings, return the resolved inward
* stars and outward stars.
*
* This method will also validate the arguments and throw a runtime error if the values are unsuitable for this type
* of node.
*
* @param iKnown
* Number of known-size ([[BIND_ONCE]]) input bindings.
* @param oKnown
* Number of known-size ([[BIND_ONCE]]) output bindings.
* @param iStar
* Number of unknown size ([[BIND_STAR]]) input bindings.
* @param oStar
* Number of unknown size ([[BIND_STAR]]) output bindings.
* @return
* A Tuple of the resolved number of input and output connections.
*/
protected[diplomacy] def resolveStar(iKnown: Int, oKnown: Int, iStar: Int, oStar: Int): (Int, Int)
/** Function to generate downward-flowing outward params from the downward-flowing input params and the current output
* ports.
*
* @param n
* The size of the output sequence to generate.
* @param p
* Sequence of downward-flowing input parameters of this node.
* @return
* A `n`-sized sequence of downward-flowing output edge parameters.
*/
protected[diplomacy] def mapParamsD(n: Int, p: Seq[DI]): Seq[DO]
/** Function to generate upward-flowing input parameters from the upward-flowing output parameters [[uiParams]].
*
* @param n
* Size of the output sequence.
* @param p
* Upward-flowing output edge parameters.
* @return
* A n-sized sequence of upward-flowing input edge parameters.
*/
protected[diplomacy] def mapParamsU(n: Int, p: Seq[UO]): Seq[UI]
/** @return
* The sink cardinality of the node, the number of outputs bound with [[BIND_QUERY]] summed with inputs bound with
* [[BIND_STAR]].
*/
protected[diplomacy] lazy val sinkCard: Int = oBindings.count(_._3 == BIND_QUERY) + iBindings.count(_._3 == BIND_STAR)
/** @return
* The source cardinality of this node, the number of inputs bound with [[BIND_QUERY]] summed with the number of
* output bindings bound with [[BIND_STAR]].
*/
protected[diplomacy] lazy val sourceCard: Int =
iBindings.count(_._3 == BIND_QUERY) + oBindings.count(_._3 == BIND_STAR)
/** @return list of nodes involved in flex bindings with this node. */
protected[diplomacy] lazy val flexes: Seq[BaseNode] =
oBindings.filter(_._3 == BIND_FLEX).map(_._2) ++ iBindings.filter(_._3 == BIND_FLEX).map(_._2)
/** Resolves the flex to be either source or sink and returns the offset where the [[BIND_STAR]] operators begin
* greedily taking up the remaining connections.
*
* @return
* A value >= 0 if it is sink cardinality, a negative value for source cardinality. The magnitude of the return
* value is not relevant.
*/
protected[diplomacy] lazy val flexOffset: Int = {
/** Recursively performs a depth-first search of the [[flexes]], [[BaseNode]]s connected to this node with flex
* operators. The algorithm bottoms out when we either get to a node we have already visited or when we get to a
* connection that is not a flex and can set the direction for us. Otherwise, recurse by visiting the `flexes` of
* each node in the current set and decide whether they should be added to the set or not.
*
* @return
* the mapping of [[BaseNode]] indexed by their serial numbers.
*/
def DFS(v: BaseNode, visited: Map[Int, BaseNode]): Map[Int, BaseNode] = {
if (visited.contains(v.serial) || !v.flexibleArityDirection) {
visited
} else {
v.flexes.foldLeft(visited + (v.serial -> v))((sum, n) => DFS(n, sum))
}
}
/** Determine which [[BaseNode]] are involved in resolving the flex connections to/from this node.
*
* @example
* {{{
* a :*=* b :*=* c
* d :*=* b
* e :*=* f
* }}}
*
* `flexSet` for `a`, `b`, `c`, or `d` will be `Set(a, b, c, d)` `flexSet` for `e` or `f` will be `Set(e,f)`
*/
val flexSet = DFS(this, Map()).values
/** The total number of :*= operators where we're on the left. */
val allSink = flexSet.map(_.sinkCard).sum
/** The total number of :=* operators used when we're on the right. */
val allSource = flexSet.map(_.sourceCard).sum
require(
allSink == 0 || allSource == 0,
s"The nodes ${flexSet.map(_.name)} which are inter-connected by :*=* have ${allSink} :*= operators and ${allSource} :=* operators connected to them, making it impossible to determine cardinality inference direction."
)
allSink - allSource
}
/** @return A value >= 0 if it is sink cardinality, a negative value for source cardinality. */
protected[diplomacy] def edgeArityDirection(n: BaseNode): Int = {
if (flexibleArityDirection) flexOffset
else if (n.flexibleArityDirection) n.flexOffset
else 0
}
/** For a node which is connected between two nodes, select the one that will influence the direction of the flex
* resolution.
*/
protected[diplomacy] def edgeAritySelect(n: BaseNode, l: => Int, r: => Int): Int = {
val dir = edgeArityDirection(n)
if (dir < 0) l
else if (dir > 0) r
else 1
}
/** Ensure that the same node is not visited twice in resolving `:*=`, etc operators. */
private var starCycleGuard = false
/** Resolve all the star operators into concrete indices. As connections are being made, some may be "star"
* connections which need to be resolved in some way to determine how many actual edges they correspond to. We also
* need to build up the ranges of edges which correspond to each binding operator, so that we can apply the correct
* edge parameters and later build up correct bundle connections.
*
* [[oPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that oPort (binding
* operator). [[iPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that iPort
* (binding operator). [[oStar]]: `Int` the value to return for this node `N` for any `N :*= foo` or `N :*=* foo :*=
* bar` [[iStar]]: `Int` the value to return for this node `N` for any `foo :=* N` or `bar :=* foo :*=* N`
*/
protected[diplomacy] lazy val (
oPortMapping: Seq[(Int, Int)],
iPortMapping: Seq[(Int, Int)],
oStar: Int,
iStar: Int
) = {
try {
if (starCycleGuard) throw StarCycleException()
starCycleGuard = true
// For a given node N...
// Number of foo :=* N
// + Number of bar :=* foo :*=* N
val oStars = oBindings.count { case (_, n, b, _, _) =>
b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) < 0)
}
// Number of N :*= foo
// + Number of N :*=* foo :*= bar
val iStars = iBindings.count { case (_, n, b, _, _) =>
b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) > 0)
}
// 1 for foo := N
// + bar.iStar for bar :*= foo :*=* N
// + foo.iStar for foo :*= N
// + 0 for foo :=* N
val oKnown = oBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, 0, n.iStar)
case BIND_QUERY => n.iStar
case BIND_STAR => 0
}
}.sum
// 1 for N := foo
// + bar.oStar for N :*=* foo :=* bar
// + foo.oStar for N :=* foo
// + 0 for N :*= foo
val iKnown = iBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, n.oStar, 0)
case BIND_QUERY => n.oStar
case BIND_STAR => 0
}
}.sum
// Resolve star depends on the node subclass to implement the algorithm for this.
val (iStar, oStar) = resolveStar(iKnown, oKnown, iStars, oStars)
// Cumulative list of resolved outward binding range starting points
val oSum = oBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, oStar, n.iStar)
case BIND_QUERY => n.iStar
case BIND_STAR => oStar
}
}.scanLeft(0)(_ + _)
// Cumulative list of resolved inward binding range starting points
val iSum = iBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, n.oStar, iStar)
case BIND_QUERY => n.oStar
case BIND_STAR => iStar
}
}.scanLeft(0)(_ + _)
// Create ranges for each binding based on the running sums and return
// those along with resolved values for the star operations.
(oSum.init.zip(oSum.tail), iSum.init.zip(iSum.tail), oStar, iStar)
} catch {
case c: StarCycleException => throw c.copy(loop = context +: c.loop)
}
}
/** Sequence of inward ports.
*
* This should be called after all star bindings are resolved.
*
* Each element is: `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding.
* `n` Instance of inward node. `p` View of [[Parameters]] where this connection was made. `s` Source info where this
* connection was made in the source code.
*/
protected[diplomacy] lazy val oDirectPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] =
oBindings.flatMap { case (i, n, _, p, s) =>
// for each binding operator in this node, look at what it connects to
val (start, end) = n.iPortMapping(i)
(start until end).map { j => (j, n, p, s) }
}
/** Sequence of outward ports.
*
* This should be called after all star bindings are resolved.
*
* `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding. `n` Instance of
* outward node. `p` View of [[Parameters]] where this connection was made. `s` [[SourceInfo]] where this connection
* was made in the source code.
*/
protected[diplomacy] lazy val iDirectPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] =
iBindings.flatMap { case (i, n, _, p, s) =>
// query this port index range of this node in the other side of node.
val (start, end) = n.oPortMapping(i)
(start until end).map { j => (j, n, p, s) }
}
// Ephemeral nodes (which have non-None iForward/oForward) have in_degree = out_degree
// Thus, there must exist an Eulerian path and the below algorithms terminate
@scala.annotation.tailrec
private def oTrace(
tuple: (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)
): (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo) = tuple match {
case (i, n, p, s) => n.iForward(i) match {
case None => (i, n, p, s)
case Some((j, m)) => oTrace((j, m, p, s))
}
}
@scala.annotation.tailrec
private def iTrace(
tuple: (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)
): (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo) = tuple match {
case (i, n, p, s) => n.oForward(i) match {
case None => (i, n, p, s)
case Some((j, m)) => iTrace((j, m, p, s))
}
}
/** Final output ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved.
*
* Each Port is a tuple of:
* - Numeric index of this binding in the [[InwardNode]] on the other end.
* - [[InwardNode]] on the other end of this binding.
* - A view of [[Parameters]] where the binding occurred.
* - [[SourceInfo]] for source-level error reporting.
*/
lazy val oPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] = oDirectPorts.map(oTrace)
/** Final input ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved.
*
* Each Port is a tuple of:
* - numeric index of this binding in [[OutwardNode]] on the other end.
* - [[OutwardNode]] on the other end of this binding.
* - a view of [[Parameters]] where the binding occurred.
* - [[SourceInfo]] for source-level error reporting.
*/
lazy val iPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] = iDirectPorts.map(iTrace)
private var oParamsCycleGuard = false
protected[diplomacy] lazy val diParams: Seq[DI] = iPorts.map { case (i, n, _, _) => n.doParams(i) }
protected[diplomacy] lazy val doParams: Seq[DO] = {
try {
if (oParamsCycleGuard) throw DownwardCycleException()
oParamsCycleGuard = true
val o = mapParamsD(oPorts.size, diParams)
require(
o.size == oPorts.size,
s"""Diplomacy has detected a problem with your graph:
|At the following node, the number of outward ports should equal the number of produced outward parameters.
|$context
|$connectedPortsInfo
|Downstreamed inward parameters: [${diParams.mkString(",")}]
|Produced outward parameters: [${o.mkString(",")}]
|""".stripMargin
)
o.map(outer.mixO(_, this))
} catch {
case c: DownwardCycleException => throw c.copy(loop = context +: c.loop)
}
}
private var iParamsCycleGuard = false
protected[diplomacy] lazy val uoParams: Seq[UO] = oPorts.map { case (o, n, _, _) => n.uiParams(o) }
protected[diplomacy] lazy val uiParams: Seq[UI] = {
try {
if (iParamsCycleGuard) throw UpwardCycleException()
iParamsCycleGuard = true
val i = mapParamsU(iPorts.size, uoParams)
require(
i.size == iPorts.size,
s"""Diplomacy has detected a problem with your graph:
|At the following node, the number of inward ports should equal the number of produced inward parameters.
|$context
|$connectedPortsInfo
|Upstreamed outward parameters: [${uoParams.mkString(",")}]
|Produced inward parameters: [${i.mkString(",")}]
|""".stripMargin
)
i.map(inner.mixI(_, this))
} catch {
case c: UpwardCycleException => throw c.copy(loop = context +: c.loop)
}
}
/** Outward edge parameters. */
protected[diplomacy] lazy val edgesOut: Seq[EO] =
(oPorts.zip(doParams)).map { case ((i, n, p, s), o) => outer.edgeO(o, n.uiParams(i), p, s) }
/** Inward edge parameters. */
protected[diplomacy] lazy val edgesIn: Seq[EI] =
(iPorts.zip(uiParams)).map { case ((o, n, p, s), i) => inner.edgeI(n.doParams(o), i, p, s) }
/** A tuple of the input edge parameters and output edge parameters for the edges bound to this node.
*
* If you need to access to the edges of a foreign Node, use this method (in/out create bundles).
*/
lazy val edges: Edges[EI, EO] = Edges(edgesIn, edgesOut)
/** Create actual Wires corresponding to the Bundles parameterized by the outward edges of this node. */
protected[diplomacy] lazy val bundleOut: Seq[BO] = edgesOut.map { e =>
val x = Wire(outer.bundleO(e)).suggestName(s"${valName.value}Out")
// TODO: Don't care unconnected forwarded diplomatic signals for compatibility issue,
// In the future, we should add an option to decide whether allowing unconnected in the LazyModule
x := DontCare
x
}
/** Create actual Wires corresponding to the Bundles parameterized by the inward edges of this node. */
protected[diplomacy] lazy val bundleIn: Seq[BI] = edgesIn.map { e =>
val x = Wire(inner.bundleI(e)).suggestName(s"${valName.value}In")
// TODO: Don't care unconnected forwarded diplomatic signals for compatibility issue,
// In the future, we should add an option to decide whether allowing unconnected in the LazyModule
x := DontCare
x
}
private def emptyDanglesOut: Seq[Dangle] = oPorts.zipWithIndex.map { case ((j, n, _, _), i) =>
Dangle(
source = HalfEdge(serial, i),
sink = HalfEdge(n.serial, j),
flipped = false,
name = wirePrefix + "out",
dataOpt = None
)
}
private def emptyDanglesIn: Seq[Dangle] = iPorts.zipWithIndex.map { case ((j, n, _, _), i) =>
Dangle(
source = HalfEdge(n.serial, j),
sink = HalfEdge(serial, i),
flipped = true,
name = wirePrefix + "in",
dataOpt = None
)
}
/** Create the [[Dangle]]s which describe the connections from this node output to other nodes inputs. */
protected[diplomacy] def danglesOut: Seq[Dangle] = emptyDanglesOut.zipWithIndex.map { case (d, i) =>
d.copy(dataOpt = Some(bundleOut(i)))
}
/** Create the [[Dangle]]s which describe the connections from this node input from other nodes outputs. */
protected[diplomacy] def danglesIn: Seq[Dangle] = emptyDanglesIn.zipWithIndex.map { case (d, i) =>
d.copy(dataOpt = Some(bundleIn(i)))
}
private[diplomacy] var instantiated = false
/** Gather Bundle and edge parameters of outward ports.
*
* Accessors to the result of negotiation to be used within [[LazyModuleImp]] Code. Should only be used within
* [[LazyModuleImp]] code or after its instantiation has completed.
*/
def out: Seq[(BO, EO)] = {
require(
instantiated,
s"$name.out should not be called until after instantiation of its parent LazyModule.module has begun"
)
bundleOut.zip(edgesOut)
}
/** Gather Bundle and edge parameters of inward ports.
*
* Accessors to the result of negotiation to be used within [[LazyModuleImp]] Code. Should only be used within
* [[LazyModuleImp]] code or after its instantiation has completed.
*/
def in: Seq[(BI, EI)] = {
require(
instantiated,
s"$name.in should not be called until after instantiation of its parent LazyModule.module has begun"
)
bundleIn.zip(edgesIn)
}
/** Actually instantiate this node during [[LazyModuleImp]] evaluation. Mark that it's safe to use the Bundle wires,
* instantiate monitors on all input ports if appropriate, and return all the dangles of this node.
*/
protected[diplomacy] def instantiate(): Seq[Dangle] = {
instantiated = true
if (!circuitIdentity) {
(iPorts.zip(in)).foreach { case ((_, _, p, _), (b, e)) => if (p(MonitorsEnabled)) inner.monitor(b, e) }
}
danglesOut ++ danglesIn
}
protected[diplomacy] def cloneDangles(): Seq[Dangle] = emptyDanglesOut ++ emptyDanglesIn
/** Connects the outward part of a node with the inward part of this node. */
protected[diplomacy] def bind(
h: OutwardNode[DI, UI, BI],
binding: NodeBinding
)(
implicit p: Parameters,
sourceInfo: SourceInfo
): Unit = {
val x = this // x := y
val y = h
sourceLine(sourceInfo, " at ", "")
val i = x.iPushed
val o = y.oPushed
y.oPush(
i,
x,
binding match {
case BIND_ONCE => BIND_ONCE
case BIND_FLEX => BIND_FLEX
case BIND_STAR => BIND_QUERY
case BIND_QUERY => BIND_STAR
}
)
x.iPush(o, y, binding)
}
/* Metadata for printing the node graph. */
def inputs: Seq[(OutwardNode[DI, UI, BI], RenderedEdge)] = (iPorts.zip(edgesIn)).map { case ((_, n, p, _), e) =>
val re = inner.render(e)
(n, re.copy(flipped = re.flipped != p(RenderFlipped)))
}
/** Metadata for printing the node graph */
def outputs: Seq[(InwardNode[DO, UO, BO], RenderedEdge)] = oPorts.map { case (i, n, _, _) => (n, n.inputs(i)._2) }
}
File Edges.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.util._
class TLEdge(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdgeParameters(client, manager, params, sourceInfo)
{
def isAligned(address: UInt, lgSize: UInt): Bool = {
if (maxLgSize == 0) true.B else {
val mask = UIntToOH1(lgSize, maxLgSize)
(address & mask) === 0.U
}
}
def mask(address: UInt, lgSize: UInt): UInt =
MaskGen(address, lgSize, manager.beatBytes)
def staticHasData(bundle: TLChannel): Option[Boolean] = {
bundle match {
case _:TLBundleA => {
// Do there exist A messages with Data?
val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial
// Do there exist A messages without Data?
val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint
// Statically optimize the case where hasData is a constant
if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None
}
case _:TLBundleB => {
// Do there exist B messages with Data?
val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial
// Do there exist B messages without Data?
val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint
// Statically optimize the case where hasData is a constant
if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None
}
case _:TLBundleC => {
// Do there exist C messages with Data?
val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe
// Do there exist C messages without Data?
val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe
if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None
}
case _:TLBundleD => {
// Do there exist D messages with Data?
val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB
// Do there exist D messages without Data?
val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT
if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None
}
case _:TLBundleE => Some(false)
}
}
def isRequest(x: TLChannel): Bool = {
x match {
case a: TLBundleA => true.B
case b: TLBundleB => true.B
case c: TLBundleC => c.opcode(2) && c.opcode(1)
// opcode === TLMessages.Release ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(2) && !d.opcode(1)
// opcode === TLMessages.Grant ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
}
def isResponse(x: TLChannel): Bool = {
x match {
case a: TLBundleA => false.B
case b: TLBundleB => false.B
case c: TLBundleC => !c.opcode(2) || !c.opcode(1)
// opcode =/= TLMessages.Release &&
// opcode =/= TLMessages.ReleaseData
case d: TLBundleD => true.B // Grant isResponse + isRequest
case e: TLBundleE => true.B
}
}
def hasData(x: TLChannel): Bool = {
val opdata = x match {
case a: TLBundleA => !a.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case b: TLBundleB => !b.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case c: TLBundleC => c.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.ProbeAckData ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
staticHasData(x).map(_.B).getOrElse(opdata)
}
def opcode(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.opcode
case b: TLBundleB => b.opcode
case c: TLBundleC => c.opcode
case d: TLBundleD => d.opcode
}
}
def param(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.param
case b: TLBundleB => b.param
case c: TLBundleC => c.param
case d: TLBundleD => d.param
}
}
def size(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.size
case b: TLBundleB => b.size
case c: TLBundleC => c.size
case d: TLBundleD => d.size
}
}
def data(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.data
case b: TLBundleB => b.data
case c: TLBundleC => c.data
case d: TLBundleD => d.data
}
}
def corrupt(x: TLDataChannel): Bool = {
x match {
case a: TLBundleA => a.corrupt
case b: TLBundleB => b.corrupt
case c: TLBundleC => c.corrupt
case d: TLBundleD => d.corrupt
}
}
def mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.mask
case b: TLBundleB => b.mask
case c: TLBundleC => mask(c.address, c.size)
}
}
def full_mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => mask(a.address, a.size)
case b: TLBundleB => mask(b.address, b.size)
case c: TLBundleC => mask(c.address, c.size)
}
}
def address(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.address
case b: TLBundleB => b.address
case c: TLBundleC => c.address
}
}
def source(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.source
case b: TLBundleB => b.source
case c: TLBundleC => c.source
case d: TLBundleD => d.source
}
}
def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes)
def addr_lo(x: UInt): UInt =
if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0)
def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x))
def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x))
def numBeats(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 1.U
case bundle: TLDataChannel => {
val hasData = this.hasData(bundle)
val size = this.size(bundle)
val cutoff = log2Ceil(manager.beatBytes)
val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U
val decode = UIntToOH(size, maxLgSize+1) >> cutoff
Mux(hasData, decode | small.asUInt, 1.U)
}
}
}
def numBeats1(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 0.U
case bundle: TLDataChannel => {
if (maxLgSize == 0) {
0.U
} else {
val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes)
Mux(hasData(bundle), decode, 0.U)
}
}
}
}
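  // Worked example (assuming beatBytes = 8, i.e. a 64-bit data beat):
  //   a PutFullData with size = 5 transfers 2^5 = 32 bytes, so numBeats = 32/8 = 4 and
  //   numBeats1 = 3, which is what firstlastHelper below loads into its beat counter;
  //   a Get carries no data on channel A, so numBeats = 1 and numBeats1 = 0 for any size.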
def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val beats1 = numBeats1(bits)
val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W))
val counter1 = counter - 1.U
val first = counter === 0.U
val last = counter === 1.U || beats1 === 0.U
val done = last && fire
val count = (beats1 & ~counter1)
when (fire) {
counter := Mux(first, beats1, counter1)
}
(first, last, done, count)
}
def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1
def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire)
def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid)
def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2
def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire)
def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid)
def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3
def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire)
def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid)
def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3)
}
def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire)
def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid)
def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4)
}
def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire)
def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid)
def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes))
}
def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire)
def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid)
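  // Usage sketch of the helpers above (placeholder names: `edge` is this TLEdge and
  // `out` a TLBundle being monitored or driven):
  //   val (a_first, a_last, a_done) = edge.firstlast(out.a)
  //   when (out.a.fire && a_first) { /* capture per-message state, e.g. source/opcode */ }
  //   when (a_done) { /* the final beat of the burst has been accepted */ }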
// Does the request need T permissions to be executed?
def needT(a: TLBundleA): Bool = {
val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLPermissions.NtoB -> false.B,
TLPermissions.NtoT -> true.B,
TLPermissions.BtoT -> true.B))
MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array(
TLMessages.PutFullData -> true.B,
TLMessages.PutPartialData -> true.B,
TLMessages.ArithmeticData -> true.B,
TLMessages.LogicalData -> true.B,
TLMessages.Get -> false.B,
TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLHints.PREFETCH_READ -> false.B,
TLHints.PREFETCH_WRITE -> true.B)),
TLMessages.AcquireBlock -> acq_needT,
TLMessages.AcquirePerm -> acq_needT))
}
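  // For example, AcquireBlock/AcquirePerm with param NtoB (requesting a read-only Branch
  // copy) does not need T permissions, while NtoT and BtoT (write intent) do; Gets and
  // PREFETCH_READ Hints never need T, whereas Puts, atomics and PREFETCH_WRITE Hints do.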
// This is a very expensive circuit; use only if you really mean it!
def inFlight(x: TLBundle): (UInt, UInt) = {
val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W))
val bce = manager.anySupportAcquireB && client.anySupportProbe
val (a_first, a_last, _) = firstlast(x.a)
val (b_first, b_last, _) = firstlast(x.b)
val (c_first, c_last, _) = firstlast(x.c)
val (d_first, d_last, _) = firstlast(x.d)
val (e_first, e_last, _) = firstlast(x.e)
val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits))
val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits))
val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits))
val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits))
val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits))
val a_inc = x.a.fire && a_first && a_request
val b_inc = x.b.fire && b_first && b_request
val c_inc = x.c.fire && c_first && c_request
val d_inc = x.d.fire && d_first && d_request
val e_inc = x.e.fire && e_first && e_request
val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil))
val a_dec = x.a.fire && a_last && a_response
val b_dec = x.b.fire && b_last && b_response
val c_dec = x.c.fire && c_last && c_response
val d_dec = x.d.fire && d_last && d_response
val e_dec = x.e.fire && e_last && e_response
val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil))
val next_flight = flight + PopCount(inc) - PopCount(dec)
flight := next_flight
(flight, next_flight)
}
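  // Usage sketch (placeholder names: `bundle` is the monitored TLBundle, `quiesce` a
  // request to idle the link); because of its cost this is typically only instantiated
  // in monitors or test harnesses:
  //   val (inflight, _) = edge.inFlight(bundle)
  //   assert (!quiesce || inflight === 0.U, "TileLink port must be idle before quiescing")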
def prettySourceMapping(context: String): String = {
s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n"
}
}
class TLEdgeOut(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
// Transfers
def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquireBlock
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquirePerm
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.Release
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ReleaseData
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) =
Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B)
def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAck
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions, data)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAckData
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B)
def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink)
def GrantAck(toSink: UInt): TLBundleE = {
val e = Wire(new TLBundleE(bundle))
e.sink := toSink
e
}
// Accesses
def Get(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsGetFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Get
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
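  // Usage sketch of the (legal, bits) pair returned by these constructors (placeholder
  // names: `out` is the outward-facing TLBundle; `doRead`, `srcId` and `addr` are caller state):
  //   val (legal, get) = edge.Get(fromSource = srcId, toAddress = addr, lgSize = 3.U)
  //   out.a.valid := doRead && legal // only present the request if a manager can accept it
  //   out.a.bits := get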
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutFullFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutFullData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, mask, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutPartialFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutPartialData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = {
require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsArithmeticFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.ArithmeticData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsLogicalFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.LogicalData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = {
require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsHintFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Hint
a.param := param
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data)
def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAckData
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size)
def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.HintAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
}
class TLEdgeIn(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = {
val todo = x.filter(!_.isEmpty)
val heads = todo.map(_.head)
val tails = todo.map(_.tail)
if (todo.isEmpty) Nil else { heads +: myTranspose(tails) }
}
// Transfers
def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = {
require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}")
val legal = client.supportsProbe(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Probe
b.param := capPermissions
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.Grant
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.GrantData
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B)
def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.ReleaseAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
// Accesses
def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = {
require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsGet(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Get
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsPutFull(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutFullData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, mask, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsPutPartial(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutPartialData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsArithmetic(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.ArithmeticData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsLogical(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.LogicalData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = {
require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsHint(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Hint
b.param := param
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size)
def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied)
def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data)
def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAckData
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B)
def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied)
def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B)
def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.HintAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
}
File ScratchpadSlavePort.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.rocket
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy.lazymodule._
import freechips.rocketchip.diplomacy.{AddressSet, RegionType, TransferSizes}
import freechips.rocketchip.resources.{SimpleDevice}
import freechips.rocketchip.tilelink.{TLManagerNode, TLSlavePortParameters, TLSlaveParameters, TLBundleA, TLMessages, TLAtomics}
import freechips.rocketchip.util.UIntIsOneOf
import freechips.rocketchip.util.DataToAugmentedData
/* This adapter converts between diplomatic TileLink and non-diplomatic HellaCacheIO */
class ScratchpadSlavePort(address: Seq[AddressSet], coreDataBytes: Int, usingAtomics: Boolean)(implicit p: Parameters) extends LazyModule {
def this(address: AddressSet, coreDataBytes: Int, usingAtomics: Boolean)(implicit p: Parameters) = {
this(Seq(address), coreDataBytes, usingAtomics)
}
val device = new SimpleDevice("dtim", Seq("sifive,dtim0"))
val node = TLManagerNode(Seq(TLSlavePortParameters.v1(
Seq(TLSlaveParameters.v1(
address = address,
resources = device.reg("mem"),
regionType = RegionType.IDEMPOTENT,
executable = true,
supportsArithmetic = if (usingAtomics) TransferSizes(4, coreDataBytes) else TransferSizes.none,
supportsLogical = if (usingAtomics) TransferSizes(4, coreDataBytes) else TransferSizes.none,
supportsPutPartial = TransferSizes(1, coreDataBytes),
supportsPutFull = TransferSizes(1, coreDataBytes),
supportsGet = TransferSizes(1, coreDataBytes),
fifoId = Some(0))), // requests handled in FIFO order
beatBytes = coreDataBytes,
minLatency = 1)))
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
val io = IO(new Bundle {
val dmem = new HellaCacheIO
})
require(coreDataBytes * 8 == io.dmem.resp.bits.data.getWidth, "ScratchpadSlavePort is misconfigured: coreDataBytes must match D$ data width")
val (tl_in, edge) = node.in(0)
val s_ready :: s_wait1 :: s_wait2 :: s_replay :: s_init :: s_grant :: Nil = Enum(6)
val state = RegInit(s_init)
val dmem_req_valid = Wire(Bool())
when (state === s_wait1) { state := s_wait2 }
when (state === s_init && tl_in.a.valid) { state := s_ready }
when (io.dmem.resp.valid) { state := s_grant }
when (tl_in.d.fire) { state := s_ready }
when (io.dmem.s2_nack) { state := s_replay }
when (dmem_req_valid && io.dmem.req.ready) { state := s_wait1 }
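    // These `when` blocks rely on Chisel's last-connect semantics: if several conditions
    // hold in the same cycle, the later assignment wins, so accepting a new dmem request
    // (the final block, moving to s_wait1) takes precedence over the transitions above it.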
val acq = Reg(tl_in.a.bits.cloneType)
when (tl_in.a.fire) { acq := tl_in.a.bits }
def formCacheReq(a: TLBundleA) = {
val req = Wire(new HellaCacheReq)
req.cmd := MuxLookup(a.opcode, M_XRD)(Array(
TLMessages.PutFullData -> M_XWR,
TLMessages.PutPartialData -> M_PWR,
TLMessages.ArithmeticData -> MuxLookup(a.param, M_XRD)(Array(
TLAtomics.MIN -> M_XA_MIN,
TLAtomics.MAX -> M_XA_MAX,
TLAtomics.MINU -> M_XA_MINU,
TLAtomics.MAXU -> M_XA_MAXU,
TLAtomics.ADD -> M_XA_ADD)),
TLMessages.LogicalData -> MuxLookup(a.param, M_XRD)(Array(
TLAtomics.XOR -> M_XA_XOR,
TLAtomics.OR -> M_XA_OR,
TLAtomics.AND -> M_XA_AND,
TLAtomics.SWAP -> M_XA_SWAP)),
TLMessages.Get -> M_XRD))
// Convert full PutPartial into PutFull to work around RMWs causing X-prop problems.
// Also prevent cmd becoming X out of reset by checking for s_init.
val mask_full = {
val desired_mask = new StoreGen(a.size, a.address, 0.U, coreDataBytes).mask
(a.mask | ~desired_mask).andR
}
when (state === s_init || (a.opcode === TLMessages.PutPartialData && mask_full)) {
req.cmd := M_XWR
}
req.size := a.size
req.signed := false.B
req.addr := a.address
req.tag := 0.U
req.phys := true.B
req.no_xcpt := true.B
req.no_resp := false.B
req.data := 0.U
req.no_alloc := false.B
req.mask := 0.U
req.dprv := 0.U
req.dv := false.B
req
}
// ready_likely assumes that a valid response in s_wait2 is the vastly
// common case. In the uncommon case, we'll erroneously send a request,
// then s1_kill it the following cycle.
val ready_likely = state.isOneOf(s_ready, s_wait2)
val ready = state === s_ready || state === s_wait2 && io.dmem.resp.valid && tl_in.d.ready
dmem_req_valid := (tl_in.a.valid && ready) || state === s_replay
val dmem_req_valid_likely = (tl_in.a.valid && ready_likely) || state === s_replay
io.dmem.keep_clock_enabled := DontCare
io.dmem.req.valid := dmem_req_valid_likely
tl_in.a.ready := io.dmem.req.ready && ready
io.dmem.req.bits := formCacheReq(Mux(state === s_replay, acq, tl_in.a.bits))
io.dmem.s1_data.data := acq.data
io.dmem.s1_data.mask := acq.mask
io.dmem.s1_kill := state =/= s_wait1
io.dmem.s2_kill := false.B
tl_in.d.valid := io.dmem.resp.valid || state === s_grant
tl_in.d.bits := Mux(acq.opcode.isOneOf(TLMessages.PutFullData, TLMessages.PutPartialData),
edge.AccessAck(acq),
edge.AccessAck(acq, 0.U))
tl_in.d.bits.data := io.dmem.resp.bits.data_raw.holdUnless(state === s_wait2)
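    // holdUnless passes data_raw through while state === s_wait2 and holds the last value
    // captured in that state afterwards, so the AccessAckData presented while waiting in
    // s_grant still carries the data of the original response.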
// Tie off unused channels
tl_in.b.valid := false.B
tl_in.c.ready := true.B
tl_in.e.ready := true.B
}
}
| module ScratchpadSlavePort( // @[ScratchpadSlavePort.scala:43:9]
input clock, // @[ScratchpadSlavePort.scala:43:9]
input reset, // @[ScratchpadSlavePort.scala:43:9]
output auto_in_a_ready, // @[LazyModuleImp.scala:107:25]
input auto_in_a_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_a_bits_param, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_in_a_bits_size, // @[LazyModuleImp.scala:107:25]
input [11:0] auto_in_a_bits_source, // @[LazyModuleImp.scala:107:25]
input [21:0] auto_in_a_bits_address, // @[LazyModuleImp.scala:107:25]
input [7:0] auto_in_a_bits_mask, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_in_a_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_in_a_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input auto_in_d_ready, // @[LazyModuleImp.scala:107:25]
output auto_in_d_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_in_d_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_in_d_bits_size, // @[LazyModuleImp.scala:107:25]
output [11:0] auto_in_d_bits_source, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_in_d_bits_data, // @[LazyModuleImp.scala:107:25]
input io_dmem_req_ready, // @[ScratchpadSlavePort.scala:44:16]
output io_dmem_req_valid, // @[ScratchpadSlavePort.scala:44:16]
output [39:0] io_dmem_req_bits_addr, // @[ScratchpadSlavePort.scala:44:16]
output [4:0] io_dmem_req_bits_cmd, // @[ScratchpadSlavePort.scala:44:16]
output [1:0] io_dmem_req_bits_size, // @[ScratchpadSlavePort.scala:44:16]
output io_dmem_s1_kill, // @[ScratchpadSlavePort.scala:44:16]
output [63:0] io_dmem_s1_data_data, // @[ScratchpadSlavePort.scala:44:16]
output [7:0] io_dmem_s1_data_mask, // @[ScratchpadSlavePort.scala:44:16]
input io_dmem_s2_nack, // @[ScratchpadSlavePort.scala:44:16]
input io_dmem_s2_nack_cause_raw, // @[ScratchpadSlavePort.scala:44:16]
input io_dmem_s2_uncached, // @[ScratchpadSlavePort.scala:44:16]
input [31:0] io_dmem_s2_paddr, // @[ScratchpadSlavePort.scala:44:16]
input io_dmem_resp_valid, // @[ScratchpadSlavePort.scala:44:16]
input [39:0] io_dmem_resp_bits_addr, // @[ScratchpadSlavePort.scala:44:16]
input [7:0] io_dmem_resp_bits_tag, // @[ScratchpadSlavePort.scala:44:16]
input [4:0] io_dmem_resp_bits_cmd, // @[ScratchpadSlavePort.scala:44:16]
input [1:0] io_dmem_resp_bits_size, // @[ScratchpadSlavePort.scala:44:16]
input io_dmem_resp_bits_signed, // @[ScratchpadSlavePort.scala:44:16]
input [1:0] io_dmem_resp_bits_dprv, // @[ScratchpadSlavePort.scala:44:16]
input io_dmem_resp_bits_dv, // @[ScratchpadSlavePort.scala:44:16]
input [63:0] io_dmem_resp_bits_data, // @[ScratchpadSlavePort.scala:44:16]
input [7:0] io_dmem_resp_bits_mask, // @[ScratchpadSlavePort.scala:44:16]
input io_dmem_resp_bits_replay, // @[ScratchpadSlavePort.scala:44:16]
input io_dmem_resp_bits_has_data, // @[ScratchpadSlavePort.scala:44:16]
input [63:0] io_dmem_resp_bits_data_word_bypass, // @[ScratchpadSlavePort.scala:44:16]
input [63:0] io_dmem_resp_bits_data_raw, // @[ScratchpadSlavePort.scala:44:16]
input [63:0] io_dmem_resp_bits_store_data, // @[ScratchpadSlavePort.scala:44:16]
input io_dmem_replay_next, // @[ScratchpadSlavePort.scala:44:16]
input io_dmem_s2_xcpt_ma_ld, // @[ScratchpadSlavePort.scala:44:16]
input io_dmem_s2_xcpt_ma_st, // @[ScratchpadSlavePort.scala:44:16]
input io_dmem_s2_xcpt_pf_ld, // @[ScratchpadSlavePort.scala:44:16]
input io_dmem_s2_xcpt_pf_st, // @[ScratchpadSlavePort.scala:44:16]
input io_dmem_s2_xcpt_ae_ld, // @[ScratchpadSlavePort.scala:44:16]
input io_dmem_s2_xcpt_ae_st, // @[ScratchpadSlavePort.scala:44:16]
input [39:0] io_dmem_s2_gpa, // @[ScratchpadSlavePort.scala:44:16]
input io_dmem_ordered, // @[ScratchpadSlavePort.scala:44:16]
input io_dmem_store_pending, // @[ScratchpadSlavePort.scala:44:16]
input io_dmem_perf_acquire, // @[ScratchpadSlavePort.scala:44:16]
input io_dmem_perf_grant, // @[ScratchpadSlavePort.scala:44:16]
input io_dmem_perf_tlbMiss, // @[ScratchpadSlavePort.scala:44:16]
input io_dmem_perf_blocked, // @[ScratchpadSlavePort.scala:44:16]
input io_dmem_perf_canAcceptStoreThenLoad, // @[ScratchpadSlavePort.scala:44:16]
input io_dmem_perf_canAcceptStoreThenRMW, // @[ScratchpadSlavePort.scala:44:16]
input io_dmem_perf_canAcceptLoadThenLoad, // @[ScratchpadSlavePort.scala:44:16]
input io_dmem_perf_storeBufferEmptyAfterLoad, // @[ScratchpadSlavePort.scala:44:16]
input io_dmem_perf_storeBufferEmptyAfterStore // @[ScratchpadSlavePort.scala:44:16]
);
wire auto_in_a_valid_0 = auto_in_a_valid; // @[ScratchpadSlavePort.scala:43:9]
wire [2:0] auto_in_a_bits_opcode_0 = auto_in_a_bits_opcode; // @[ScratchpadSlavePort.scala:43:9]
wire [2:0] auto_in_a_bits_param_0 = auto_in_a_bits_param; // @[ScratchpadSlavePort.scala:43:9]
wire [1:0] auto_in_a_bits_size_0 = auto_in_a_bits_size; // @[ScratchpadSlavePort.scala:43:9]
wire [11:0] auto_in_a_bits_source_0 = auto_in_a_bits_source; // @[ScratchpadSlavePort.scala:43:9]
wire [21:0] auto_in_a_bits_address_0 = auto_in_a_bits_address; // @[ScratchpadSlavePort.scala:43:9]
wire [7:0] auto_in_a_bits_mask_0 = auto_in_a_bits_mask; // @[ScratchpadSlavePort.scala:43:9]
wire [63:0] auto_in_a_bits_data_0 = auto_in_a_bits_data; // @[ScratchpadSlavePort.scala:43:9]
wire auto_in_a_bits_corrupt_0 = auto_in_a_bits_corrupt; // @[ScratchpadSlavePort.scala:43:9]
wire auto_in_d_ready_0 = auto_in_d_ready; // @[ScratchpadSlavePort.scala:43:9]
wire io_dmem_req_ready_0 = io_dmem_req_ready; // @[ScratchpadSlavePort.scala:43:9]
wire io_dmem_s2_nack_0 = io_dmem_s2_nack; // @[ScratchpadSlavePort.scala:43:9]
wire io_dmem_s2_nack_cause_raw_0 = io_dmem_s2_nack_cause_raw; // @[ScratchpadSlavePort.scala:43:9]
wire io_dmem_s2_uncached_0 = io_dmem_s2_uncached; // @[ScratchpadSlavePort.scala:43:9]
wire [31:0] io_dmem_s2_paddr_0 = io_dmem_s2_paddr; // @[ScratchpadSlavePort.scala:43:9]
wire io_dmem_resp_valid_0 = io_dmem_resp_valid; // @[ScratchpadSlavePort.scala:43:9]
wire [39:0] io_dmem_resp_bits_addr_0 = io_dmem_resp_bits_addr; // @[ScratchpadSlavePort.scala:43:9]
wire [7:0] io_dmem_resp_bits_tag_0 = io_dmem_resp_bits_tag; // @[ScratchpadSlavePort.scala:43:9]
wire [4:0] io_dmem_resp_bits_cmd_0 = io_dmem_resp_bits_cmd; // @[ScratchpadSlavePort.scala:43:9]
wire [1:0] io_dmem_resp_bits_size_0 = io_dmem_resp_bits_size; // @[ScratchpadSlavePort.scala:43:9]
wire io_dmem_resp_bits_signed_0 = io_dmem_resp_bits_signed; // @[ScratchpadSlavePort.scala:43:9]
wire [1:0] io_dmem_resp_bits_dprv_0 = io_dmem_resp_bits_dprv; // @[ScratchpadSlavePort.scala:43:9]
wire io_dmem_resp_bits_dv_0 = io_dmem_resp_bits_dv; // @[ScratchpadSlavePort.scala:43:9]
wire [63:0] io_dmem_resp_bits_data_0 = io_dmem_resp_bits_data; // @[ScratchpadSlavePort.scala:43:9]
wire [7:0] io_dmem_resp_bits_mask_0 = io_dmem_resp_bits_mask; // @[ScratchpadSlavePort.scala:43:9]
wire io_dmem_resp_bits_replay_0 = io_dmem_resp_bits_replay; // @[ScratchpadSlavePort.scala:43:9]
wire io_dmem_resp_bits_has_data_0 = io_dmem_resp_bits_has_data; // @[ScratchpadSlavePort.scala:43:9]
wire [63:0] io_dmem_resp_bits_data_word_bypass_0 = io_dmem_resp_bits_data_word_bypass; // @[ScratchpadSlavePort.scala:43:9]
wire [63:0] io_dmem_resp_bits_data_raw_0 = io_dmem_resp_bits_data_raw; // @[ScratchpadSlavePort.scala:43:9]
wire [63:0] io_dmem_resp_bits_store_data_0 = io_dmem_resp_bits_store_data; // @[ScratchpadSlavePort.scala:43:9]
wire io_dmem_replay_next_0 = io_dmem_replay_next; // @[ScratchpadSlavePort.scala:43:9]
wire io_dmem_s2_xcpt_ma_ld_0 = io_dmem_s2_xcpt_ma_ld; // @[ScratchpadSlavePort.scala:43:9]
wire io_dmem_s2_xcpt_ma_st_0 = io_dmem_s2_xcpt_ma_st; // @[ScratchpadSlavePort.scala:43:9]
wire io_dmem_s2_xcpt_pf_ld_0 = io_dmem_s2_xcpt_pf_ld; // @[ScratchpadSlavePort.scala:43:9]
wire io_dmem_s2_xcpt_pf_st_0 = io_dmem_s2_xcpt_pf_st; // @[ScratchpadSlavePort.scala:43:9]
wire io_dmem_s2_xcpt_ae_ld_0 = io_dmem_s2_xcpt_ae_ld; // @[ScratchpadSlavePort.scala:43:9]
wire io_dmem_s2_xcpt_ae_st_0 = io_dmem_s2_xcpt_ae_st; // @[ScratchpadSlavePort.scala:43:9]
wire [39:0] io_dmem_s2_gpa_0 = io_dmem_s2_gpa; // @[ScratchpadSlavePort.scala:43:9]
wire io_dmem_ordered_0 = io_dmem_ordered; // @[ScratchpadSlavePort.scala:43:9]
wire io_dmem_store_pending_0 = io_dmem_store_pending; // @[ScratchpadSlavePort.scala:43:9]
wire io_dmem_perf_acquire_0 = io_dmem_perf_acquire; // @[ScratchpadSlavePort.scala:43:9]
wire io_dmem_perf_grant_0 = io_dmem_perf_grant; // @[ScratchpadSlavePort.scala:43:9]
wire io_dmem_perf_tlbMiss_0 = io_dmem_perf_tlbMiss; // @[ScratchpadSlavePort.scala:43:9]
wire io_dmem_perf_blocked_0 = io_dmem_perf_blocked; // @[ScratchpadSlavePort.scala:43:9]
wire io_dmem_perf_canAcceptStoreThenLoad_0 = io_dmem_perf_canAcceptStoreThenLoad; // @[ScratchpadSlavePort.scala:43:9]
wire io_dmem_perf_canAcceptStoreThenRMW_0 = io_dmem_perf_canAcceptStoreThenRMW; // @[ScratchpadSlavePort.scala:43:9]
wire io_dmem_perf_canAcceptLoadThenLoad_0 = io_dmem_perf_canAcceptLoadThenLoad; // @[ScratchpadSlavePort.scala:43:9]
wire io_dmem_perf_storeBufferEmptyAfterLoad_0 = io_dmem_perf_storeBufferEmptyAfterLoad; // @[ScratchpadSlavePort.scala:43:9]
wire io_dmem_perf_storeBufferEmptyAfterStore_0 = io_dmem_perf_storeBufferEmptyAfterStore; // @[ScratchpadSlavePort.scala:43:9]
wire [2:0] nodeIn_d_bits_d_opcode = 3'h0; // @[Edges.scala:792:17]
wire [2:0] nodeIn_d_bits_d_1_opcode = 3'h1; // @[Edges.scala:810:17]
wire [63:0] io_dmem_req_bits_data = 64'h0; // @[ScratchpadSlavePort.scala:43:9]
wire [63:0] io_dmem_req_bits_req_data = 64'h0; // @[ScratchpadSlavePort.scala:66:21]
wire [63:0] nodeIn_d_bits_d_data = 64'h0; // @[Edges.scala:792:17]
wire [63:0] nodeIn_d_bits_d_1_data = 64'h0; // @[Edges.scala:810:17]
wire [63:0] _nodeIn_d_bits_T_3_data = 64'h0; // @[ScratchpadSlavePort.scala:126:24]
wire [7:0] io_dmem_req_bits_tag = 8'h0; // @[ScratchpadSlavePort.scala:43:9]
wire [7:0] io_dmem_req_bits_mask = 8'h0; // @[ScratchpadSlavePort.scala:43:9]
wire [7:0] io_dmem_req_bits_req_tag = 8'h0; // @[ScratchpadSlavePort.scala:66:21]
wire [7:0] io_dmem_req_bits_req_mask = 8'h0; // @[ScratchpadSlavePort.scala:66:21]
wire [1:0] auto_in_d_bits_param = 2'h0; // @[ScratchpadSlavePort.scala:43:9]
wire [1:0] io_dmem_req_bits_dprv = 2'h0; // @[ScratchpadSlavePort.scala:43:9]
wire [1:0] nodeIn_d_bits_param = 2'h0; // @[MixedNode.scala:551:17]
wire [1:0] io_dmem_req_bits_req_dprv = 2'h0; // @[ScratchpadSlavePort.scala:66:21]
wire [1:0] nodeIn_d_bits_d_param = 2'h0; // @[Edges.scala:792:17]
wire [1:0] nodeIn_d_bits_d_1_param = 2'h0; // @[Edges.scala:810:17]
wire [1:0] _nodeIn_d_bits_T_3_param = 2'h0; // @[ScratchpadSlavePort.scala:126:24]
wire io_dmem_req_bits_phys = 1'h1; // @[ScratchpadSlavePort.scala:43:9]
wire io_dmem_req_bits_no_xcpt = 1'h1; // @[ScratchpadSlavePort.scala:43:9]
wire io_dmem_clock_enabled = 1'h1; // @[ScratchpadSlavePort.scala:43:9]
wire io_dmem_req_bits_req_phys = 1'h1; // @[ScratchpadSlavePort.scala:66:21]
wire io_dmem_req_bits_req_no_xcpt = 1'h1; // @[ScratchpadSlavePort.scala:66:21]
wire auto_in_d_bits_sink = 1'h0; // @[ScratchpadSlavePort.scala:43:9]
wire auto_in_d_bits_denied = 1'h0; // @[ScratchpadSlavePort.scala:43:9]
wire auto_in_d_bits_corrupt = 1'h0; // @[ScratchpadSlavePort.scala:43:9]
wire io_dmem_req_bits_signed = 1'h0; // @[ScratchpadSlavePort.scala:43:9]
wire io_dmem_req_bits_dv = 1'h0; // @[ScratchpadSlavePort.scala:43:9]
wire io_dmem_req_bits_no_resp = 1'h0; // @[ScratchpadSlavePort.scala:43:9]
wire io_dmem_req_bits_no_alloc = 1'h0; // @[ScratchpadSlavePort.scala:43:9]
wire io_dmem_s2_kill = 1'h0; // @[ScratchpadSlavePort.scala:43:9]
wire io_dmem_s2_xcpt_gf_ld = 1'h0; // @[ScratchpadSlavePort.scala:43:9]
wire io_dmem_s2_xcpt_gf_st = 1'h0; // @[ScratchpadSlavePort.scala:43:9]
wire io_dmem_s2_gpa_is_pte = 1'h0; // @[ScratchpadSlavePort.scala:43:9]
wire io_dmem_perf_release = 1'h0; // @[ScratchpadSlavePort.scala:43:9]
wire io_dmem_keep_clock_enabled = 1'h0; // @[ScratchpadSlavePort.scala:43:9]
wire nodeIn_a_ready; // @[MixedNode.scala:551:17]
wire nodeIn_d_bits_sink = 1'h0; // @[MixedNode.scala:551:17]
wire nodeIn_d_bits_denied = 1'h0; // @[MixedNode.scala:551:17]
wire nodeIn_d_bits_corrupt = 1'h0; // @[MixedNode.scala:551:17]
wire io_dmem_req_bits_req_signed = 1'h0; // @[ScratchpadSlavePort.scala:66:21]
wire io_dmem_req_bits_req_dv = 1'h0; // @[ScratchpadSlavePort.scala:66:21]
wire io_dmem_req_bits_req_no_resp = 1'h0; // @[ScratchpadSlavePort.scala:66:21]
wire io_dmem_req_bits_req_no_alloc = 1'h0; // @[ScratchpadSlavePort.scala:66:21]
wire nodeIn_d_bits_d_sink = 1'h0; // @[Edges.scala:792:17]
wire nodeIn_d_bits_d_denied = 1'h0; // @[Edges.scala:792:17]
wire nodeIn_d_bits_d_corrupt = 1'h0; // @[Edges.scala:792:17]
wire nodeIn_d_bits_d_1_sink = 1'h0; // @[Edges.scala:810:17]
wire nodeIn_d_bits_d_1_denied = 1'h0; // @[Edges.scala:810:17]
wire nodeIn_d_bits_d_1_corrupt = 1'h0; // @[Edges.scala:810:17]
wire _nodeIn_d_bits_T_3_sink = 1'h0; // @[ScratchpadSlavePort.scala:126:24]
wire _nodeIn_d_bits_T_3_denied = 1'h0; // @[ScratchpadSlavePort.scala:126:24]
wire _nodeIn_d_bits_T_3_corrupt = 1'h0; // @[ScratchpadSlavePort.scala:126:24]
wire nodeIn_a_valid = auto_in_a_valid_0; // @[ScratchpadSlavePort.scala:43:9]
wire [2:0] nodeIn_a_bits_opcode = auto_in_a_bits_opcode_0; // @[ScratchpadSlavePort.scala:43:9]
wire [2:0] nodeIn_a_bits_param = auto_in_a_bits_param_0; // @[ScratchpadSlavePort.scala:43:9]
wire [1:0] nodeIn_a_bits_size = auto_in_a_bits_size_0; // @[ScratchpadSlavePort.scala:43:9]
wire [11:0] nodeIn_a_bits_source = auto_in_a_bits_source_0; // @[ScratchpadSlavePort.scala:43:9]
wire [21:0] nodeIn_a_bits_address = auto_in_a_bits_address_0; // @[ScratchpadSlavePort.scala:43:9]
wire [7:0] nodeIn_a_bits_mask = auto_in_a_bits_mask_0; // @[ScratchpadSlavePort.scala:43:9]
wire [63:0] nodeIn_a_bits_data = auto_in_a_bits_data_0; // @[ScratchpadSlavePort.scala:43:9]
wire nodeIn_a_bits_corrupt = auto_in_a_bits_corrupt_0; // @[ScratchpadSlavePort.scala:43:9]
wire nodeIn_d_ready = auto_in_d_ready_0; // @[ScratchpadSlavePort.scala:43:9]
wire nodeIn_d_valid; // @[MixedNode.scala:551:17]
wire [2:0] nodeIn_d_bits_opcode; // @[MixedNode.scala:551:17]
wire [1:0] nodeIn_d_bits_size; // @[MixedNode.scala:551:17]
wire [11:0] nodeIn_d_bits_source; // @[MixedNode.scala:551:17]
wire [63:0] nodeIn_d_bits_data; // @[MixedNode.scala:551:17]
wire dmem_req_valid_likely; // @[ScratchpadSlavePort.scala:114:65]
wire [39:0] io_dmem_req_bits_req_addr; // @[ScratchpadSlavePort.scala:66:21]
wire [4:0] io_dmem_req_bits_req_cmd; // @[ScratchpadSlavePort.scala:66:21]
wire [1:0] io_dmem_req_bits_req_size; // @[ScratchpadSlavePort.scala:66:21]
wire _io_dmem_s1_kill_T; // @[ScratchpadSlavePort.scala:122:30]
wire auto_in_a_ready_0; // @[ScratchpadSlavePort.scala:43:9]
wire [2:0] auto_in_d_bits_opcode_0; // @[ScratchpadSlavePort.scala:43:9]
wire [1:0] auto_in_d_bits_size_0; // @[ScratchpadSlavePort.scala:43:9]
wire [11:0] auto_in_d_bits_source_0; // @[ScratchpadSlavePort.scala:43:9]
wire [63:0] auto_in_d_bits_data_0; // @[ScratchpadSlavePort.scala:43:9]
wire auto_in_d_valid_0; // @[ScratchpadSlavePort.scala:43:9]
wire [39:0] io_dmem_req_bits_addr_0; // @[ScratchpadSlavePort.scala:43:9]
wire [4:0] io_dmem_req_bits_cmd_0; // @[ScratchpadSlavePort.scala:43:9]
wire [1:0] io_dmem_req_bits_size_0; // @[ScratchpadSlavePort.scala:43:9]
wire io_dmem_req_valid_0; // @[ScratchpadSlavePort.scala:43:9]
wire [63:0] io_dmem_s1_data_data_0; // @[ScratchpadSlavePort.scala:43:9]
wire [7:0] io_dmem_s1_data_mask_0; // @[ScratchpadSlavePort.scala:43:9]
wire io_dmem_s1_kill_0; // @[ScratchpadSlavePort.scala:43:9]
wire _nodeIn_a_ready_T; // @[ScratchpadSlavePort.scala:118:40]
assign auto_in_a_ready_0 = nodeIn_a_ready; // @[ScratchpadSlavePort.scala:43:9]
wire _nodeIn_d_valid_T_1; // @[ScratchpadSlavePort.scala:125:41]
assign auto_in_d_valid_0 = nodeIn_d_valid; // @[ScratchpadSlavePort.scala:43:9]
wire [2:0] _nodeIn_d_bits_T_3_opcode; // @[ScratchpadSlavePort.scala:126:24]
assign auto_in_d_bits_opcode_0 = nodeIn_d_bits_opcode; // @[ScratchpadSlavePort.scala:43:9]
wire [1:0] _nodeIn_d_bits_T_3_size; // @[ScratchpadSlavePort.scala:126:24]
assign auto_in_d_bits_size_0 = nodeIn_d_bits_size; // @[ScratchpadSlavePort.scala:43:9]
wire [11:0] _nodeIn_d_bits_T_3_source; // @[ScratchpadSlavePort.scala:126:24]
assign auto_in_d_bits_source_0 = nodeIn_d_bits_source; // @[ScratchpadSlavePort.scala:43:9]
wire [63:0] _nodeIn_d_bits_data_T_1; // @[package.scala:88:42]
assign auto_in_d_bits_data_0 = nodeIn_d_bits_data; // @[ScratchpadSlavePort.scala:43:9]
reg [2:0] state; // @[ScratchpadSlavePort.scala:53:24]
wire _dmem_req_valid_T_2; // @[ScratchpadSlavePort.scala:113:48]
wire dmem_req_valid; // @[ScratchpadSlavePort.scala:54:30]
wire _io_dmem_req_bits_T_2 = state == 3'h4; // @[ScratchpadSlavePort.scala:53:24, :56:17, :89:19]
reg [2:0] acq_opcode; // @[ScratchpadSlavePort.scala:62:18]
reg [2:0] acq_param; // @[ScratchpadSlavePort.scala:62:18]
reg [1:0] acq_size; // @[ScratchpadSlavePort.scala:62:18]
wire [1:0] nodeIn_d_bits_d_size = acq_size; // @[Edges.scala:792:17]
wire [1:0] nodeIn_d_bits_d_1_size = acq_size; // @[Edges.scala:810:17]
reg [11:0] acq_source; // @[ScratchpadSlavePort.scala:62:18]
wire [11:0] nodeIn_d_bits_d_source = acq_source; // @[Edges.scala:792:17]
wire [11:0] nodeIn_d_bits_d_1_source = acq_source; // @[Edges.scala:810:17]
reg [21:0] acq_address; // @[ScratchpadSlavePort.scala:62:18]
reg [7:0] acq_mask; // @[ScratchpadSlavePort.scala:62:18]
assign io_dmem_s1_data_mask_0 = acq_mask; // @[ScratchpadSlavePort.scala:43:9, :62:18]
reg [63:0] acq_data; // @[ScratchpadSlavePort.scala:62:18]
assign io_dmem_s1_data_data_0 = acq_data; // @[ScratchpadSlavePort.scala:43:9, :62:18]
reg acq_corrupt; // @[ScratchpadSlavePort.scala:62:18]
wire _GEN = state == 3'h0; // @[package.scala:16:47]
wire _ready_likely_T; // @[package.scala:16:47]
assign _ready_likely_T = _GEN; // @[package.scala:16:47]
wire _ready_T; // @[ScratchpadSlavePort.scala:112:23]
assign _ready_T = _GEN; // @[package.scala:16:47]
wire _GEN_0 = state == 3'h2; // @[package.scala:16:47]
wire _ready_likely_T_1; // @[package.scala:16:47]
assign _ready_likely_T_1 = _GEN_0; // @[package.scala:16:47]
wire _ready_T_1; // @[ScratchpadSlavePort.scala:112:44]
assign _ready_T_1 = _GEN_0; // @[package.scala:16:47]
wire _nodeIn_d_bits_data_T; // @[ScratchpadSlavePort.scala:129:70]
assign _nodeIn_d_bits_data_T = _GEN_0; // @[package.scala:16:47]
wire ready_likely = _ready_likely_T | _ready_likely_T_1; // @[package.scala:16:47, :81:59]
wire _ready_T_2 = _ready_T_1 & io_dmem_resp_valid_0; // @[ScratchpadSlavePort.scala:43:9, :112:{44,56}]
wire _ready_T_3 = _ready_T_2 & nodeIn_d_ready; // @[ScratchpadSlavePort.scala:112:{56,78}]
wire ready = _ready_T | _ready_T_3; // @[ScratchpadSlavePort.scala:112:{23,35,78}]
wire _dmem_req_valid_T = nodeIn_a_valid & ready; // @[ScratchpadSlavePort.scala:112:35, :113:38]
wire _GEN_1 = state == 3'h3; // @[ScratchpadSlavePort.scala:53:24, :67:44, :113:57]
wire _dmem_req_valid_T_1; // @[ScratchpadSlavePort.scala:113:57]
assign _dmem_req_valid_T_1 = _GEN_1; // @[ScratchpadSlavePort.scala:113:57]
wire _dmem_req_valid_likely_T_1; // @[ScratchpadSlavePort.scala:114:74]
assign _dmem_req_valid_likely_T_1 = _GEN_1; // @[ScratchpadSlavePort.scala:113:57, :114:74]
wire _io_dmem_req_bits_T; // @[ScratchpadSlavePort.scala:119:48]
assign _io_dmem_req_bits_T = _GEN_1; // @[ScratchpadSlavePort.scala:113:57, :119:48]
assign _dmem_req_valid_T_2 = _dmem_req_valid_T | _dmem_req_valid_T_1; // @[ScratchpadSlavePort.scala:113:{38,48,57}]
assign dmem_req_valid = _dmem_req_valid_T_2; // @[ScratchpadSlavePort.scala:54:30, :113:48]
wire _dmem_req_valid_likely_T = nodeIn_a_valid & ready_likely; // @[package.scala:81:59]
assign dmem_req_valid_likely = _dmem_req_valid_likely_T | _dmem_req_valid_likely_T_1; // @[ScratchpadSlavePort.scala:114:{48,65,74}]
assign io_dmem_req_valid_0 = dmem_req_valid_likely; // @[ScratchpadSlavePort.scala:43:9, :114:65]
assign _nodeIn_a_ready_T = io_dmem_req_ready_0 & ready; // @[ScratchpadSlavePort.scala:43:9, :112:35, :118:40]
assign nodeIn_a_ready = _nodeIn_a_ready_T; // @[ScratchpadSlavePort.scala:118:40]
wire [2:0] _io_dmem_req_bits_T_1_opcode = _io_dmem_req_bits_T ? acq_opcode : nodeIn_a_bits_opcode; // @[ScratchpadSlavePort.scala:62:18, :119:{41,48}]
wire [2:0] _io_dmem_req_bits_T_1_param = _io_dmem_req_bits_T ? acq_param : nodeIn_a_bits_param; // @[ScratchpadSlavePort.scala:62:18, :119:{41,48}]
wire [1:0] _io_dmem_req_bits_T_1_size = _io_dmem_req_bits_T ? acq_size : nodeIn_a_bits_size; // @[ScratchpadSlavePort.scala:62:18, :119:{41,48}]
wire [11:0] _io_dmem_req_bits_T_1_source = _io_dmem_req_bits_T ? acq_source : nodeIn_a_bits_source; // @[ScratchpadSlavePort.scala:62:18, :119:{41,48}]
wire [21:0] _io_dmem_req_bits_T_1_address = _io_dmem_req_bits_T ? acq_address : nodeIn_a_bits_address; // @[ScratchpadSlavePort.scala:62:18, :119:{41,48}]
wire [7:0] _io_dmem_req_bits_T_1_mask = _io_dmem_req_bits_T ? acq_mask : nodeIn_a_bits_mask; // @[ScratchpadSlavePort.scala:62:18, :119:{41,48}]
wire [63:0] _io_dmem_req_bits_T_1_data = _io_dmem_req_bits_T ? acq_data : nodeIn_a_bits_data; // @[ScratchpadSlavePort.scala:62:18, :119:{41,48}]
wire _io_dmem_req_bits_T_1_corrupt = _io_dmem_req_bits_T ? acq_corrupt : nodeIn_a_bits_corrupt; // @[ScratchpadSlavePort.scala:62:18, :119:{41,48}]
assign io_dmem_req_bits_req_size = _io_dmem_req_bits_T_1_size; // @[ScratchpadSlavePort.scala:66:21, :119:41]
wire [1:0] io_dmem_req_bits_mask_full_desired_mask_size = _io_dmem_req_bits_T_1_size; // @[ScratchpadSlavePort.scala:119:41]
assign io_dmem_req_bits_addr_0 = io_dmem_req_bits_req_addr; // @[ScratchpadSlavePort.scala:43:9, :66:21]
assign io_dmem_req_bits_cmd_0 = io_dmem_req_bits_req_cmd; // @[ScratchpadSlavePort.scala:43:9, :66:21]
assign io_dmem_req_bits_size_0 = io_dmem_req_bits_req_size; // @[ScratchpadSlavePort.scala:43:9, :66:21]
wire _GEN_2 = _io_dmem_req_bits_T_1_param == 3'h0; // @[ScratchpadSlavePort.scala:70:63, :119:41]
wire _io_dmem_req_bits_req_cmd_T; // @[ScratchpadSlavePort.scala:70:63]
assign _io_dmem_req_bits_req_cmd_T = _GEN_2; // @[ScratchpadSlavePort.scala:70:63]
wire _io_dmem_req_bits_req_cmd_T_10; // @[ScratchpadSlavePort.scala:76:63]
assign _io_dmem_req_bits_req_cmd_T_10 = _GEN_2; // @[ScratchpadSlavePort.scala:70:63, :76:63]
wire [3:0] _io_dmem_req_bits_req_cmd_T_1 = _io_dmem_req_bits_req_cmd_T ? 4'hC : 4'h0; // @[ScratchpadSlavePort.scala:70:63]
wire _GEN_3 = _io_dmem_req_bits_T_1_param == 3'h1; // @[ScratchpadSlavePort.scala:67:44, :70:63, :119:41]
wire _io_dmem_req_bits_req_cmd_T_2; // @[ScratchpadSlavePort.scala:70:63]
assign _io_dmem_req_bits_req_cmd_T_2 = _GEN_3; // @[ScratchpadSlavePort.scala:70:63]
wire _io_dmem_req_bits_req_cmd_T_12; // @[ScratchpadSlavePort.scala:76:63]
assign _io_dmem_req_bits_req_cmd_T_12 = _GEN_3; // @[ScratchpadSlavePort.scala:70:63, :76:63]
wire [3:0] _io_dmem_req_bits_req_cmd_T_3 = _io_dmem_req_bits_req_cmd_T_2 ? 4'hD : _io_dmem_req_bits_req_cmd_T_1; // @[ScratchpadSlavePort.scala:70:63]
wire _GEN_4 = _io_dmem_req_bits_T_1_param == 3'h2; // @[ScratchpadSlavePort.scala:67:44, :70:63, :119:41]
wire _io_dmem_req_bits_req_cmd_T_4; // @[ScratchpadSlavePort.scala:70:63]
assign _io_dmem_req_bits_req_cmd_T_4 = _GEN_4; // @[ScratchpadSlavePort.scala:70:63]
wire _io_dmem_req_bits_req_cmd_T_14; // @[ScratchpadSlavePort.scala:76:63]
assign _io_dmem_req_bits_req_cmd_T_14 = _GEN_4; // @[ScratchpadSlavePort.scala:70:63, :76:63]
wire [3:0] _io_dmem_req_bits_req_cmd_T_5 = _io_dmem_req_bits_req_cmd_T_4 ? 4'hE : _io_dmem_req_bits_req_cmd_T_3; // @[ScratchpadSlavePort.scala:70:63]
wire _GEN_5 = _io_dmem_req_bits_T_1_param == 3'h3; // @[ScratchpadSlavePort.scala:67:44, :70:63, :119:41]
wire _io_dmem_req_bits_req_cmd_T_6; // @[ScratchpadSlavePort.scala:70:63]
assign _io_dmem_req_bits_req_cmd_T_6 = _GEN_5; // @[ScratchpadSlavePort.scala:70:63]
wire _io_dmem_req_bits_req_cmd_T_16; // @[ScratchpadSlavePort.scala:76:63]
assign _io_dmem_req_bits_req_cmd_T_16 = _GEN_5; // @[ScratchpadSlavePort.scala:70:63, :76:63]
wire [3:0] _io_dmem_req_bits_req_cmd_T_7 = _io_dmem_req_bits_req_cmd_T_6 ? 4'hF : _io_dmem_req_bits_req_cmd_T_5; // @[ScratchpadSlavePort.scala:70:63]
wire _io_dmem_req_bits_req_cmd_T_8 = _io_dmem_req_bits_T_1_param == 3'h4; // @[ScratchpadSlavePort.scala:70:63, :119:41]
wire [3:0] _io_dmem_req_bits_req_cmd_T_9 = _io_dmem_req_bits_req_cmd_T_8 ? 4'h8 : _io_dmem_req_bits_req_cmd_T_7; // @[ScratchpadSlavePort.scala:70:63]
wire [3:0] _io_dmem_req_bits_req_cmd_T_11 = _io_dmem_req_bits_req_cmd_T_10 ? 4'h9 : 4'h0; // @[ScratchpadSlavePort.scala:76:63]
wire [3:0] _io_dmem_req_bits_req_cmd_T_13 = _io_dmem_req_bits_req_cmd_T_12 ? 4'hA : _io_dmem_req_bits_req_cmd_T_11; // @[ScratchpadSlavePort.scala:76:63]
wire [3:0] _io_dmem_req_bits_req_cmd_T_15 = _io_dmem_req_bits_req_cmd_T_14 ? 4'hB : _io_dmem_req_bits_req_cmd_T_13; // @[ScratchpadSlavePort.scala:76:63]
wire [3:0] _io_dmem_req_bits_req_cmd_T_17 = _io_dmem_req_bits_req_cmd_T_16 ? 4'h4 : _io_dmem_req_bits_req_cmd_T_15; // @[ScratchpadSlavePort.scala:76:63]
wire _io_dmem_req_bits_req_cmd_T_18 = _io_dmem_req_bits_T_1_opcode == 3'h0; // @[ScratchpadSlavePort.scala:67:44, :119:41]
wire _io_dmem_req_bits_req_cmd_T_19 = _io_dmem_req_bits_req_cmd_T_18; // @[ScratchpadSlavePort.scala:67:44]
wire _GEN_6 = _io_dmem_req_bits_T_1_opcode == 3'h1; // @[ScratchpadSlavePort.scala:67:44, :119:41]
wire _io_dmem_req_bits_req_cmd_T_20; // @[ScratchpadSlavePort.scala:67:44]
assign _io_dmem_req_bits_req_cmd_T_20 = _GEN_6; // @[ScratchpadSlavePort.scala:67:44]
wire _io_dmem_req_bits_T_3; // @[ScratchpadSlavePort.scala:89:43]
assign _io_dmem_req_bits_T_3 = _GEN_6; // @[ScratchpadSlavePort.scala:67:44, :89:43]
wire [4:0] _io_dmem_req_bits_req_cmd_T_21 = _io_dmem_req_bits_req_cmd_T_20 ? 5'h11 : {4'h0, _io_dmem_req_bits_req_cmd_T_19}; // @[ScratchpadSlavePort.scala:67:44]
wire _io_dmem_req_bits_req_cmd_T_22 = _io_dmem_req_bits_T_1_opcode == 3'h2; // @[ScratchpadSlavePort.scala:67:44, :119:41]
wire [4:0] _io_dmem_req_bits_req_cmd_T_23 = _io_dmem_req_bits_req_cmd_T_22 ? {1'h0, _io_dmem_req_bits_req_cmd_T_9} : _io_dmem_req_bits_req_cmd_T_21; // @[ScratchpadSlavePort.scala:67:44, :70:63]
wire _io_dmem_req_bits_req_cmd_T_24 = _io_dmem_req_bits_T_1_opcode == 3'h3; // @[ScratchpadSlavePort.scala:67:44, :119:41]
wire [4:0] _io_dmem_req_bits_req_cmd_T_25 = _io_dmem_req_bits_req_cmd_T_24 ? {1'h0, _io_dmem_req_bits_req_cmd_T_17} : _io_dmem_req_bits_req_cmd_T_23; // @[ScratchpadSlavePort.scala:67:44, :76:63]
wire _io_dmem_req_bits_req_cmd_T_26 = _io_dmem_req_bits_T_1_opcode == 3'h4; // @[ScratchpadSlavePort.scala:67:44, :119:41]
wire [4:0] _io_dmem_req_bits_req_cmd_T_27 = _io_dmem_req_bits_req_cmd_T_26 ? 5'h0 : _io_dmem_req_bits_req_cmd_T_25; // @[ScratchpadSlavePort.scala:67:44]
wire _io_dmem_req_bits_mask_full_desired_mask_upper_T = _io_dmem_req_bits_T_1_address[0]; // @[ScratchpadSlavePort.scala:119:41]
wire _io_dmem_req_bits_mask_full_desired_mask_lower_T = _io_dmem_req_bits_T_1_address[0]; // @[ScratchpadSlavePort.scala:119:41]
wire _io_dmem_req_bits_mask_full_desired_mask_upper_T_1 = _io_dmem_req_bits_mask_full_desired_mask_upper_T; // @[AMOALU.scala:20:{22,27}]
wire _io_dmem_req_bits_mask_full_desired_mask_upper_T_2 = |io_dmem_req_bits_mask_full_desired_mask_size; // @[AMOALU.scala:11:18, :20:53]
wire _io_dmem_req_bits_mask_full_desired_mask_upper_T_3 = _io_dmem_req_bits_mask_full_desired_mask_upper_T_2; // @[AMOALU.scala:20:{47,53}]
wire io_dmem_req_bits_mask_full_desired_mask_upper = _io_dmem_req_bits_mask_full_desired_mask_upper_T_1 | _io_dmem_req_bits_mask_full_desired_mask_upper_T_3; // @[AMOALU.scala:20:{22,42,47}]
wire io_dmem_req_bits_mask_full_desired_mask_lower = ~_io_dmem_req_bits_mask_full_desired_mask_lower_T; // @[AMOALU.scala:21:{22,27}]
wire [1:0] _io_dmem_req_bits_mask_full_desired_mask_T = {io_dmem_req_bits_mask_full_desired_mask_upper, io_dmem_req_bits_mask_full_desired_mask_lower}; // @[AMOALU.scala:20:42, :21:22, :22:16]
wire _io_dmem_req_bits_mask_full_desired_mask_upper_T_4 = _io_dmem_req_bits_T_1_address[1]; // @[ScratchpadSlavePort.scala:119:41]
wire _io_dmem_req_bits_mask_full_desired_mask_lower_T_1 = _io_dmem_req_bits_T_1_address[1]; // @[ScratchpadSlavePort.scala:119:41]
wire [1:0] _io_dmem_req_bits_mask_full_desired_mask_upper_T_5 = _io_dmem_req_bits_mask_full_desired_mask_upper_T_4 ? _io_dmem_req_bits_mask_full_desired_mask_T : 2'h0; // @[AMOALU.scala:20:{22,27}, :22:16]
wire _io_dmem_req_bits_mask_full_desired_mask_upper_T_6 = io_dmem_req_bits_mask_full_desired_mask_size[1]; // @[AMOALU.scala:11:18, :20:53]
wire [1:0] _io_dmem_req_bits_mask_full_desired_mask_upper_T_7 = {2{_io_dmem_req_bits_mask_full_desired_mask_upper_T_6}}; // @[AMOALU.scala:20:{47,53}]
wire [1:0] io_dmem_req_bits_mask_full_desired_mask_upper_1 = _io_dmem_req_bits_mask_full_desired_mask_upper_T_5 | _io_dmem_req_bits_mask_full_desired_mask_upper_T_7; // @[AMOALU.scala:20:{22,42,47}]
wire [1:0] io_dmem_req_bits_mask_full_desired_mask_lower_1 = _io_dmem_req_bits_mask_full_desired_mask_lower_T_1 ? 2'h0 : _io_dmem_req_bits_mask_full_desired_mask_T; // @[AMOALU.scala:21:{22,27}, :22:16]
wire [3:0] _io_dmem_req_bits_mask_full_desired_mask_T_1 = {io_dmem_req_bits_mask_full_desired_mask_upper_1, io_dmem_req_bits_mask_full_desired_mask_lower_1}; // @[AMOALU.scala:20:42, :21:22, :22:16]
wire _io_dmem_req_bits_mask_full_desired_mask_upper_T_8 = _io_dmem_req_bits_T_1_address[2]; // @[ScratchpadSlavePort.scala:119:41]
wire _io_dmem_req_bits_mask_full_desired_mask_lower_T_2 = _io_dmem_req_bits_T_1_address[2]; // @[ScratchpadSlavePort.scala:119:41]
wire [3:0] _io_dmem_req_bits_mask_full_desired_mask_upper_T_9 = _io_dmem_req_bits_mask_full_desired_mask_upper_T_8 ? _io_dmem_req_bits_mask_full_desired_mask_T_1 : 4'h0; // @[AMOALU.scala:20:{22,27}, :22:16]
wire _io_dmem_req_bits_mask_full_desired_mask_upper_T_10 = &io_dmem_req_bits_mask_full_desired_mask_size; // @[AMOALU.scala:11:18, :20:53]
wire [3:0] _io_dmem_req_bits_mask_full_desired_mask_upper_T_11 = {4{_io_dmem_req_bits_mask_full_desired_mask_upper_T_10}}; // @[AMOALU.scala:20:{47,53}]
wire [3:0] io_dmem_req_bits_mask_full_desired_mask_upper_2 = _io_dmem_req_bits_mask_full_desired_mask_upper_T_9 | _io_dmem_req_bits_mask_full_desired_mask_upper_T_11; // @[AMOALU.scala:20:{22,42,47}]
wire [3:0] io_dmem_req_bits_mask_full_desired_mask_lower_2 = _io_dmem_req_bits_mask_full_desired_mask_lower_T_2 ? 4'h0 : _io_dmem_req_bits_mask_full_desired_mask_T_1; // @[AMOALU.scala:21:{22,27}, :22:16]
wire [7:0] io_dmem_req_bits_mask_full_desired_mask = {io_dmem_req_bits_mask_full_desired_mask_upper_2, io_dmem_req_bits_mask_full_desired_mask_lower_2}; // @[AMOALU.scala:20:42, :21:22, :22:16]
wire [7:0] _io_dmem_req_bits_mask_full_T = ~io_dmem_req_bits_mask_full_desired_mask; // @[ScratchpadSlavePort.scala:87:19]
wire [7:0] _io_dmem_req_bits_mask_full_T_1 = _io_dmem_req_bits_T_1_mask | _io_dmem_req_bits_mask_full_T; // @[ScratchpadSlavePort.scala:87:{17,19}, :119:41]
wire io_dmem_req_bits_mask_full = &_io_dmem_req_bits_mask_full_T_1; // @[ScratchpadSlavePort.scala:87:{17,34}]
wire _io_dmem_req_bits_T_4 = _io_dmem_req_bits_T_3 & io_dmem_req_bits_mask_full; // @[ScratchpadSlavePort.scala:87:34, :89:{43,73}]
wire _io_dmem_req_bits_T_5 = _io_dmem_req_bits_T_2 | _io_dmem_req_bits_T_4; // @[ScratchpadSlavePort.scala:89:{19,30,73}]
assign io_dmem_req_bits_req_cmd = _io_dmem_req_bits_T_5 ? 5'h1 : _io_dmem_req_bits_req_cmd_T_27; // @[ScratchpadSlavePort.scala:66:21, :67:{15,44}, :89:{30,88}, :90:17]
assign io_dmem_req_bits_req_addr = {18'h0, _io_dmem_req_bits_T_1_address}; // @[ScratchpadSlavePort.scala:66:21, :95:16, :119:41]
assign _io_dmem_s1_kill_T = state != 3'h1; // @[ScratchpadSlavePort.scala:53:24, :67:44, :122:30]
assign io_dmem_s1_kill_0 = _io_dmem_s1_kill_T; // @[ScratchpadSlavePort.scala:43:9, :122:30]
wire _nodeIn_d_valid_T = state == 3'h5; // @[ScratchpadSlavePort.scala:53:24, :125:50]
assign _nodeIn_d_valid_T_1 = io_dmem_resp_valid_0 | _nodeIn_d_valid_T; // @[ScratchpadSlavePort.scala:43:9, :125:{41,50}]
assign nodeIn_d_valid = _nodeIn_d_valid_T_1; // @[ScratchpadSlavePort.scala:125:41]
wire _nodeIn_d_bits_T = acq_opcode == 3'h0; // @[package.scala:16:47]
wire _nodeIn_d_bits_T_1 = acq_opcode == 3'h1; // @[package.scala:16:47]
wire _nodeIn_d_bits_T_2 = _nodeIn_d_bits_T | _nodeIn_d_bits_T_1; // @[package.scala:16:47, :81:59]
assign _nodeIn_d_bits_T_3_opcode = {2'h0, ~_nodeIn_d_bits_T_2}; // @[package.scala:81:59]
assign _nodeIn_d_bits_T_3_size = _nodeIn_d_bits_T_2 ? nodeIn_d_bits_d_size : nodeIn_d_bits_d_1_size; // @[package.scala:81:59]
assign _nodeIn_d_bits_T_3_source = _nodeIn_d_bits_T_2 ? nodeIn_d_bits_d_source : nodeIn_d_bits_d_1_source; // @[package.scala:81:59]
assign nodeIn_d_bits_opcode = _nodeIn_d_bits_T_3_opcode; // @[ScratchpadSlavePort.scala:126:24]
assign nodeIn_d_bits_size = _nodeIn_d_bits_T_3_size; // @[ScratchpadSlavePort.scala:126:24]
assign nodeIn_d_bits_source = _nodeIn_d_bits_T_3_source; // @[ScratchpadSlavePort.scala:126:24]
reg [63:0] nodeIn_d_bits_data_r; // @[package.scala:88:63]
assign _nodeIn_d_bits_data_T_1 = _nodeIn_d_bits_data_T ? io_dmem_resp_bits_data_raw_0 : nodeIn_d_bits_data_r; // @[package.scala:88:{42,63}]
assign nodeIn_d_bits_data = _nodeIn_d_bits_data_T_1; // @[package.scala:88:42]
always @(posedge clock) begin // @[ScratchpadSlavePort.scala:43:9]
if (reset) // @[ScratchpadSlavePort.scala:43:9]
state <= 3'h4; // @[ScratchpadSlavePort.scala:53:24]
else if (dmem_req_valid & io_dmem_req_ready_0) // @[ScratchpadSlavePort.scala:43:9, :54:30, :60:26]
state <= 3'h1; // @[ScratchpadSlavePort.scala:53:24, :67:44]
else if (io_dmem_s2_nack_0) // @[ScratchpadSlavePort.scala:43:9]
state <= 3'h3; // @[ScratchpadSlavePort.scala:53:24, :67:44]
else if (nodeIn_d_ready & nodeIn_d_valid) // @[Decoupled.scala:51:35]
state <= 3'h0; // @[ScratchpadSlavePort.scala:53:24]
else if (io_dmem_resp_valid_0) // @[ScratchpadSlavePort.scala:43:9]
state <= 3'h5; // @[ScratchpadSlavePort.scala:53:24]
else if (_io_dmem_req_bits_T_2 & nodeIn_a_valid) // @[ScratchpadSlavePort.scala:56:28, :89:19]
state <= 3'h0; // @[ScratchpadSlavePort.scala:53:24]
else if (state == 3'h1) // @[ScratchpadSlavePort.scala:53:24, :55:17, :67:44]
state <= 3'h2; // @[ScratchpadSlavePort.scala:53:24, :67:44]
if (nodeIn_a_ready & nodeIn_a_valid) begin // @[Decoupled.scala:51:35]
acq_opcode <= nodeIn_a_bits_opcode; // @[ScratchpadSlavePort.scala:62:18]
acq_param <= nodeIn_a_bits_param; // @[ScratchpadSlavePort.scala:62:18]
acq_size <= nodeIn_a_bits_size; // @[ScratchpadSlavePort.scala:62:18]
acq_source <= nodeIn_a_bits_source; // @[ScratchpadSlavePort.scala:62:18]
acq_address <= nodeIn_a_bits_address; // @[ScratchpadSlavePort.scala:62:18]
acq_mask <= nodeIn_a_bits_mask; // @[ScratchpadSlavePort.scala:62:18]
acq_data <= nodeIn_a_bits_data; // @[ScratchpadSlavePort.scala:62:18]
acq_corrupt <= nodeIn_a_bits_corrupt; // @[ScratchpadSlavePort.scala:62:18]
end
if (_nodeIn_d_bits_data_T) // @[ScratchpadSlavePort.scala:129:70]
nodeIn_d_bits_data_r <= io_dmem_resp_bits_data_raw_0; // @[package.scala:88:63]
  end // always @(posedge)
assign auto_in_a_ready = auto_in_a_ready_0; // @[ScratchpadSlavePort.scala:43:9]
assign auto_in_d_valid = auto_in_d_valid_0; // @[ScratchpadSlavePort.scala:43:9]
assign auto_in_d_bits_opcode = auto_in_d_bits_opcode_0; // @[ScratchpadSlavePort.scala:43:9]
assign auto_in_d_bits_size = auto_in_d_bits_size_0; // @[ScratchpadSlavePort.scala:43:9]
assign auto_in_d_bits_source = auto_in_d_bits_source_0; // @[ScratchpadSlavePort.scala:43:9]
assign auto_in_d_bits_data = auto_in_d_bits_data_0; // @[ScratchpadSlavePort.scala:43:9]
assign io_dmem_req_valid = io_dmem_req_valid_0; // @[ScratchpadSlavePort.scala:43:9]
assign io_dmem_req_bits_addr = io_dmem_req_bits_addr_0; // @[ScratchpadSlavePort.scala:43:9]
assign io_dmem_req_bits_cmd = io_dmem_req_bits_cmd_0; // @[ScratchpadSlavePort.scala:43:9]
assign io_dmem_req_bits_size = io_dmem_req_bits_size_0; // @[ScratchpadSlavePort.scala:43:9]
assign io_dmem_s1_kill = io_dmem_s1_kill_0; // @[ScratchpadSlavePort.scala:43:9]
assign io_dmem_s1_data_data = io_dmem_s1_data_data_0; // @[ScratchpadSlavePort.scala:43:9]
assign io_dmem_s1_data_mask = io_dmem_s1_data_mask_0; // @[ScratchpadSlavePort.scala:43:9]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File Crossing.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.interrupts
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy.lazymodule._
import freechips.rocketchip.util.{SynchronizerShiftReg, AsyncResetReg}
@deprecated("IntXing does not ensure interrupt source is glitch free. Use IntSyncSource and IntSyncSink", "rocket-chip 1.2")
class IntXing(sync: Int = 3)(implicit p: Parameters) extends LazyModule
{
val intnode = IntAdapterNode()
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
(intnode.in zip intnode.out) foreach { case ((in, _), (out, _)) =>
out := SynchronizerShiftReg(in, sync)
}
}
}
object IntSyncCrossingSource
{
def apply(alreadyRegistered: Boolean = false)(implicit p: Parameters) =
{
val intsource = LazyModule(new IntSyncCrossingSource(alreadyRegistered))
intsource.node
}
}
class IntSyncCrossingSource(alreadyRegistered: Boolean = false)(implicit p: Parameters) extends LazyModule
{
val node = IntSyncSourceNode(alreadyRegistered)
lazy val module = if (alreadyRegistered) (new ImplRegistered) else (new Impl)
class Impl extends LazyModuleImp(this) {
def outSize = node.out.headOption.map(_._1.sync.size).getOrElse(0)
override def desiredName = s"IntSyncCrossingSource_n${node.out.size}x${outSize}"
(node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
out.sync := AsyncResetReg(Cat(in.reverse)).asBools
}
}
class ImplRegistered extends LazyRawModuleImp(this) {
def outSize = node.out.headOption.map(_._1.sync.size).getOrElse(0)
override def desiredName = s"IntSyncCrossingSource_n${node.out.size}x${outSize}_Registered"
(node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
out.sync := in
}
}
}
object IntSyncCrossingSink
{
@deprecated("IntSyncCrossingSink which used the `sync` parameter to determine crossing type is deprecated. Use IntSyncAsyncCrossingSink, IntSyncRationalCrossingSink, or IntSyncSyncCrossingSink instead for > 1, 1, and 0 sync values respectively", "rocket-chip 1.2")
def apply(sync: Int = 3)(implicit p: Parameters) =
{
val intsink = LazyModule(new IntSyncAsyncCrossingSink(sync))
intsink.node
}
}
class IntSyncAsyncCrossingSink(sync: Int = 3)(implicit p: Parameters) extends LazyModule
{
val node = IntSyncSinkNode(sync)
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
override def desiredName = s"IntSyncAsyncCrossingSink_n${node.out.size}x${node.out.head._1.size}"
(node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
out := SynchronizerShiftReg(in.sync, sync)
}
}
}
object IntSyncAsyncCrossingSink
{
def apply(sync: Int = 3)(implicit p: Parameters) =
{
val intsink = LazyModule(new IntSyncAsyncCrossingSink(sync))
intsink.node
}
}
class IntSyncSyncCrossingSink()(implicit p: Parameters) extends LazyModule
{
val node = IntSyncSinkNode(0)
lazy val module = new Impl
class Impl extends LazyRawModuleImp(this) {
def outSize = node.out.headOption.map(_._1.size).getOrElse(0)
override def desiredName = s"IntSyncSyncCrossingSink_n${node.out.size}x${outSize}"
(node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
out := in.sync
}
}
}
object IntSyncSyncCrossingSink
{
def apply()(implicit p: Parameters) =
{
val intsink = LazyModule(new IntSyncSyncCrossingSink())
intsink.node
}
}
class IntSyncRationalCrossingSink()(implicit p: Parameters) extends LazyModule
{
val node = IntSyncSinkNode(1)
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
def outSize = node.out.headOption.map(_._1.size).getOrElse(0)
override def desiredName = s"IntSyncRationalCrossingSink_n${node.out.size}x${outSize}"
(node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
out := RegNext(in.sync)
}
}
}
object IntSyncRationalCrossingSink
{
def apply()(implicit p: Parameters) =
{
val intsink = LazyModule(new IntSyncRationalCrossingSink())
intsink.node
}
}
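// A minimal usage sketch (illustrative, not from the upstream file): the source/sink halves
// above are normally composed around a clock-domain boundary, and the IntSync edge between them
// is the piece that actually crosses. `srcIntNode` and `dstIntNode` are assumed interrupt nodes
// defined elsewhere.
//
//   val source = LazyModule(new IntSyncCrossingSource(alreadyRegistered = false))
//   val sink   = LazyModule(new IntSyncAsyncCrossingSink(sync = 3))
//   source.node := srcIntNode   // register the raw interrupt in the source clock domain
//   sink.node   := source.node  // the IntSync edge crosses the clock-domain boundary
//   dstIntNode  := sink.node    // synchronizer flops live in the destination clock domain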
File LazyModuleImp.scala:
package org.chipsalliance.diplomacy.lazymodule
import chisel3.{withClockAndReset, Module, RawModule, Reset, _}
import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo}
import firrtl.passes.InlineAnnotation
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.nodes.Dangle
import scala.collection.immutable.SortedMap
/** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]].
*
* This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy.
*/
sealed trait LazyModuleImpLike extends RawModule {
/** [[LazyModule]] that contains this instance. */
val wrapper: LazyModule
/** IOs that will be automatically "punched" for this instance. */
val auto: AutoBundle
/** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */
protected[diplomacy] val dangles: Seq[Dangle]
// [[wrapper.module]] had better not be accessed while LazyModules are still being built!
require(
LazyModule.scope.isEmpty,
s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}"
)
/** Set module name. Defaults to the containing LazyModule's desiredName. */
override def desiredName: String = wrapper.desiredName
suggestName(wrapper.suggestedName)
/** [[Parameters]] for chisel [[Module]]s. */
implicit val p: Parameters = wrapper.p
  /** Instantiate this [[LazyModule]], returning the [[AutoBundle]] and any unconnected [[Dangle]]s from this module and
* submodules.
*/
protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = {
// 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]],
// 2. return [[Dangle]]s from each module.
val childDangles = wrapper.children.reverse.flatMap { c =>
implicit val sourceInfo: SourceInfo = c.info
c.cloneProto.map { cp =>
// If the child is a clone, then recursively set cloneProto of its children as well
def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = {
require(bases.size == clones.size)
(bases.zip(clones)).map { case (l, r) =>
require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}")
l.cloneProto = Some(r)
assignCloneProtos(l.children, r.children)
}
}
assignCloneProtos(c.children, cp.children)
// Clone the child module as a record, and get its [[AutoBundle]]
val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName)
val clonedAuto = clone("auto").asInstanceOf[AutoBundle]
// Get the empty [[Dangle]]'s of the cloned child
val rawDangles = c.cloneDangles()
require(rawDangles.size == clonedAuto.elements.size)
// Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s
val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) }
dangles
}.getOrElse {
// For non-clones, instantiate the child module
val mod = try {
Module(c.module)
} catch {
case e: ChiselException => {
println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}")
throw e
}
}
mod.dangles
}
}
// Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]].
// This will result in a sequence of [[Dangle]] from these [[BaseNode]]s.
val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate())
// Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]]
val allDangles = nodeDangles ++ childDangles
// Group [[allDangles]] by their [[source]].
val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*)
// For each [[source]] set of [[Dangle]]s of size 2, ensure that these
// can be connected as a source-sink pair (have opposite flipped value).
// Make the connection and mark them as [[done]].
val done = Set() ++ pairing.values.filter(_.size == 2).map {
case Seq(a, b) =>
require(a.flipped != b.flipped)
// @todo <> in chisel3 makes directionless connection.
if (a.flipped) {
a.data <> b.data
} else {
b.data <> a.data
}
a.source
case _ => None
}
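    // Note (illustrative, grounded in the code above): a HalfEdge source that appears in exactly
    // two dangles (one flipped, one not) names a connection that stays internal to this LazyModule
    // and is wired here; every other dangle falls through to `forward` below and surfaces as an
    // `auto_*` port on the generated AutoBundle IO.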
// Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module.
val forward = allDangles.filter(d => !done(d.source))
// Generate [[AutoBundle]] IO from [[forward]].
val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*))
// Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]]
val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) =>
if (d.flipped) {
d.data <> io
} else {
io <> d.data
}
d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name)
}
// Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]].
wrapper.inModuleBody.reverse.foreach {
_()
}
if (wrapper.shouldBeInlined) {
chisel3.experimental.annotate(new ChiselAnnotation {
def toFirrtl = InlineAnnotation(toNamed)
})
}
// Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]].
(auto, dangles)
}
}
/** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike {
/** Instantiate hardware of this `Module`. */
val (auto, dangles) = instantiate()
}
/** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike {
// These wires are the default clock+reset for all LazyModule children.
// It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the
// [[LazyRawModuleImp]] children.
// Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly.
/** drive clock explicitly. */
val childClock: Clock = Wire(Clock())
/** drive reset explicitly. */
val childReset: Reset = Wire(Reset())
// the default is that these are disabled
childClock := false.B.asClock
childReset := chisel3.DontCare
def provideImplicitClockToLazyChildren: Boolean = false
val (auto, dangles) =
if (provideImplicitClockToLazyChildren) {
withClockAndReset(childClock, childReset) { instantiate() }
} else {
instantiate()
}
}
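// A minimal sketch (illustrative, not from the upstream file; names are hypothetical): a subclass
// of LazyRawModuleImp typically forwards an externally supplied clock/reset to its lazy children
// through childClock/childReset and opts into the implicit clock for them.
//
//   class ClockedDomainImp(outer: LazyModule) extends LazyRawModuleImp(outer) {
//     override def provideImplicitClockToLazyChildren = true
//     val clockIn = IO(Input(Clock()))
//     val resetIn = IO(Input(AsyncReset()))
//     childClock := clockIn
//     childReset := resetIn
//   }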
File MixedNode.scala:
package org.chipsalliance.diplomacy.nodes
import chisel3.{Data, DontCare, Wire}
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.{Field, Parameters}
import org.chipsalliance.diplomacy.ValName
import org.chipsalliance.diplomacy.sourceLine
/** One side metadata of a [[Dangle]].
*
* Describes one side of an edge going into or out of a [[BaseNode]].
*
* @param serial
* the global [[BaseNode.serial]] number of the [[BaseNode]] that this [[HalfEdge]] connects to.
* @param index
* the `index` in the [[BaseNode]]'s input or output port list that this [[HalfEdge]] belongs to.
*/
case class HalfEdge(serial: Int, index: Int) extends Ordered[HalfEdge] {
import scala.math.Ordered.orderingToOrdered
def compare(that: HalfEdge): Int = HalfEdge.unapply(this).compare(HalfEdge.unapply(that))
}
/** [[Dangle]] captures the `IO` information of a [[LazyModule]] and which two [[BaseNode]]s the [[Edges]]/[[Bundle]]
* connects.
*
* [[Dangle]]s are generated by [[BaseNode.instantiate]] using [[MixedNode.danglesOut]] and [[MixedNode.danglesIn]] ,
* [[LazyModuleImp.instantiate]] connects those that go to internal or explicit IO connections in a [[LazyModule]].
*
* @param source
* the source [[HalfEdge]] of this [[Dangle]], which captures the source [[BaseNode]] and the port `index` within
* that [[BaseNode]].
* @param sink
* sink [[HalfEdge]] of this [[Dangle]], which captures the sink [[BaseNode]] and the port `index` within that
* [[BaseNode]].
* @param flipped
  *   flip or not in [[AutoBundle.makeElements]]. If true this corresponds to `danglesIn`, if false it corresponds to
  *   `danglesOut`.
* @param dataOpt
* actual [[Data]] for the hardware connection. Can be empty if this belongs to a cloned module
*/
case class Dangle(source: HalfEdge, sink: HalfEdge, flipped: Boolean, name: String, dataOpt: Option[Data]) {
def data = dataOpt.get
}
/** [[Edges]] is a collection of parameters describing the functionality and connection for an interface, which is often
* derived from the interconnection protocol and can inform the parameterization of the hardware bundles that actually
* implement the protocol.
*/
case class Edges[EI, EO](in: Seq[EI], out: Seq[EO])
/** A field available in [[Parameters]] used to determine whether [[InwardNodeImp.monitor]] will be called. */
case object MonitorsEnabled extends Field[Boolean](true)
/** When rendering the edge in a graphical format, flip the order in which the edges' source and sink are presented.
*
* For example, when rendering graphML, yEd by default tries to put the source node vertically above the sink node, but
* [[RenderFlipped]] inverts this relationship. When a particular [[LazyModule]] contains both source nodes and sink
  * nodes, flipping the rendering of one node's edge will usually produce a more concise visual layout for the
* [[LazyModule]].
*/
case object RenderFlipped extends Field[Boolean](false)
/** The sealed node class in the package, all node are derived from it.
*
* @param inner
* Sink interface implementation.
* @param outer
* Source interface implementation.
* @param valName
* val name of this node.
* @tparam DI
  *   Downward-flowing parameters received on the inner side of the node. It is usually a bunch of parameters
* describing the protocol parameters from a source. For an [[InwardNode]], it is determined by the connected
* [[OutwardNode]]. Since it can be connected to multiple sources, this parameter is always a Seq of source port
* parameters.
* @tparam UI
  *   Upward-flowing parameters generated by the inner side of the node. It is usually a bunch of parameters describing
  *   the protocol parameters of a sink. For an [[InwardNode]], it is determined by the node itself.
* @tparam EI
  *   Edge Parameters describing a connection on the inner side of the node. It is usually a bunch of transfers
* specified for a sink according to protocol.
* @tparam BI
* Bundle type used when connecting to the inner side of the node. It is a hardware interface of this sink interface.
  *   It should extend from [[chisel3.Data]], which represents the real hardware.
* @tparam DO
  *   Downward-flowing parameters generated on the outer side of the node. It is usually a bunch of parameters
  *   describing the protocol parameters of a source. For an [[OutwardNode]], it is determined by the node itself.
* @tparam UO
  *   Upward-flowing parameters received by the outer side of the node. It is usually a bunch of parameters describing
* the protocol parameters from a sink. For an [[OutwardNode]], it is determined by the connected [[InwardNode]].
* Since it can be connected to multiple sinks, this parameter is always a Seq of sink port parameters.
* @tparam EO
  *   Edge Parameters describing a connection on the outer side of the node. It is usually a bunch of transfers
* specified for a source according to protocol.
* @tparam BO
* Bundle type used when connecting to the outer side of the node. It is a hardware interface of this source
  *   interface. It should extend from [[chisel3.Data]], which represents the real hardware.
*
* @note
* Call Graph of [[MixedNode]]
  *   - line `─`: the source is processed by a function and the result is passed on to others
* - Arrow `→`: target of arrow is generated by source
*
* {{{
* (from the other node)
* ┌─────────────────────────────────────────────────────────[[InwardNode.uiParams]]─────────────┐
* ↓ │
* (binding node when elaboration) [[OutwardNode.uoParams]]────────────────────────[[MixedNode.mapParamsU]]→──────────┐ │
* [[InwardNode.accPI]] │ │ │
* │ │ (based on protocol) │
* │ │ [[MixedNode.inner.edgeI]] │
* │ │ ↓ │
* ↓ │ │ │
* (immobilize after elaboration) (inward port from [[OutwardNode]]) │ ↓ │
* [[InwardNode.iBindings]]──┐ [[MixedNode.iDirectPorts]]────────────────────→[[MixedNode.iPorts]] [[InwardNode.uiParams]] │
* │ │ ↑ │ │ │
* │ │ │ [[OutwardNode.doParams]] │ │
* │ │ │ (from the other node) │ │
* │ │ │ │ │ │
* │ │ │ │ │ │
* │ │ │ └────────┬──────────────┤ │
* │ │ │ │ │ │
* │ │ │ │ (based on protocol) │
* │ │ │ │ [[MixedNode.inner.edgeI]] │
* │ │ │ │ │ │
* │ │ (from the other node) │ ↓ │
* │ └───[[OutwardNode.oPortMapping]] [[OutwardNode.oStar]] │ [[MixedNode.edgesIn]]───┐ │
* │ ↑ ↑ │ │ ↓ │
* │ │ │ │ │ [[MixedNode.in]] │
* │ │ │ │ ↓ ↑ │
* │ (solve star connection) │ │ │ [[MixedNode.bundleIn]]──┘ │
* ├───[[MixedNode.resolveStar]]→─┼─────────────────────────────┤ └────────────────────────────────────┐ │
* │ │ │ [[MixedNode.bundleOut]]─┐ │ │
* │ │ │ ↑ ↓ │ │
* │ │ │ │ [[MixedNode.out]] │ │
* │ ↓ ↓ │ ↑ │ │
* │ ┌─────[[InwardNode.iPortMapping]] [[InwardNode.iStar]] [[MixedNode.edgesOut]]──┘ │ │
* │ │ (from the other node) ↑ │ │
* │ │ │ │ │ │
* │ │ │ [[MixedNode.outer.edgeO]] │ │
* │ │ │ (based on protocol) │ │
* │ │ │ │ │ │
* │ │ │ ┌────────────────────────────────────────┤ │ │
* │ │ │ │ │ │ │
* │ │ │ │ │ │ │
* │ │ │ │ │ │ │
* (immobilize after elaboration)│ ↓ │ │ │ │
* [[OutwardNode.oBindings]]─┘ [[MixedNode.oDirectPorts]]───→[[MixedNode.oPorts]] [[OutwardNode.doParams]] │ │
* ↑ (inward port from [[OutwardNode]]) │ │ │ │
* │ ┌─────────────────────────────────────────┤ │ │ │
* │ │ │ │ │ │
* │ │ │ │ │ │
* [[OutwardNode.accPO]] │ ↓ │ │ │
* (binding node when elaboration) │ [[InwardNode.diParams]]─────→[[MixedNode.mapParamsD]]────────────────────────────┘ │ │
* │ ↑ │ │
* │ └──────────────────────────────────────────────────────────────────────────────────────────┘ │
* └──────────────────────────────────────────────────────────────────────────────────────────────────────────┘
* }}}
*/
abstract class MixedNode[DI, UI, EI, BI <: Data, DO, UO, EO, BO <: Data](
val inner: InwardNodeImp[DI, UI, EI, BI],
val outer: OutwardNodeImp[DO, UO, EO, BO]
)(
implicit valName: ValName)
extends BaseNode
with NodeHandle[DI, UI, EI, BI, DO, UO, EO, BO]
with InwardNode[DI, UI, BI]
with OutwardNode[DO, UO, BO] {
// Generate a [[NodeHandle]] with inward and outward node are both this node.
val inward = this
val outward = this
/** Debug info of nodes binding. */
def bindingInfo: String = s"""$iBindingInfo
|$oBindingInfo
|""".stripMargin
/** Debug info of ports connecting. */
def connectedPortsInfo: String = s"""${oPorts.size} outward ports connected: [${oPorts.map(_._2.name).mkString(",")}]
|${iPorts.size} inward ports connected: [${iPorts.map(_._2.name).mkString(",")}]
|""".stripMargin
/** Debug info of parameters propagations. */
def parametersInfo: String = s"""${doParams.size} downstream outward parameters: [${doParams.mkString(",")}]
|${uoParams.size} upstream outward parameters: [${uoParams.mkString(",")}]
|${diParams.size} downstream inward parameters: [${diParams.mkString(",")}]
|${uiParams.size} upstream inward parameters: [${uiParams.mkString(",")}]
|""".stripMargin
/** For a given node, converts [[OutwardNode.accPO]] and [[InwardNode.accPI]] to [[MixedNode.oPortMapping]] and
* [[MixedNode.iPortMapping]].
*
* Given counts of known inward and outward binding and inward and outward star bindings, return the resolved inward
* stars and outward stars.
*
* This method will also validate the arguments and throw a runtime error if the values are unsuitable for this type
* of node.
*
* @param iKnown
* Number of known-size ([[BIND_ONCE]]) input bindings.
* @param oKnown
* Number of known-size ([[BIND_ONCE]]) output bindings.
* @param iStar
* Number of unknown size ([[BIND_STAR]]) input bindings.
* @param oStar
* Number of unknown size ([[BIND_STAR]]) output bindings.
* @return
* A Tuple of the resolved number of input and output connections.
*/
protected[diplomacy] def resolveStar(iKnown: Int, oKnown: Int, iStar: Int, oStar: Int): (Int, Int)
/** Function to generate downward-flowing outward params from the downward-flowing input params and the current output
* ports.
*
* @param n
* The size of the output sequence to generate.
* @param p
* Sequence of downward-flowing input parameters of this node.
* @return
* A `n`-sized sequence of downward-flowing output edge parameters.
*/
protected[diplomacy] def mapParamsD(n: Int, p: Seq[DI]): Seq[DO]
/** Function to generate upward-flowing input parameters from the upward-flowing output parameters [[uiParams]].
*
* @param n
* Size of the output sequence.
* @param p
* Upward-flowing output edge parameters.
* @return
* A n-sized sequence of upward-flowing input edge parameters.
*/
protected[diplomacy] def mapParamsU(n: Int, p: Seq[UO]): Seq[UI]
/** @return
* The sink cardinality of the node, the number of outputs bound with [[BIND_QUERY]] summed with inputs bound with
* [[BIND_STAR]].
*/
protected[diplomacy] lazy val sinkCard: Int = oBindings.count(_._3 == BIND_QUERY) + iBindings.count(_._3 == BIND_STAR)
/** @return
* The source cardinality of this node, the number of inputs bound with [[BIND_QUERY]] summed with the number of
* output bindings bound with [[BIND_STAR]].
*/
protected[diplomacy] lazy val sourceCard: Int =
iBindings.count(_._3 == BIND_QUERY) + oBindings.count(_._3 == BIND_STAR)
/** @return list of nodes involved in flex bindings with this node. */
protected[diplomacy] lazy val flexes: Seq[BaseNode] =
oBindings.filter(_._3 == BIND_FLEX).map(_._2) ++ iBindings.filter(_._3 == BIND_FLEX).map(_._2)
/** Resolves the flex to be either source or sink and returns the offset where the [[BIND_STAR]] operators begin
* greedily taking up the remaining connections.
*
* @return
* A value >= 0 if it is sink cardinality, a negative value for source cardinality. The magnitude of the return
* value is not relevant.
*/
protected[diplomacy] lazy val flexOffset: Int = {
/** Recursively performs a depth-first search of the [[flexes]], [[BaseNode]]s connected to this node with flex
* operators. The algorithm bottoms out when we either get to a node we have already visited or when we get to a
* connection that is not a flex and can set the direction for us. Otherwise, recurse by visiting the `flexes` of
* each node in the current set and decide whether they should be added to the set or not.
*
* @return
* the mapping of [[BaseNode]] indexed by their serial numbers.
*/
def DFS(v: BaseNode, visited: Map[Int, BaseNode]): Map[Int, BaseNode] = {
if (visited.contains(v.serial) || !v.flexibleArityDirection) {
visited
} else {
v.flexes.foldLeft(visited + (v.serial -> v))((sum, n) => DFS(n, sum))
}
}
/** Determine which [[BaseNode]] are involved in resolving the flex connections to/from this node.
*
* @example
* {{{
* a :*=* b :*=* c
* d :*=* b
* e :*=* f
* }}}
*
* `flexSet` for `a`, `b`, `c`, or `d` will be `Set(a, b, c, d)` `flexSet` for `e` or `f` will be `Set(e,f)`
*/
val flexSet = DFS(this, Map()).values
/** The total number of :*= operators where we're on the left. */
val allSink = flexSet.map(_.sinkCard).sum
/** The total number of :=* operators used when we're on the right. */
val allSource = flexSet.map(_.sourceCard).sum
require(
allSink == 0 || allSource == 0,
s"The nodes ${flexSet.map(_.name)} which are inter-connected by :*=* have ${allSink} :*= operators and ${allSource} :=* operators connected to them, making it impossible to determine cardinality inference direction."
)
allSink - allSource
}
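  // Worked example (illustrative), continuing the bindings shown above: adding `a :*= x` makes
  // allSink = 1 for the set {a, b, c, d} while allSource stays 0, so flexOffset >= 0 and every
  // :*=* edge in that set resolves with sink cardinality. Adding `y :=* c` as well would make
  // allSource = 1 and trip the require above, since the inference direction would be ambiguous.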
/** @return A value >= 0 if it is sink cardinality, a negative value for source cardinality. */
protected[diplomacy] def edgeArityDirection(n: BaseNode): Int = {
if (flexibleArityDirection) flexOffset
else if (n.flexibleArityDirection) n.flexOffset
else 0
}
/** For a node which is connected between two nodes, select the one that will influence the direction of the flex
* resolution.
*/
protected[diplomacy] def edgeAritySelect(n: BaseNode, l: => Int, r: => Int): Int = {
val dir = edgeArityDirection(n)
if (dir < 0) l
else if (dir > 0) r
else 1
}
/** Ensure that the same node is not visited twice in resolving `:*=`, etc operators. */
private var starCycleGuard = false
  /** Resolve all the star operators into concrete indices. As connections are being made, some may be "star"
    * connections which need to be resolved in some way to determine how many actual edges they correspond to. We also
    * need to build up the ranges of edges which correspond to each binding operator, so that we can apply the correct
    * edge parameters and later build up correct bundle connections.
*
* [[oPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that oPort (binding
* operator). [[iPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that iPort
* (binding operator). [[oStar]]: `Int` the value to return for this node `N` for any `N :*= foo` or `N :*=* foo :*=
* bar` [[iStar]]: `Int` the value to return for this node `N` for any `foo :=* N` or `bar :=* foo :*=* N`
*/
protected[diplomacy] lazy val (
oPortMapping: Seq[(Int, Int)],
iPortMapping: Seq[(Int, Int)],
oStar: Int,
iStar: Int
) = {
try {
if (starCycleGuard) throw StarCycleException()
starCycleGuard = true
// For a given node N...
// Number of foo :=* N
// + Number of bar :=* foo :*=* N
val oStars = oBindings.count { case (_, n, b, _, _) =>
b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) < 0)
}
// Number of N :*= foo
// + Number of N :*=* foo :*= bar
val iStars = iBindings.count { case (_, n, b, _, _) =>
b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) > 0)
}
// 1 for foo := N
// + bar.iStar for bar :*= foo :*=* N
// + foo.iStar for foo :*= N
// + 0 for foo :=* N
val oKnown = oBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, 0, n.iStar)
case BIND_QUERY => n.iStar
case BIND_STAR => 0
}
}.sum
// 1 for N := foo
// + bar.oStar for N :*=* foo :=* bar
// + foo.oStar for N :=* foo
// + 0 for N :*= foo
val iKnown = iBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, n.oStar, 0)
case BIND_QUERY => n.oStar
case BIND_STAR => 0
}
}.sum
// Resolve star depends on the node subclass to implement the algorithm for this.
val (iStar, oStar) = resolveStar(iKnown, oKnown, iStars, oStars)
// Cumulative list of resolved outward binding range starting points
val oSum = oBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, oStar, n.iStar)
case BIND_QUERY => n.iStar
case BIND_STAR => oStar
}
}.scanLeft(0)(_ + _)
// Cumulative list of resolved inward binding range starting points
val iSum = iBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, n.oStar, iStar)
case BIND_QUERY => n.oStar
case BIND_STAR => iStar
}
}.scanLeft(0)(_ + _)
// Create ranges for each binding based on the running sums and return
// those along with resolved values for the star operations.
(oSum.init.zip(oSum.tail), iSum.init.zip(iSum.tail), oStar, iStar)
} catch {
case c: StarCycleException => throw c.copy(loop = context +: c.loop)
}
}
/** Sequence of inward ports.
*
* This should be called after all star bindings are resolved.
*
* Each element is: `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding.
* `n` Instance of inward node. `p` View of [[Parameters]] where this connection was made. `s` Source info where this
* connection was made in the source code.
*/
protected[diplomacy] lazy val oDirectPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] =
oBindings.flatMap { case (i, n, _, p, s) =>
// for each binding operator in this node, look at what it connects to
val (start, end) = n.iPortMapping(i)
(start until end).map { j => (j, n, p, s) }
}
/** Sequence of outward ports.
*
* This should be called after all star bindings are resolved.
*
* `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding. `n` Instance of
* outward node. `p` View of [[Parameters]] where this connection was made. `s` [[SourceInfo]] where this connection
* was made in the source code.
*/
protected[diplomacy] lazy val iDirectPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] =
iBindings.flatMap { case (i, n, _, p, s) =>
// query this port index range of this node in the other side of node.
val (start, end) = n.oPortMapping(i)
(start until end).map { j => (j, n, p, s) }
}
  // Ephemeral nodes (which have non-None iForward/oForward) have in_degree = out_degree
// Thus, there must exist an Eulerian path and the below algorithms terminate
@scala.annotation.tailrec
private def oTrace(
tuple: (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)
): (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo) = tuple match {
case (i, n, p, s) => n.iForward(i) match {
case None => (i, n, p, s)
case Some((j, m)) => oTrace((j, m, p, s))
}
}
@scala.annotation.tailrec
private def iTrace(
tuple: (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)
): (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo) = tuple match {
case (i, n, p, s) => n.oForward(i) match {
case None => (i, n, p, s)
case Some((j, m)) => iTrace((j, m, p, s))
}
}
/** Final output ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved.
*
* Each Port is a tuple of:
* - Numeric index of this binding in the [[InwardNode]] on the other end.
* - [[InwardNode]] on the other end of this binding.
* - A view of [[Parameters]] where the binding occurred.
* - [[SourceInfo]] for source-level error reporting.
*/
lazy val oPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] = oDirectPorts.map(oTrace)
/** Final input ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved.
*
* Each Port is a tuple of:
* - numeric index of this binding in [[OutwardNode]] on the other end.
* - [[OutwardNode]] on the other end of this binding.
* - a view of [[Parameters]] where the binding occurred.
* - [[SourceInfo]] for source-level error reporting.
*/
lazy val iPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] = iDirectPorts.map(iTrace)
private var oParamsCycleGuard = false
protected[diplomacy] lazy val diParams: Seq[DI] = iPorts.map { case (i, n, _, _) => n.doParams(i) }
protected[diplomacy] lazy val doParams: Seq[DO] = {
try {
if (oParamsCycleGuard) throw DownwardCycleException()
oParamsCycleGuard = true
val o = mapParamsD(oPorts.size, diParams)
require(
o.size == oPorts.size,
s"""Diplomacy has detected a problem with your graph:
|At the following node, the number of outward ports should equal the number of produced outward parameters.
|$context
|$connectedPortsInfo
|Downstreamed inward parameters: [${diParams.mkString(",")}]
|Produced outward parameters: [${o.mkString(",")}]
|""".stripMargin
)
o.map(outer.mixO(_, this))
} catch {
case c: DownwardCycleException => throw c.copy(loop = context +: c.loop)
}
}
private var iParamsCycleGuard = false
protected[diplomacy] lazy val uoParams: Seq[UO] = oPorts.map { case (o, n, _, _) => n.uiParams(o) }
protected[diplomacy] lazy val uiParams: Seq[UI] = {
try {
if (iParamsCycleGuard) throw UpwardCycleException()
iParamsCycleGuard = true
val i = mapParamsU(iPorts.size, uoParams)
require(
i.size == iPorts.size,
s"""Diplomacy has detected a problem with your graph:
|At the following node, the number of inward ports should equal the number of produced inward parameters.
|$context
|$connectedPortsInfo
|Upstreamed outward parameters: [${uoParams.mkString(",")}]
|Produced inward parameters: [${i.mkString(",")}]
|""".stripMargin
)
i.map(inner.mixI(_, this))
} catch {
case c: UpwardCycleException => throw c.copy(loop = context +: c.loop)
}
}
/** Outward edge parameters. */
protected[diplomacy] lazy val edgesOut: Seq[EO] =
(oPorts.zip(doParams)).map { case ((i, n, p, s), o) => outer.edgeO(o, n.uiParams(i), p, s) }
/** Inward edge parameters. */
protected[diplomacy] lazy val edgesIn: Seq[EI] =
(iPorts.zip(uiParams)).map { case ((o, n, p, s), i) => inner.edgeI(n.doParams(o), i, p, s) }
/** A tuple of the input edge parameters and output edge parameters for the edges bound to this node.
*
* If you need to access to the edges of a foreign Node, use this method (in/out create bundles).
*/
lazy val edges: Edges[EI, EO] = Edges(edgesIn, edgesOut)
/** Create actual Wires corresponding to the Bundles parameterized by the outward edges of this node. */
protected[diplomacy] lazy val bundleOut: Seq[BO] = edgesOut.map { e =>
val x = Wire(outer.bundleO(e)).suggestName(s"${valName.value}Out")
// TODO: Don't care unconnected forwarded diplomatic signals for compatibility issue,
// In the future, we should add an option to decide whether allowing unconnected in the LazyModule
x := DontCare
x
}
/** Create actual Wires corresponding to the Bundles parameterized by the inward edges of this node. */
protected[diplomacy] lazy val bundleIn: Seq[BI] = edgesIn.map { e =>
val x = Wire(inner.bundleI(e)).suggestName(s"${valName.value}In")
// TODO: Don't care unconnected forwarded diplomatic signals for compatibility issue,
// In the future, we should add an option to decide whether allowing unconnected in the LazyModule
x := DontCare
x
}
private def emptyDanglesOut: Seq[Dangle] = oPorts.zipWithIndex.map { case ((j, n, _, _), i) =>
Dangle(
source = HalfEdge(serial, i),
sink = HalfEdge(n.serial, j),
flipped = false,
name = wirePrefix + "out",
dataOpt = None
)
}
private def emptyDanglesIn: Seq[Dangle] = iPorts.zipWithIndex.map { case ((j, n, _, _), i) =>
Dangle(
source = HalfEdge(n.serial, j),
sink = HalfEdge(serial, i),
flipped = true,
name = wirePrefix + "in",
dataOpt = None
)
}
/** Create the [[Dangle]]s which describe the connections from this node output to other nodes inputs. */
protected[diplomacy] def danglesOut: Seq[Dangle] = emptyDanglesOut.zipWithIndex.map { case (d, i) =>
d.copy(dataOpt = Some(bundleOut(i)))
}
/** Create the [[Dangle]]s which describe the connections from this node input from other nodes outputs. */
protected[diplomacy] def danglesIn: Seq[Dangle] = emptyDanglesIn.zipWithIndex.map { case (d, i) =>
d.copy(dataOpt = Some(bundleIn(i)))
}
private[diplomacy] var instantiated = false
/** Gather Bundle and edge parameters of outward ports.
*
* Accessors to the result of negotiation to be used within [[LazyModuleImp]] Code. Should only be used within
* [[LazyModuleImp]] code or after its instantiation has completed.
*/
def out: Seq[(BO, EO)] = {
require(
instantiated,
s"$name.out should not be called until after instantiation of its parent LazyModule.module has begun"
)
bundleOut.zip(edgesOut)
}
/** Gather Bundle and edge parameters of inward ports.
*
* Accessors to the result of negotiation to be used within [[LazyModuleImp]] Code. Should only be used within
* [[LazyModuleImp]] code or after its instantiation has completed.
*/
def in: Seq[(BI, EI)] = {
require(
instantiated,
s"$name.in should not be called until after instantiation of its parent LazyModule.module has begun"
)
bundleIn.zip(edgesIn)
}
/** Actually instantiate this node during [[LazyModuleImp]] evaluation. Mark that it's safe to use the Bundle wires,
* instantiate monitors on all input ports if appropriate, and return all the dangles of this node.
*/
protected[diplomacy] def instantiate(): Seq[Dangle] = {
instantiated = true
if (!circuitIdentity) {
(iPorts.zip(in)).foreach { case ((_, _, p, _), (b, e)) => if (p(MonitorsEnabled)) inner.monitor(b, e) }
}
danglesOut ++ danglesIn
}
protected[diplomacy] def cloneDangles(): Seq[Dangle] = emptyDanglesOut ++ emptyDanglesIn
/** Connects the outward part of a node with the inward part of this node. */
protected[diplomacy] def bind(
h: OutwardNode[DI, UI, BI],
binding: NodeBinding
)(
implicit p: Parameters,
sourceInfo: SourceInfo
): Unit = {
val x = this // x := y
val y = h
sourceLine(sourceInfo, " at ", "")
val i = x.iPushed
val o = y.oPushed
y.oPush(
i,
x,
binding match {
case BIND_ONCE => BIND_ONCE
case BIND_FLEX => BIND_FLEX
case BIND_STAR => BIND_QUERY
case BIND_QUERY => BIND_STAR
}
)
x.iPush(o, y, binding)
}
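  // Note (illustrative, grounded in the swap above): for `x :*= y` the sink x records BIND_STAR on
  // its inward list while the source y records BIND_QUERY on its outward list; `x :=* y` records the
  // mirror image (BIND_QUERY inward on x, BIND_STAR outward on y); `x := y` records BIND_ONCE on both.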
/* Metadata for printing the node graph. */
def inputs: Seq[(OutwardNode[DI, UI, BI], RenderedEdge)] = (iPorts.zip(edgesIn)).map { case ((_, n, p, _), e) =>
val re = inner.render(e)
(n, re.copy(flipped = re.flipped != p(RenderFlipped)))
}
/** Metadata for printing the node graph */
def outputs: Seq[(InwardNode[DO, UO, BO], RenderedEdge)] = oPorts.map { case (i, n, _, _) => (n, n.inputs(i)._2) }
}
| module IntSyncSyncCrossingSink_n1x1_48( // @[Crossing.scala:96:9]
input auto_in_sync_0, // @[LazyModuleImp.scala:107:25]
output auto_out_0 // @[LazyModuleImp.scala:107:25]
);
wire auto_in_sync_0_0 = auto_in_sync_0; // @[Crossing.scala:96:9]
wire childClock = 1'h0; // @[LazyModuleImp.scala:155:31]
wire childReset = 1'h0; // @[LazyModuleImp.scala:158:31]
wire _childClock_T = 1'h0; // @[LazyModuleImp.scala:160:25]
wire nodeIn_sync_0 = auto_in_sync_0_0; // @[Crossing.scala:96:9]
wire nodeOut_0; // @[MixedNode.scala:542:17]
wire auto_out_0_0; // @[Crossing.scala:96:9]
assign nodeOut_0 = nodeIn_sync_0; // @[MixedNode.scala:542:17, :551:17]
assign auto_out_0_0 = nodeOut_0; // @[Crossing.scala:96:9]
assign auto_out_0 = auto_out_0_0; // @[Crossing.scala:96:9]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File Fragmenter.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import org.chipsalliance.diplomacy.lazymodule._
import freechips.rocketchip.diplomacy.{AddressSet, BufferParams, IdRange, TransferSizes}
import freechips.rocketchip.util.{Repeater, OH1ToUInt, UIntToOH1}
import scala.math.min
import freechips.rocketchip.util.DataToAugmentedData
object EarlyAck {
sealed trait T
case object AllPuts extends T
case object PutFulls extends T
case object None extends T
}
// minSize: minimum size of transfers supported by all outward managers
// maxSize: maximum size of transfers supported after the Fragmenter is applied
// alwaysMin: fragment all requests down to minSize (else fragment to maximum supported by manager)
// earlyAck: should a multibeat Put be acknowledged on the first beat or the last beat
// holdFirstDeny: allow the Fragmenter to unsafely combine multibeat Gets by taking the first denied for the whole burst
// nameSuffix: appends a suffix to the module name
// Fragmenter modifies: PutFull, PutPartial, LogicalData, Get, Hint
// Fragmenter passes: ArithmeticData (truncated to minSize if alwaysMin)
// Fragmenter cannot modify acquire (could livelock); thus it is unsafe to put caches on both sides
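// A minimal usage sketch (illustrative, not from the upstream file; the node names are assumed):
// the Fragmenter is spliced into a TileLink chain so masters may issue transfers up to maxSize
// even when the slave only supports smaller ones, e.g.
//   slaveDevice.node := TLFragmenter(minSize = 8, maxSize = 64) := masterXbarNode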
class TLFragmenter(val minSize: Int, val maxSize: Int, val alwaysMin: Boolean = false, val earlyAck: EarlyAck.T = EarlyAck.None, val holdFirstDeny: Boolean = false, val nameSuffix: Option[String] = None)(implicit p: Parameters) extends LazyModule
{
require(isPow2 (maxSize), s"TLFragmenter expects pow2(maxSize), but got $maxSize")
require(isPow2 (minSize), s"TLFragmenter expects pow2(minSize), but got $minSize")
require(minSize <= maxSize, s"TLFragmenter expects min <= max, but got $minSize > $maxSize")
val fragmentBits = log2Ceil(maxSize / minSize)
val fullBits = if (earlyAck == EarlyAck.PutFulls) 1 else 0
val toggleBits = 1
val addedBits = fragmentBits + toggleBits + fullBits
def expandTransfer(x: TransferSizes, op: String) = if (!x) x else {
// validate that we can apply the fragmenter correctly
require (x.max >= minSize, s"TLFragmenter (with parent $parent) max transfer size $op(${x.max}) must be >= min transfer size (${minSize})")
TransferSizes(x.min, maxSize)
}
private def noChangeRequired = minSize == maxSize
private def shrinkTransfer(x: TransferSizes) =
if (!alwaysMin) x
else if (x.min <= minSize) TransferSizes(x.min, min(minSize, x.max))
else TransferSizes.none
private def mapManager(m: TLSlaveParameters) = m.v1copy(
supportsArithmetic = shrinkTransfer(m.supportsArithmetic),
supportsLogical = shrinkTransfer(m.supportsLogical),
supportsGet = expandTransfer(m.supportsGet, "Get"),
supportsPutFull = expandTransfer(m.supportsPutFull, "PutFull"),
    supportsPutPartial = expandTransfer(m.supportsPutPartial, "PutPartial"),
supportsHint = expandTransfer(m.supportsHint, "Hint"))
val node = new TLAdapterNode(
// We require that all the responses are mutually FIFO
// Thus we need to compact all of the masters into one big master
clientFn = { c => (if (noChangeRequired) c else c.v2copy(
masters = Seq(TLMasterParameters.v2(
name = "TLFragmenter",
sourceId = IdRange(0, if (minSize == maxSize) c.endSourceId else (c.endSourceId << addedBits)),
requestFifo = true,
emits = TLMasterToSlaveTransferSizes(
acquireT = shrinkTransfer(c.masters.map(_.emits.acquireT) .reduce(_ mincover _)),
acquireB = shrinkTransfer(c.masters.map(_.emits.acquireB) .reduce(_ mincover _)),
arithmetic = shrinkTransfer(c.masters.map(_.emits.arithmetic).reduce(_ mincover _)),
logical = shrinkTransfer(c.masters.map(_.emits.logical) .reduce(_ mincover _)),
get = shrinkTransfer(c.masters.map(_.emits.get) .reduce(_ mincover _)),
putFull = shrinkTransfer(c.masters.map(_.emits.putFull) .reduce(_ mincover _)),
putPartial = shrinkTransfer(c.masters.map(_.emits.putPartial).reduce(_ mincover _)),
hint = shrinkTransfer(c.masters.map(_.emits.hint) .reduce(_ mincover _))
)
))
))},
managerFn = { m => if (noChangeRequired) m else m.v2copy(slaves = m.slaves.map(mapManager)) }
) {
override def circuitIdentity = noChangeRequired
}
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
override def desiredName = (Seq("TLFragmenter") ++ nameSuffix).mkString("_")
(node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
if (noChangeRequired) {
out <> in
} else {
// All managers must share a common FIFO domain (responses might end up interleaved)
val manager = edgeOut.manager
val managers = manager.managers
val beatBytes = manager.beatBytes
val fifoId = managers(0).fifoId
require (fifoId.isDefined && managers.map(_.fifoId == fifoId).reduce(_ && _))
require (!manager.anySupportAcquireB || !edgeOut.client.anySupportProbe,
s"TLFragmenter (with parent $parent) can't fragment a caching client's requests into a cacheable region")
require (minSize >= beatBytes, s"TLFragmenter (with parent $parent) can't support fragmenting ($minSize) to sub-beat ($beatBytes) accesses")
// We can't support devices which are cached on both sides of us
require (!edgeOut.manager.anySupportAcquireB || !edgeIn.client.anySupportProbe)
// We can't support denied because we reassemble fragments
require (!edgeOut.manager.mayDenyGet || holdFirstDeny, s"TLFragmenter (with parent $parent) can't support denials without holdFirstDeny=true")
require (!edgeOut.manager.mayDenyPut || earlyAck == EarlyAck.None)
/* The Fragmenter is a bit tricky, because there are 5 sizes in play:
* max size -- the maximum transfer size possible
* orig size -- the original pre-fragmenter size
* frag size -- the modified post-fragmenter size
* min size -- the threshold below which frag=orig
       *  beat size -- the amount transferred on any given beat
*
* The relationships are as follows:
* max >= orig >= frag
* max > min >= beat
* It IS possible that orig <= min (then frag=orig; ie: no fragmentation)
*
* The fragment# (sent via TL.source) is measured in multiples of min size.
* Meanwhile, to track the progress, counters measure in multiples of beat size.
*
* Here is an example of a bus with max=256, min=8, beat=4 and a device supporting 16.
*
* in.A out.A (frag#) out.D (frag#) in.D gen# ack#
* get64 get16 6 ackD16 6 ackD64 12 15
* ackD16 6 ackD64 14
* ackD16 6 ackD64 13
* ackD16 6 ackD64 12
* get16 4 ackD16 4 ackD64 8 11
* ackD16 4 ackD64 10
* ackD16 4 ackD64 9
* ackD16 4 ackD64 8
* get16 2 ackD16 2 ackD64 4 7
* ackD16 2 ackD64 6
* ackD16 2 ackD64 5
* ackD16 2 ackD64 4
* get16 0 ackD16 0 ackD64 0 3
* ackD16 0 ackD64 2
* ackD16 0 ackD64 1
* ackD16 0 ackD64 0
*
* get8 get8 0 ackD8 0 ackD8 0 1
* ackD8 0 ackD8 0
*
* get4 get4 0 ackD4 0 ackD4 0 0
* get1 get1 0 ackD1 0 ackD1 0 0
*
* put64 put16 6 15
* put64 put16 6 14
* put64 put16 6 13
* put64 put16 6 ack16 6 12 12
* put64 put16 4 11
* put64 put16 4 10
* put64 put16 4 9
* put64 put16 4 ack16 4 8 8
* put64 put16 2 7
* put64 put16 2 6
* put64 put16 2 5
* put64 put16 2 ack16 2 4 4
* put64 put16 0 3
* put64 put16 0 2
* put64 put16 0 1
* put64 put16 0 ack16 0 ack64 0 0
*
* put8 put8 0 1
* put8 put8 0 ack8 0 ack8 0 0
*
* put4 put4 0 ack4 0 ack4 0 0
* put1 put1 0 ack1 0 ack1 0 0
*/
val counterBits = log2Up(maxSize/beatBytes)
val maxDownSize = if (alwaysMin) minSize else min(manager.maxTransfer, maxSize)
// Consider the following waveform for two 4-beat bursts:
// ---A----A------------
// -------D-----DDD-DDDD
// Under TL rules, the second A can use the same source as the first A,
// because the source is released for reuse on the first response beat.
//
// However, if we fragment the requests, it looks like this:
// ---3210-3210---------
// -------3-----210-3210
// ... now we've broken the rules because 210 are twice inflight.
//
// This phenomenon means we can have essentially 2*maxSize/minSize-1
// fragmented transactions in flight per original transaction source.
//
// To keep the source unique, we encode the beat counter in the low
// bits of the source. To solve the overlap, we use a toggle bit.
// Whatever toggle bit the D is reassembling, A will use the opposite.
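      // Worked example (illustrative): with minSize=8 and maxSize=64, fragmentBits=3 and, without
      // PutFulls early ack, addedBits=4. An A beat with original source 5, toggle 0 and fragment
      // number 6 therefore goes out with source = (5 << 4) | (0 << 3) | 6 = 0x56, and the D channel
      // recovers the original source as out.d.bits.source >> addedBits = 5.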
// First, handle the return path
val acknum = RegInit(0.U(counterBits.W))
val dOrig = Reg(UInt())
val dToggle = RegInit(false.B)
val dFragnum = out.d.bits.source(fragmentBits-1, 0)
val dFirst = acknum === 0.U
val dLast = dFragnum === 0.U // only for AccessAck (!Data)
val dsizeOH = UIntToOH (out.d.bits.size, log2Ceil(maxDownSize)+1)
val dsizeOH1 = UIntToOH1(out.d.bits.size, log2Up(maxDownSize))
val dHasData = edgeOut.hasData(out.d.bits)
// calculate new acknum
val acknum_fragment = dFragnum << log2Ceil(minSize/beatBytes)
val acknum_size = dsizeOH1 >> log2Ceil(beatBytes)
assert (!out.d.valid || (acknum_fragment & acknum_size) === 0.U)
val dFirst_acknum = acknum_fragment | Mux(dHasData, acknum_size, 0.U)
val ack_decrement = Mux(dHasData, 1.U, dsizeOH >> log2Ceil(beatBytes))
// calculate the original size
val dFirst_size = OH1ToUInt((dFragnum << log2Ceil(minSize)) | dsizeOH1)
when (out.d.fire) {
acknum := Mux(dFirst, dFirst_acknum, acknum - ack_decrement)
when (dFirst) {
dOrig := dFirst_size
dToggle := out.d.bits.source(fragmentBits)
}
}
// Swallow up non-data ack fragments
val doEarlyAck = earlyAck match {
case EarlyAck.AllPuts => true.B
case EarlyAck.PutFulls => out.d.bits.source(fragmentBits+1)
case EarlyAck.None => false.B
}
val drop = !dHasData && !Mux(doEarlyAck, dFirst, dLast)
out.d.ready := in.d.ready || drop
in.d.valid := out.d.valid && !drop
in.d.bits := out.d.bits // pass most stuff unchanged
in.d.bits.source := out.d.bits.source >> addedBits
in.d.bits.size := Mux(dFirst, dFirst_size, dOrig)
if (edgeOut.manager.mayDenyPut) {
val r_denied = Reg(Bool())
val d_denied = (!dFirst && r_denied) || out.d.bits.denied
when (out.d.fire) { r_denied := d_denied }
in.d.bits.denied := d_denied
}
if (edgeOut.manager.mayDenyGet) {
// Take denied only from the first beat and hold that value
val d_denied = out.d.bits.denied holdUnless dFirst
when (dHasData) {
in.d.bits.denied := d_denied
in.d.bits.corrupt := d_denied || out.d.bits.corrupt
}
}
// What maximum transfer sizes do downstream devices support?
val maxArithmetics = managers.map(_.supportsArithmetic.max)
val maxLogicals = managers.map(_.supportsLogical.max)
val maxGets = managers.map(_.supportsGet.max)
val maxPutFulls = managers.map(_.supportsPutFull.max)
val maxPutPartials = managers.map(_.supportsPutPartial.max)
val maxHints = managers.map(m => if (m.supportsHint) maxDownSize else 0)
// We assume that the request is valid => size 0 is impossible
val lgMinSize = log2Ceil(minSize).U
val maxLgArithmetics = maxArithmetics.map(m => if (m == 0) lgMinSize else log2Ceil(m).U)
val maxLgLogicals = maxLogicals .map(m => if (m == 0) lgMinSize else log2Ceil(m).U)
val maxLgGets = maxGets .map(m => if (m == 0) lgMinSize else log2Ceil(m).U)
val maxLgPutFulls = maxPutFulls .map(m => if (m == 0) lgMinSize else log2Ceil(m).U)
val maxLgPutPartials = maxPutPartials.map(m => if (m == 0) lgMinSize else log2Ceil(m).U)
val maxLgHints = maxHints .map(m => if (m == 0) lgMinSize else log2Ceil(m).U)
// Make the request repeatable
val repeater = Module(new Repeater(in.a.bits))
repeater.io.enq <> in.a
val in_a = repeater.io.deq
      // If this is in front of a single manager, these become constants
val find = manager.findFast(edgeIn.address(in_a.bits))
val maxLgArithmetic = Mux1H(find, maxLgArithmetics)
val maxLgLogical = Mux1H(find, maxLgLogicals)
val maxLgGet = Mux1H(find, maxLgGets)
val maxLgPutFull = Mux1H(find, maxLgPutFulls)
val maxLgPutPartial = Mux1H(find, maxLgPutPartials)
val maxLgHint = Mux1H(find, maxLgHints)
val limit = if (alwaysMin) lgMinSize else
MuxLookup(in_a.bits.opcode, lgMinSize)(Array(
TLMessages.PutFullData -> maxLgPutFull,
TLMessages.PutPartialData -> maxLgPutPartial,
TLMessages.ArithmeticData -> maxLgArithmetic,
TLMessages.LogicalData -> maxLgLogical,
TLMessages.Get -> maxLgGet,
TLMessages.Hint -> maxLgHint))
val aOrig = in_a.bits.size
val aFrag = Mux(aOrig > limit, limit, aOrig)
val aOrigOH1 = UIntToOH1(aOrig, log2Ceil(maxSize))
val aFragOH1 = UIntToOH1(aFrag, log2Up(maxDownSize))
val aHasData = edgeIn.hasData(in_a.bits)
val aMask = Mux(aHasData, 0.U, aFragOH1)
val gennum = RegInit(0.U(counterBits.W))
val aFirst = gennum === 0.U
val old_gennum1 = Mux(aFirst, aOrigOH1 >> log2Ceil(beatBytes), gennum - 1.U)
val new_gennum = ~(~old_gennum1 | (aMask >> log2Ceil(beatBytes))) // ~(~x|y) is width safe
val aFragnum = ~(~(old_gennum1 >> log2Ceil(minSize/beatBytes)) | (aFragOH1 >> log2Ceil(minSize)))
val aLast = aFragnum === 0.U
val aToggle = !Mux(aFirst, dToggle, RegEnable(dToggle, aFirst))
val aFull = if (earlyAck == EarlyAck.PutFulls) Some(in_a.bits.opcode === TLMessages.PutFullData) else None
when (out.a.fire) { gennum := new_gennum }
repeater.io.repeat := !aHasData && aFragnum =/= 0.U
out.a <> in_a
out.a.bits.address := in_a.bits.address | ~(old_gennum1 << log2Ceil(beatBytes) | ~aOrigOH1 | aFragOH1 | (minSize-1).U)
out.a.bits.source := Cat(Seq(in_a.bits.source) ++ aFull ++ Seq(aToggle.asUInt, aFragnum))
out.a.bits.size := aFrag
// Optimize away some of the Repeater's registers
assert (!repeater.io.full || !aHasData)
out.a.bits.data := in.a.bits.data
val fullMask = ((BigInt(1) << beatBytes) - 1).U
assert (!repeater.io.full || in_a.bits.mask === fullMask)
out.a.bits.mask := Mux(repeater.io.full, fullMask, in.a.bits.mask)
out.a.bits.user.waiveAll :<= in.a.bits.user.subset(_.isData)
// Tie off unused channels
in.b.valid := false.B
in.c.ready := true.B
in.e.ready := true.B
out.b.ready := true.B
out.c.valid := false.B
out.e.valid := false.B
}
}
}
}
object TLFragmenter
{
def apply(minSize: Int, maxSize: Int, alwaysMin: Boolean = false, earlyAck: EarlyAck.T = EarlyAck.None, holdFirstDeny: Boolean = false, nameSuffix: Option[String] = None)(implicit p: Parameters): TLNode =
{
if (minSize <= maxSize) {
val fragmenter = LazyModule(new TLFragmenter(minSize, maxSize, alwaysMin, earlyAck, holdFirstDeny, nameSuffix))
fragmenter.node
} else { TLEphemeralNode()(ValName("no_fragmenter")) }
}
def apply(wrapper: TLBusWrapper, nameSuffix: Option[String])(implicit p: Parameters): TLNode = apply(wrapper.beatBytes, wrapper.blockBytes, nameSuffix = nameSuffix)
def apply(wrapper: TLBusWrapper)(implicit p: Parameters): TLNode = apply(wrapper, None)
}
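// Illustrative usage sketch (names below are placeholders, not from the original source):
// interpose a Fragmenter so an 8-byte-wide device that only accepts single-beat requests
// never sees anything larger than one beat:
//   deviceNode := TLFragmenter(8, 64, alwaysMin = true) := bus.outwardNode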
// Synthesizable unit tests
import freechips.rocketchip.unittest._
class TLRAMFragmenter(ramBeatBytes: Int, maxSize: Int, txns: Int)(implicit p: Parameters) extends LazyModule {
val fuzz = LazyModule(new TLFuzzer(txns))
val model = LazyModule(new TLRAMModel("Fragmenter"))
val ram = LazyModule(new TLRAM(AddressSet(0x0, 0x3ff), beatBytes = ramBeatBytes))
(ram.node
:= TLDelayer(0.1)
:= TLBuffer(BufferParams.flow)
:= TLDelayer(0.1)
:= TLFragmenter(ramBeatBytes, maxSize, earlyAck = EarlyAck.AllPuts)
:= TLDelayer(0.1)
:= TLBuffer(BufferParams.flow)
:= TLFragmenter(ramBeatBytes, maxSize/2)
:= TLDelayer(0.1)
:= TLBuffer(BufferParams.flow)
:= model.node
:= fuzz.node)
lazy val module = new Impl
class Impl extends LazyModuleImp(this) with UnitTestModule {
io.finished := fuzz.module.io.finished
}
}
class TLRAMFragmenterTest(ramBeatBytes: Int, maxSize: Int, txns: Int = 5000, timeout: Int = 500000)(implicit p: Parameters) extends UnitTest(timeout) {
val dut = Module(LazyModule(new TLRAMFragmenter(ramBeatBytes,maxSize,txns)).module)
io.finished := dut.io.finished
dut.io.start := io.start
}
File package.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip
import chisel3._
import chisel3.util._
import scala.math.min
import scala.collection.{immutable, mutable}
package object util {
implicit class UnzippableOption[S, T](val x: Option[(S, T)]) {
def unzip = (x.map(_._1), x.map(_._2))
}
implicit class UIntIsOneOf(private val x: UInt) extends AnyVal {
def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR
def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq)
}
implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal {
/** Like Vec.apply(idx), but tolerates indices of mismatched width */
def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0))
}
implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal {
def apply(idx: UInt): T = {
if (x.size <= 1) {
x.head
} else if (!isPow2(x.size)) {
// For non-power-of-2 seqs, reflect elements to simplify decoder
(x ++ x.takeRight(x.size & -x.size)).toSeq(idx)
} else {
// Ignore MSBs of idx
val truncIdx =
if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx
else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0)
x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) }
}
}
def extract(idx: UInt): T = VecInit(x).extract(idx)
def asUInt: UInt = Cat(x.map(_.asUInt).reverse)
def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n)
def rotate(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n)
def rotateRight(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
}
// allow bitwise ops on Seq[Bool] just like UInt
implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal {
def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b }
def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b }
def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b }
def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x
def >> (n: Int): Seq[Bool] = x drop n
def unary_~ : Seq[Bool] = x.map(!_)
def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_)
def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_)
def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_)
private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B)
}
implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal {
def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable))
def getElements: Seq[Element] = x match {
case e: Element => Seq(e)
case a: Aggregate => a.getElements.flatMap(_.getElements)
}
}
/** Any Data subtype that has a Bool member named valid. */
type DataCanBeValid = Data { val valid: Bool }
implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal {
def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable)
}
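// Behavioural note (added commentary): `x holdUnless en` passes x through while en is
// high and replays the value captured on the last enabled cycle while en is low;
// `mem.readAndHold(addr, en)` applies the same idea to a SyncReadMem, holding the data
// returned by the most recent enabled read.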
implicit class StringToAugmentedString(private val x: String) extends AnyVal {
/** converts from camel case to underscores, also removing all spaces */
def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") {
case (acc, c) if c.isUpper => acc + "_" + c.toLower
case (acc, c) if c == ' ' => acc
case (acc, c) => acc + c
}
/** converts spaces or underscores to hyphens, also lowering case */
def kebab: String = x.toLowerCase map {
case ' ' => '-'
case '_' => '-'
case c => c
}
def named(name: Option[String]): String = {
x + name.map("_named_" + _ ).getOrElse("_with_no_name")
}
def named(name: String): String = named(Some(name))
}
implicit def uintToBitPat(x: UInt): BitPat = BitPat(x)
implicit def wcToUInt(c: WideCounter): UInt = c.value
implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal {
def sextTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x)
}
def padTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(0.U((n - x.getWidth).W), x)
}
// shifts left by n if n >= 0, or right by -n if n < 0
def << (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << n(w-1, 0)
Mux(n(w), shifted >> (1 << w), shifted)
}
// shifts right by n if n >= 0, or left by -n if n < 0
def >> (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << (1 << w) >> n(w-1, 0)
Mux(n(w), shifted, shifted >> (1 << w))
}
// Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts
def extract(hi: Int, lo: Int): UInt = {
require(hi >= lo-1)
if (hi == lo-1) 0.U
else x(hi, lo)
}
// Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts
def extractOption(hi: Int, lo: Int): Option[UInt] = {
require(hi >= lo-1)
if (hi == lo-1) None
else Some(x(hi, lo))
}
// like x & ~y, but first truncate or zero-extend y to x's width
def andNot(y: UInt): UInt = x & ~(y | (x & 0.U))
def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n)
def rotateRight(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r))
}
}
def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n))
def rotateLeft(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r))
}
}
// compute (this + y) % n, given (this < n) and (y < n)
def addWrap(y: UInt, n: Int): UInt = {
val z = x +& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0)
}
// compute (this - y) % n, given (this < n) and (y < n)
def subWrap(y: UInt, n: Int): UInt = {
val z = x -& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0)
}
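// Worked example (illustrative): with n = 6, 5.U.addWrap(4.U, 6) yields 3 and
// 1.U.subWrap(4.U, 6) yields 3; when n is a power of two the modulo reduces to
// plain truncation of the low log2(n) bits.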
def grouped(width: Int): Seq[UInt] =
(0 until x.getWidth by width).map(base => x(base + width - 1, base))
def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds
def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x)
// Like >=, but prevents x-prop for ('x >= 0)
def >== (y: UInt): Bool = x >= y || y === 0.U
}
implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal {
def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y)
def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y)
}
implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal {
def toInt: Int = if (x) 1 else 0
// this one's snagged from scalaz
def option[T](z: => T): Option[T] = if (x) Some(z) else None
}
implicit class IntToAugmentedInt(private val x: Int) extends AnyVal {
// exact log2
def log2: Int = {
require(isPow2(x))
log2Ceil(x)
}
}
def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x)
def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x))
def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0)
def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1)
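// Worked example (illustrative): UIntToOH1(2.U, 4) = b0011 (a "one-hot minus one" mask),
// OH1ToOH(b0011) = b0100, and OH1ToUInt(b0011) = 2, recovering the original value.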
def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None
// Fill 1s from low bits to high bits
def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth)
def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0))
helper(1, x)(width-1, 0)
}
// Fill 1s from high bits to low bits
def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth)
def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x >> s))
helper(1, x)(width-1, 0)
}
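// Worked example (illustrative): leftOR(b00100) = b11100 (set bits smear toward the MSB),
// rightOR(b00100) = b00111 (toward the LSB).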
def OptimizationBarrier[T <: Data](in: T): T = {
val barrier = Module(new Module {
val io = IO(new Bundle {
val x = Input(chiselTypeOf(in))
val y = Output(chiselTypeOf(in))
})
io.y := io.x
override def desiredName = s"OptimizationBarrier_${in.typeName}"
})
barrier.io.x := in
barrier.io.y
}
/** Similar to Seq.groupBy except this returns a Seq instead of a Map
* Useful for deterministic code generation
*/
def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = {
val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]]
for (x <- xs) {
val key = f(x)
val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A])
l += x
}
map.view.map({ case (k, vs) => k -> vs.toList }).toList
}
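// Worked example (illustrative): groupByIntoSeq(Seq(1, 2, 3, 4))(_ % 2) returns
// Seq((1, Seq(1, 3)), (0, Seq(2, 4))), preserving the order in which keys first appear.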
def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match {
case 1 => List.fill(n)(in.head)
case x if x == n => in
case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in")
}
// HeterogeneousBag moved to standalone diplomacy
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts)
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag
}
File Nodes.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import org.chipsalliance.diplomacy.nodes._
import freechips.rocketchip.util.{AsyncQueueParams,RationalDirection}
case object TLMonitorBuilder extends Field[TLMonitorArgs => TLMonitorBase](args => new TLMonitor(args))
object TLImp extends NodeImp[TLMasterPortParameters, TLSlavePortParameters, TLEdgeOut, TLEdgeIn, TLBundle]
{
def edgeO(pd: TLMasterPortParameters, pu: TLSlavePortParameters, p: Parameters, sourceInfo: SourceInfo) = new TLEdgeOut(pd, pu, p, sourceInfo)
def edgeI(pd: TLMasterPortParameters, pu: TLSlavePortParameters, p: Parameters, sourceInfo: SourceInfo) = new TLEdgeIn (pd, pu, p, sourceInfo)
def bundleO(eo: TLEdgeOut) = TLBundle(eo.bundle)
def bundleI(ei: TLEdgeIn) = TLBundle(ei.bundle)
def render(ei: TLEdgeIn) = RenderedEdge(colour = "#000000" /* black */, label = (ei.manager.beatBytes * 8).toString)
override def monitor(bundle: TLBundle, edge: TLEdgeIn): Unit = {
val monitor = Module(edge.params(TLMonitorBuilder)(TLMonitorArgs(edge)))
monitor.io.in := bundle
}
override def mixO(pd: TLMasterPortParameters, node: OutwardNode[TLMasterPortParameters, TLSlavePortParameters, TLBundle]): TLMasterPortParameters =
pd.v1copy(clients = pd.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) })
override def mixI(pu: TLSlavePortParameters, node: InwardNode[TLMasterPortParameters, TLSlavePortParameters, TLBundle]): TLSlavePortParameters =
pu.v1copy(managers = pu.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) })
}
trait TLFormatNode extends FormatNode[TLEdgeIn, TLEdgeOut]
case class TLClientNode(portParams: Seq[TLMasterPortParameters])(implicit valName: ValName) extends SourceNode(TLImp)(portParams) with TLFormatNode
case class TLManagerNode(portParams: Seq[TLSlavePortParameters])(implicit valName: ValName) extends SinkNode(TLImp)(portParams) with TLFormatNode
case class TLAdapterNode(
clientFn: TLMasterPortParameters => TLMasterPortParameters = { s => s },
managerFn: TLSlavePortParameters => TLSlavePortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLImp)(clientFn, managerFn) with TLFormatNode
case class TLJunctionNode(
clientFn: Seq[TLMasterPortParameters] => Seq[TLMasterPortParameters],
managerFn: Seq[TLSlavePortParameters] => Seq[TLSlavePortParameters])(
implicit valName: ValName)
extends JunctionNode(TLImp)(clientFn, managerFn) with TLFormatNode
case class TLIdentityNode()(implicit valName: ValName) extends IdentityNode(TLImp)() with TLFormatNode
object TLNameNode {
def apply(name: ValName) = TLIdentityNode()(name)
def apply(name: Option[String]): TLIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLIdentityNode = apply(Some(name))
}
case class TLEphemeralNode()(implicit valName: ValName) extends EphemeralNode(TLImp)()
object TLTempNode {
def apply(): TLEphemeralNode = TLEphemeralNode()(ValName("temp"))
}
case class TLNexusNode(
clientFn: Seq[TLMasterPortParameters] => TLMasterPortParameters,
managerFn: Seq[TLSlavePortParameters] => TLSlavePortParameters)(
implicit valName: ValName)
extends NexusNode(TLImp)(clientFn, managerFn) with TLFormatNode
abstract class TLCustomNode(implicit valName: ValName)
extends CustomNode(TLImp) with TLFormatNode
// Asynchronous crossings
trait TLAsyncFormatNode extends FormatNode[TLAsyncEdgeParameters, TLAsyncEdgeParameters]
object TLAsyncImp extends SimpleNodeImp[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncEdgeParameters, TLAsyncBundle]
{
def edge(pd: TLAsyncClientPortParameters, pu: TLAsyncManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLAsyncEdgeParameters(pd, pu, p, sourceInfo)
def bundle(e: TLAsyncEdgeParameters) = new TLAsyncBundle(e.bundle)
def render(e: TLAsyncEdgeParameters) = RenderedEdge(colour = "#ff0000" /* red */, label = e.manager.async.depth.toString)
override def mixO(pd: TLAsyncClientPortParameters, node: OutwardNode[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncBundle]): TLAsyncClientPortParameters =
pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }))
override def mixI(pu: TLAsyncManagerPortParameters, node: InwardNode[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncBundle]): TLAsyncManagerPortParameters =
pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }))
}
case class TLAsyncAdapterNode(
clientFn: TLAsyncClientPortParameters => TLAsyncClientPortParameters = { s => s },
managerFn: TLAsyncManagerPortParameters => TLAsyncManagerPortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLAsyncImp)(clientFn, managerFn) with TLAsyncFormatNode
case class TLAsyncIdentityNode()(implicit valName: ValName) extends IdentityNode(TLAsyncImp)() with TLAsyncFormatNode
object TLAsyncNameNode {
def apply(name: ValName) = TLAsyncIdentityNode()(name)
def apply(name: Option[String]): TLAsyncIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLAsyncIdentityNode = apply(Some(name))
}
case class TLAsyncSourceNode(sync: Option[Int])(implicit valName: ValName)
extends MixedAdapterNode(TLImp, TLAsyncImp)(
dFn = { p => TLAsyncClientPortParameters(p) },
uFn = { p => p.base.v1copy(minLatency = p.base.minLatency + sync.getOrElse(p.async.sync)) }) with FormatNode[TLEdgeIn, TLAsyncEdgeParameters] // discard cycles in other clock domain
case class TLAsyncSinkNode(async: AsyncQueueParams)(implicit valName: ValName)
extends MixedAdapterNode(TLAsyncImp, TLImp)(
dFn = { p => p.base.v1copy(minLatency = p.base.minLatency + async.sync) },
uFn = { p => TLAsyncManagerPortParameters(async, p) }) with FormatNode[TLAsyncEdgeParameters, TLEdgeOut]
// Rationally related crossings
trait TLRationalFormatNode extends FormatNode[TLRationalEdgeParameters, TLRationalEdgeParameters]
object TLRationalImp extends SimpleNodeImp[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalEdgeParameters, TLRationalBundle]
{
def edge(pd: TLRationalClientPortParameters, pu: TLRationalManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLRationalEdgeParameters(pd, pu, p, sourceInfo)
def bundle(e: TLRationalEdgeParameters) = new TLRationalBundle(e.bundle)
def render(e: TLRationalEdgeParameters) = RenderedEdge(colour = "#00ff00" /* green */)
override def mixO(pd: TLRationalClientPortParameters, node: OutwardNode[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalBundle]): TLRationalClientPortParameters =
pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }))
override def mixI(pu: TLRationalManagerPortParameters, node: InwardNode[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalBundle]): TLRationalManagerPortParameters =
pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }))
}
case class TLRationalAdapterNode(
clientFn: TLRationalClientPortParameters => TLRationalClientPortParameters = { s => s },
managerFn: TLRationalManagerPortParameters => TLRationalManagerPortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLRationalImp)(clientFn, managerFn) with TLRationalFormatNode
case class TLRationalIdentityNode()(implicit valName: ValName) extends IdentityNode(TLRationalImp)() with TLRationalFormatNode
object TLRationalNameNode {
def apply(name: ValName) = TLRationalIdentityNode()(name)
def apply(name: Option[String]): TLRationalIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLRationalIdentityNode = apply(Some(name))
}
case class TLRationalSourceNode()(implicit valName: ValName)
extends MixedAdapterNode(TLImp, TLRationalImp)(
dFn = { p => TLRationalClientPortParameters(p) },
uFn = { p => p.base.v1copy(minLatency = 1) }) with FormatNode[TLEdgeIn, TLRationalEdgeParameters] // discard cycles from other clock domain
case class TLRationalSinkNode(direction: RationalDirection)(implicit valName: ValName)
extends MixedAdapterNode(TLRationalImp, TLImp)(
dFn = { p => p.base.v1copy(minLatency = 1) },
uFn = { p => TLRationalManagerPortParameters(direction, p) }) with FormatNode[TLRationalEdgeParameters, TLEdgeOut]
// Credited version of TileLink channels
trait TLCreditedFormatNode extends FormatNode[TLCreditedEdgeParameters, TLCreditedEdgeParameters]
object TLCreditedImp extends SimpleNodeImp[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedEdgeParameters, TLCreditedBundle]
{
def edge(pd: TLCreditedClientPortParameters, pu: TLCreditedManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLCreditedEdgeParameters(pd, pu, p, sourceInfo)
def bundle(e: TLCreditedEdgeParameters) = new TLCreditedBundle(e.bundle)
def render(e: TLCreditedEdgeParameters) = RenderedEdge(colour = "#ffff00" /* yellow */, e.delay.toString)
override def mixO(pd: TLCreditedClientPortParameters, node: OutwardNode[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedBundle]): TLCreditedClientPortParameters =
pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }))
override def mixI(pu: TLCreditedManagerPortParameters, node: InwardNode[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedBundle]): TLCreditedManagerPortParameters =
pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }))
}
case class TLCreditedAdapterNode(
clientFn: TLCreditedClientPortParameters => TLCreditedClientPortParameters = { s => s },
managerFn: TLCreditedManagerPortParameters => TLCreditedManagerPortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLCreditedImp)(clientFn, managerFn) with TLCreditedFormatNode
case class TLCreditedIdentityNode()(implicit valName: ValName) extends IdentityNode(TLCreditedImp)() with TLCreditedFormatNode
object TLCreditedNameNode {
def apply(name: ValName) = TLCreditedIdentityNode()(name)
def apply(name: Option[String]): TLCreditedIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLCreditedIdentityNode = apply(Some(name))
}
case class TLCreditedSourceNode(delay: TLCreditedDelay)(implicit valName: ValName)
extends MixedAdapterNode(TLImp, TLCreditedImp)(
dFn = { p => TLCreditedClientPortParameters(delay, p) },
uFn = { p => p.base.v1copy(minLatency = 1) }) with FormatNode[TLEdgeIn, TLCreditedEdgeParameters] // discard cycles from other clock domain
case class TLCreditedSinkNode(delay: TLCreditedDelay)(implicit valName: ValName)
extends MixedAdapterNode(TLCreditedImp, TLImp)(
dFn = { p => p.base.v1copy(minLatency = 1) },
uFn = { p => TLCreditedManagerPortParameters(delay, p) }) with FormatNode[TLCreditedEdgeParameters, TLEdgeOut]
File LazyModuleImp.scala:
package org.chipsalliance.diplomacy.lazymodule
import chisel3.{withClockAndReset, Module, RawModule, Reset, _}
import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo}
import firrtl.passes.InlineAnnotation
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.nodes.Dangle
import scala.collection.immutable.SortedMap
/** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]].
*
* This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy.
*/
sealed trait LazyModuleImpLike extends RawModule {
/** [[LazyModule]] that contains this instance. */
val wrapper: LazyModule
/** IOs that will be automatically "punched" for this instance. */
val auto: AutoBundle
/** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */
protected[diplomacy] val dangles: Seq[Dangle]
// [[wrapper.module]] had better not be accessed while LazyModules are still being built!
require(
LazyModule.scope.isEmpty,
s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}"
)
/** Set module name. Defaults to the containing LazyModule's desiredName. */
override def desiredName: String = wrapper.desiredName
suggestName(wrapper.suggestedName)
/** [[Parameters]] for chisel [[Module]]s. */
implicit val p: Parameters = wrapper.p
/** Instantiate this [[LazyModule]], returning the [[AutoBundle]] and unconnected [[Dangle]]s from this module and
* submodules.
*/
protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = {
// 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]],
// 2. return [[Dangle]]s from each module.
val childDangles = wrapper.children.reverse.flatMap { c =>
implicit val sourceInfo: SourceInfo = c.info
c.cloneProto.map { cp =>
// If the child is a clone, then recursively set cloneProto of its children as well
def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = {
require(bases.size == clones.size)
(bases.zip(clones)).map { case (l, r) =>
require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}")
l.cloneProto = Some(r)
assignCloneProtos(l.children, r.children)
}
}
assignCloneProtos(c.children, cp.children)
// Clone the child module as a record, and get its [[AutoBundle]]
val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName)
val clonedAuto = clone("auto").asInstanceOf[AutoBundle]
// Get the empty [[Dangle]]'s of the cloned child
val rawDangles = c.cloneDangles()
require(rawDangles.size == clonedAuto.elements.size)
// Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s
val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) }
dangles
}.getOrElse {
// For non-clones, instantiate the child module
val mod = try {
Module(c.module)
} catch {
case e: ChiselException => {
println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}")
throw e
}
}
mod.dangles
}
}
// Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]].
// This will result in a sequence of [[Dangle]] from these [[BaseNode]]s.
val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate())
// Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]]
val allDangles = nodeDangles ++ childDangles
// Group [[allDangles]] by their [[source]].
val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*)
// For each [[source]] set of [[Dangle]]s of size 2, ensure that these
// can be connected as a source-sink pair (have opposite flipped value).
// Make the connection and mark them as [[done]].
val done = Set() ++ pairing.values.filter(_.size == 2).map {
case Seq(a, b) =>
require(a.flipped != b.flipped)
// @todo <> in chisel3 makes directionless connection.
if (a.flipped) {
a.data <> b.data
} else {
b.data <> a.data
}
a.source
case _ => None
}
// Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module.
val forward = allDangles.filter(d => !done(d.source))
// Generate [[AutoBundle]] IO from [[forward]].
val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*))
// Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]]
val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) =>
if (d.flipped) {
d.data <> io
} else {
io <> d.data
}
d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name)
}
// Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]].
wrapper.inModuleBody.reverse.foreach {
_()
}
if (wrapper.shouldBeInlined) {
chisel3.experimental.annotate(new ChiselAnnotation {
def toFirrtl = InlineAnnotation(toNamed)
})
}
// Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]].
(auto, dangles)
}
}
/** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike {
/** Instantiate hardware of this `Module`. */
val (auto, dangles) = instantiate()
}
/** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike {
// These wires are the default clock+reset for all LazyModule children.
// It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the
// [[LazyRawModuleImp]] children.
// Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly.
/** drive clock explicitly. */
val childClock: Clock = Wire(Clock())
/** drive reset explicitly. */
val childReset: Reset = Wire(Reset())
// the default is that these are disabled
childClock := false.B.asClock
childReset := chisel3.DontCare
def provideImplicitClockToLazyChildren: Boolean = false
val (auto, dangles) =
if (provideImplicitClockToLazyChildren) {
withClockAndReset(childClock, childReset) { instantiate() }
} else {
instantiate()
}
}
| module TLFragmenter_TileClockGater( // @[Fragmenter.scala:92:9]
input clock, // @[Fragmenter.scala:92:9]
input reset, // @[Fragmenter.scala:92:9]
output auto_anon_in_a_ready, // @[LazyModuleImp.scala:107:25]
input auto_anon_in_a_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_anon_in_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_anon_in_a_bits_param, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_anon_in_a_bits_size, // @[LazyModuleImp.scala:107:25]
input [6:0] auto_anon_in_a_bits_source, // @[LazyModuleImp.scala:107:25]
input [20:0] auto_anon_in_a_bits_address, // @[LazyModuleImp.scala:107:25]
input [7:0] auto_anon_in_a_bits_mask, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_anon_in_a_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_anon_in_a_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input auto_anon_in_d_ready, // @[LazyModuleImp.scala:107:25]
output auto_anon_in_d_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_anon_in_d_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_anon_in_d_bits_size, // @[LazyModuleImp.scala:107:25]
output [6:0] auto_anon_in_d_bits_source, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_anon_in_d_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_anon_out_a_ready, // @[LazyModuleImp.scala:107:25]
output auto_anon_out_a_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_anon_out_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_anon_out_a_bits_param, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_anon_out_a_bits_size, // @[LazyModuleImp.scala:107:25]
output [10:0] auto_anon_out_a_bits_source, // @[LazyModuleImp.scala:107:25]
output [20:0] auto_anon_out_a_bits_address, // @[LazyModuleImp.scala:107:25]
output [7:0] auto_anon_out_a_bits_mask, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_anon_out_a_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_anon_out_a_bits_corrupt, // @[LazyModuleImp.scala:107:25]
output auto_anon_out_d_ready, // @[LazyModuleImp.scala:107:25]
input auto_anon_out_d_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_anon_out_d_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_anon_out_d_bits_size, // @[LazyModuleImp.scala:107:25]
input [10:0] auto_anon_out_d_bits_source, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_anon_out_d_bits_data // @[LazyModuleImp.scala:107:25]
);
wire _repeater_io_full; // @[Fragmenter.scala:274:30]
wire _repeater_io_enq_ready; // @[Fragmenter.scala:274:30]
wire _repeater_io_deq_valid; // @[Fragmenter.scala:274:30]
wire [2:0] _repeater_io_deq_bits_opcode; // @[Fragmenter.scala:274:30]
wire [2:0] _repeater_io_deq_bits_size; // @[Fragmenter.scala:274:30]
wire [6:0] _repeater_io_deq_bits_source; // @[Fragmenter.scala:274:30]
wire [20:0] _repeater_io_deq_bits_address; // @[Fragmenter.scala:274:30]
wire [7:0] _repeater_io_deq_bits_mask; // @[Fragmenter.scala:274:30]
reg [2:0] acknum; // @[Fragmenter.scala:201:29]
reg [2:0] dOrig; // @[Fragmenter.scala:202:24]
reg dToggle; // @[Fragmenter.scala:203:30]
wire dFirst = acknum == 3'h0; // @[Fragmenter.scala:201:29, :205:29]
wire [5:0] _dsizeOH1_T = 6'h7 << auto_anon_out_d_bits_size; // @[package.scala:243:71]
wire [2:0] _GEN = ~(auto_anon_out_d_bits_source[2:0]); // @[package.scala:241:49]
wire [2:0] dFirst_size_hi = auto_anon_out_d_bits_source[2:0] & {1'h1, _GEN[2:1]}; // @[OneHot.scala:30:18]
wire [2:0] _dFirst_size_T_8 = {1'h0, dFirst_size_hi[2:1]} | ~(_dsizeOH1_T[2:0]) & {_GEN[0], _dsizeOH1_T[2:1]}; // @[OneHot.scala:30:18, :31:18, :32:28]
wire [2:0] dFirst_size = {|dFirst_size_hi, |(_dFirst_size_T_8[2:1]), _dFirst_size_T_8[2] | _dFirst_size_T_8[0]}; // @[OneHot.scala:30:18, :31:18, :32:{10,14,28}]
wire drop = ~(auto_anon_out_d_bits_opcode[0]) & (|(auto_anon_out_d_bits_source[2:0])); // @[Fragmenter.scala:204:41, :206:30, :234:{20,30}]
wire anonOut_d_ready = auto_anon_in_d_ready | drop; // @[Fragmenter.scala:234:30, :235:35]
wire anonIn_d_valid = auto_anon_out_d_valid & ~drop; // @[Fragmenter.scala:234:30, :236:{36,39}]
wire [2:0] anonIn_d_bits_size = dFirst ? dFirst_size : dOrig; // @[OneHot.scala:32:10]
wire [12:0] _aOrigOH1_T = 13'h3F << _repeater_io_deq_bits_size; // @[package.scala:243:71]
reg [2:0] gennum; // @[Fragmenter.scala:303:29]
wire aFirst = gennum == 3'h0; // @[Fragmenter.scala:303:29, :304:29]
wire [2:0] aFragnum = aFirst ? ~(_aOrigOH1_T[5:3]) : gennum - 3'h1; // @[package.scala:243:{46,71,76}]
reg aToggle_r; // @[Fragmenter.scala:309:54] |
Generate the Verilog code corresponding to the following Chisel files.
File fetch-target-queue.scala:
//******************************************************************************
// Copyright (c) 2015 - 2019, The Regents of the University of California (Regents).
// All Rights Reserved. See LICENSE and LICENSE.SiFive for license details.
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
// Fetch Target Queue (FTQ)
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
//
// Each entry in the FTQ holds the fetch address and branch prediction snapshot state.
//
// TODO:
// * reduce port counts.
package boom.v4.ifu
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.{Parameters}
import freechips.rocketchip.util.{Str}
import boom.v4.common._
import boom.v4.exu._
import boom.v4.util._
/**
* FTQ Parameters used in configurations
*
* @param nEntries # of entries in the FTQ
*/
case class FtqParameters(
nEntries: Int = 16
)
/**
* Bundle to add to the FTQ RAM and to be used as the pass in IO
*/
class FTQBundle(implicit p: Parameters) extends BoomBundle
with HasBoomFrontendParameters
{
// IDX of instruction that was predicted taken, if any
val cfi_idx = Valid(UInt(log2Ceil(fetchWidth).W))
// Was the CFI in this bundle found to be taken or not?
val cfi_taken = Bool()
// Was this CFI mispredicted by the branch prediction pipeline?
val cfi_mispredicted = Bool()
// What type of CFI was taken out of this bundle
val cfi_type = UInt(CFI_SZ.W)
// mask of branches which were visible in this fetch bundle
val br_mask = UInt(fetchWidth.W)
// This CFI is likely a CALL
val cfi_is_call = Bool()
// This CFI is likely a RET
val cfi_is_ret = Bool()
// Is the NPC after the CFI +4 or +2
val cfi_npc_plus4 = Bool()
// What was the top of the RAS that this bundle saw?
val ras_top = UInt(vaddrBitsExtended.W)
val ras_idx = UInt(log2Ceil(nRasEntries).W)
// Which bank did this start from?
val start_bank = UInt(1.W)
}
class FTQInfo(implicit p: Parameters) extends BoomBundle
{
val valid = Bool()
val entry = new FTQBundle
val ghist = new GlobalHistory
val pc = UInt(vaddrBitsExtended.W)
}
/**
* Queue to store the fetch PC and other relevant branch predictor signals that are inflight in the
* processor.
*
* @param num_entries # of entries in the FTQ
*/
class FetchTargetQueue(implicit p: Parameters) extends BoomModule
with HasBoomCoreParameters
with HasBoomFrontendParameters
{
val num_entries = ftqSz
private val idx_sz = log2Ceil(num_entries)
val io = IO(new BoomBundle {
// Enqueue one entry for every fetch cycle.
val enq = Flipped(Decoupled(new FetchBundle()))
// Pass to FetchBuffer (newly fetched instructions).
val enq_idx = Output(UInt(idx_sz.W))
// ROB tells us the youngest committed ftq_idx to remove from FTQ.
val deq = Flipped(Valid(UInt(idx_sz.W)))
// Give PC info to BranchUnit.
val arb_ftq_reqs = Input(Vec(3, UInt(log2Ceil(ftqSz).W)))
val rrd_ftq_resps = Output(Vec(3, new FTQInfo))
val com_pc = Output(UInt(vaddrBitsExtended.W))
// Used to regenerate PC for trace port stuff in FireSim
// Don't tape this out, this blows up the FTQ
val debug_ftq_idx = Input(Vec(coreWidth, UInt(log2Ceil(ftqSz).W)))
val debug_fetch_pc = Output(Vec(coreWidth, UInt(vaddrBitsExtended.W)))
val redirect = Input(Valid(UInt(idx_sz.W)))
val brupdate = Input(new BrUpdateInfo)
val bpdupdate = Output(Valid(new BranchPredictionUpdate))
val ras_update = Output(Bool())
val ras_update_idx = Output(UInt(log2Ceil(nRasEntries).W))
val ras_update_pc = Output(UInt(vaddrBitsExtended.W))
})
val bpd_ptr = RegInit(0.U(idx_sz.W))
val deq_ptr = RegInit(0.U(idx_sz.W))
val enq_ptr = RegInit(1.U(idx_sz.W))
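// Added commentary (interpretation of the code below): full asserts while enq_ptr is
// within two entries of bpd_ptr; the extra slot of slack covers the fact that enq.ready
// is registered (see the RegNext further down), so an enqueue may still land one cycle
// after full first asserts.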
val full = ((WrapInc(WrapInc(enq_ptr, num_entries), num_entries) === bpd_ptr) ||
(WrapInc(enq_ptr, num_entries) === bpd_ptr))
val pcs = Reg(Vec(num_entries, UInt(vaddrBitsExtended.W)))
val meta = SyncReadMem(num_entries, Vec(nBanks, UInt(bpdMaxMetaLength.W)))
val ram = Reg(Vec(num_entries, new FTQBundle))
val ghist = Seq.fill(2) { SyncReadMem(num_entries, new GlobalHistory) }
val lhist = if (useLHist) {
Some(SyncReadMem(num_entries, Vec(nBanks, UInt(localHistoryLength.W))))
} else {
None
}
val do_enq = io.enq.fire
// This register lets us initialize the ghist to 0
val prev_ghist = RegInit((0.U).asTypeOf(new GlobalHistory))
val prev_entry = RegInit((0.U).asTypeOf(new FTQBundle))
val prev_pc = RegInit(0.U(vaddrBitsExtended.W))
when (do_enq) {
pcs(enq_ptr) := io.enq.bits.pc
val new_entry = Wire(new FTQBundle)
new_entry.cfi_idx := io.enq.bits.cfi_idx
// Initially, if we see a CFI, it is assumed to be taken.
// Branch resolutions may change this
new_entry.cfi_taken := io.enq.bits.cfi_idx.valid
new_entry.cfi_mispredicted := false.B
new_entry.cfi_type := io.enq.bits.cfi_type
new_entry.cfi_is_call := io.enq.bits.cfi_is_call
new_entry.cfi_is_ret := io.enq.bits.cfi_is_ret
new_entry.cfi_npc_plus4 := io.enq.bits.cfi_npc_plus4
new_entry.ras_top := io.enq.bits.ras_top
new_entry.ras_idx := io.enq.bits.ghist.ras_idx
new_entry.br_mask := io.enq.bits.br_mask & io.enq.bits.mask
new_entry.start_bank := bank(io.enq.bits.pc)
val new_ghist = Mux(io.enq.bits.ghist.current_saw_branch_not_taken,
io.enq.bits.ghist,
prev_ghist.update(
prev_entry.br_mask,
prev_entry.cfi_taken,
prev_entry.br_mask(prev_entry.cfi_idx.bits),
prev_entry.cfi_idx.bits,
prev_entry.cfi_idx.valid,
prev_pc,
prev_entry.cfi_is_call,
prev_entry.cfi_is_ret
)
)
lhist.map( l => l.write(enq_ptr, io.enq.bits.lhist))
ghist.map( g => g.write(enq_ptr, new_ghist))
meta.write(enq_ptr, io.enq.bits.bpd_meta)
ram(enq_ptr) := new_entry
prev_pc := io.enq.bits.pc
prev_entry := new_entry
prev_ghist := new_ghist
enq_ptr := WrapInc(enq_ptr, num_entries)
}
io.enq_idx := enq_ptr
io.bpdupdate.valid := false.B
io.bpdupdate.bits := DontCare
when (io.deq.valid) {
deq_ptr := io.deq.bits
}
// This register avoids a spurious bpd update on the first fetch packet
val first_empty = RegInit(true.B)
// We can update the branch predictors when we know the target of the
// CFI in this fetch bundle
val ras_update = WireInit(false.B)
val ras_update_pc = WireInit(0.U(vaddrBitsExtended.W))
val ras_update_idx = WireInit(0.U(log2Ceil(nRasEntries).W))
io.ras_update := RegNext(ras_update)
io.ras_update_pc := RegNext(ras_update_pc)
io.ras_update_idx := RegNext(ras_update_idx)
val bpd_update_mispredict = RegInit(false.B)
val bpd_update_repair = RegInit(false.B)
val bpd_repair_idx = Reg(UInt(log2Ceil(ftqSz).W))
val bpd_end_idx = Reg(UInt(log2Ceil(ftqSz).W))
val bpd_repair_pc = Reg(UInt(vaddrBitsExtended.W))
val bpd_idx = Mux(io.redirect.valid, io.redirect.bits,
Mux(bpd_update_repair || bpd_update_mispredict, bpd_repair_idx, bpd_ptr))
val bpd_entry = RegNext(ram(bpd_idx))
val bpd_ghist = ghist(0).read(bpd_idx, true.B)
val bpd_lhist = if (useLHist) {
lhist.get.read(bpd_idx, true.B)
} else {
VecInit(Seq.fill(nBanks) { 0.U })
}
val bpd_meta = meta.read(bpd_idx, true.B) // TODO fix these SRAMs
val bpd_pc = RegNext(pcs(bpd_idx))
val bpd_target = RegNext(pcs(WrapInc(bpd_idx, num_entries)))
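// Added commentary (interpretation of the state machine below): after a misprediction the
// update stream first replays the mispredicted entry as a mispredict update, then walks
// forward through younger entries as "repair" updates until it reaches the enqueue point
// captured in bpd_end_idx (or comes back to the same PC); normal commit-time updates then
// resume from bpd_ptr.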
when (io.redirect.valid) {
bpd_update_mispredict := false.B
bpd_update_repair := false.B
} .elsewhen (RegNext(io.brupdate.b2.mispredict)) {
bpd_update_mispredict := true.B
bpd_repair_idx := RegNext(io.brupdate.b2.uop.ftq_idx)
bpd_end_idx := RegNext(enq_ptr)
} .elsewhen (bpd_update_mispredict) {
bpd_update_mispredict := false.B
bpd_update_repair := true.B
bpd_repair_idx := WrapInc(bpd_repair_idx, num_entries)
} .elsewhen (bpd_update_repair && RegNext(bpd_update_mispredict)) {
bpd_repair_pc := bpd_pc
bpd_repair_idx := WrapInc(bpd_repair_idx, num_entries)
} .elsewhen (bpd_update_repair) {
bpd_repair_idx := WrapInc(bpd_repair_idx, num_entries)
when (WrapInc(bpd_repair_idx, num_entries) === bpd_end_idx ||
bpd_pc === bpd_repair_pc) {
bpd_update_repair := false.B
}
}
val do_commit_update = (!bpd_update_mispredict &&
!bpd_update_repair &&
bpd_ptr =/= deq_ptr &&
enq_ptr =/= WrapInc(bpd_ptr, num_entries) &&
!io.brupdate.b2.mispredict &&
!io.redirect.valid && !RegNext(io.redirect.valid))
val do_mispredict_update = bpd_update_mispredict
val do_repair_update = bpd_update_repair
when (RegNext(do_commit_update || do_repair_update || do_mispredict_update)) {
val cfi_idx = bpd_entry.cfi_idx.bits
val valid_repair = bpd_pc =/= bpd_repair_pc
io.bpdupdate.valid := (!first_empty &&
(bpd_entry.cfi_idx.valid || bpd_entry.br_mask =/= 0.U) &&
!(RegNext(do_repair_update) && !valid_repair))
io.bpdupdate.bits.is_mispredict_update := RegNext(do_mispredict_update)
io.bpdupdate.bits.is_repair_update := RegNext(do_repair_update)
io.bpdupdate.bits.pc := bpd_pc
io.bpdupdate.bits.btb_mispredicts := 0.U
io.bpdupdate.bits.br_mask := Mux(bpd_entry.cfi_idx.valid,
MaskLower(UIntToOH(cfi_idx)) & bpd_entry.br_mask, bpd_entry.br_mask)
io.bpdupdate.bits.cfi_idx := bpd_entry.cfi_idx
io.bpdupdate.bits.cfi_mispredicted := bpd_entry.cfi_mispredicted
io.bpdupdate.bits.cfi_taken := bpd_entry.cfi_taken
io.bpdupdate.bits.target := bpd_target
io.bpdupdate.bits.cfi_is_br := bpd_entry.br_mask(cfi_idx)
io.bpdupdate.bits.cfi_is_jal := bpd_entry.cfi_type === CFI_JAL || bpd_entry.cfi_type === CFI_JALR
io.bpdupdate.bits.ghist := bpd_ghist
io.bpdupdate.bits.lhist := bpd_lhist
io.bpdupdate.bits.meta := bpd_meta
first_empty := false.B
}
when (do_commit_update) {
bpd_ptr := WrapInc(bpd_ptr, num_entries)
}
io.enq.ready := RegNext(!full || do_commit_update)
val redirect_idx = io.redirect.bits
val redirect_entry = ram(redirect_idx)
val redirect_new_entry = WireInit(redirect_entry)
when (io.redirect.valid) {
enq_ptr := WrapInc(io.redirect.bits, num_entries)
when (io.brupdate.b2.mispredict) {
val new_cfi_idx = (io.brupdate.b2.uop.pc_lob ^
Mux(redirect_entry.start_bank === 1.U, 1.U << log2Ceil(bankBytes), 0.U))(log2Ceil(fetchWidth), 1)
redirect_new_entry.cfi_idx.valid := true.B
redirect_new_entry.cfi_idx.bits := new_cfi_idx
redirect_new_entry.cfi_mispredicted := true.B
redirect_new_entry.cfi_taken := io.brupdate.b2.taken
redirect_new_entry.cfi_is_call := redirect_entry.cfi_is_call && redirect_entry.cfi_idx.bits === new_cfi_idx
redirect_new_entry.cfi_is_ret := redirect_entry.cfi_is_ret && redirect_entry.cfi_idx.bits === new_cfi_idx
}
ras_update := true.B
ras_update_pc := redirect_entry.ras_top
ras_update_idx := redirect_entry.ras_idx
} .elsewhen (RegNext(io.redirect.valid)) {
prev_entry := RegNext(redirect_new_entry)
prev_ghist := bpd_ghist
prev_pc := bpd_pc
ram(RegNext(io.redirect.bits)) := RegNext(redirect_new_entry)
}
//-------------------------------------------------------------
// **** Core Read PCs ****
//-------------------------------------------------------------
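// Added commentary: three read ports serve the branch unit's PC lookups. The PC read is
// bypassed from the in-flight enqueue when the requested index equals enq_ptr this cycle,
// and the response is marked valid only if the entry already exists or is being enqueued
// right now.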
for (i <- 0 until 3) {
val idx = Mux(reset.asBool, 0.U(log2Ceil(ftqSz).W), io.arb_ftq_reqs(i))
val is_enq = (idx === enq_ptr) && io.enq.fire
val get_entry = ram(idx)
io.rrd_ftq_resps(i).entry := RegNext(get_entry)
if (i == 0) {
io.rrd_ftq_resps(i).ghist := ghist(1).read(idx, true.B)
} else {
io.rrd_ftq_resps(i).ghist := DontCare
}
io.rrd_ftq_resps(i).pc := RegNext(Mux(is_enq, io.enq.bits.pc, pcs(idx)))
io.rrd_ftq_resps(i).valid := RegNext(idx =/= enq_ptr || is_enq)
}
io.com_pc := RegNext(pcs(Mux(io.deq.valid, io.deq.bits, deq_ptr)))
for (w <- 0 until coreWidth) {
io.debug_fetch_pc(w) := RegNext(pcs(io.debug_ftq_idx(w)))
}
}
| module meta( // @[fetch-target-queue.scala:129:29]
input [4:0] R0_addr,
input R0_clk,
output [239:0] R0_data,
input [4:0] W0_addr,
input W0_en,
input W0_clk,
input [239:0] W0_data
);
meta_ext meta_ext ( // @[fetch-target-queue.scala:129:29]
.R0_addr (R0_addr),
.R0_en (1'h1), // @[fetch-target-queue.scala:129:29]
.R0_clk (R0_clk),
.R0_data (R0_data),
.W0_addr (W0_addr),
.W0_en (W0_en),
.W0_clk (W0_clk),
.W0_data (W0_data)
); // @[fetch-target-queue.scala:129:29]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File package.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip
import chisel3._
import chisel3.util._
import scala.math.min
import scala.collection.{immutable, mutable}
package object util {
implicit class UnzippableOption[S, T](val x: Option[(S, T)]) {
def unzip = (x.map(_._1), x.map(_._2))
}
implicit class UIntIsOneOf(private val x: UInt) extends AnyVal {
def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR
def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq)
}
implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal {
/** Like Vec.apply(idx), but tolerates indices of mismatched width */
def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0))
}
implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal {
def apply(idx: UInt): T = {
if (x.size <= 1) {
x.head
} else if (!isPow2(x.size)) {
// For non-power-of-2 seqs, reflect elements to simplify decoder
(x ++ x.takeRight(x.size & -x.size)).toSeq(idx)
} else {
// Ignore MSBs of idx
val truncIdx =
if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx
else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0)
x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) }
}
}
def extract(idx: UInt): T = VecInit(x).extract(idx)
def asUInt: UInt = Cat(x.map(_.asUInt).reverse)
def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n)
def rotate(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n)
def rotateRight(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
}
// allow bitwise ops on Seq[Bool] just like UInt
implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal {
def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b }
def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b }
def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b }
def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x
def >> (n: Int): Seq[Bool] = x drop n
def unary_~ : Seq[Bool] = x.map(!_)
def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_)
def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_)
def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_)
private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B)
}
implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal {
def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable))
def getElements: Seq[Element] = x match {
case e: Element => Seq(e)
case a: Aggregate => a.getElements.flatMap(_.getElements)
}
}
/** Any Data subtype that has a Bool member named valid. */
type DataCanBeValid = Data { val valid: Bool }
implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal {
def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable)
}
implicit class StringToAugmentedString(private val x: String) extends AnyVal {
/** converts from camel case to underscores, also removing all spaces */
def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") {
case (acc, c) if c.isUpper => acc + "_" + c.toLower
case (acc, c) if c == ' ' => acc
case (acc, c) => acc + c
}
/** converts spaces or underscores to hyphens, also lowering case */
def kebab: String = x.toLowerCase map {
case ' ' => '-'
case '_' => '-'
case c => c
}
def named(name: Option[String]): String = {
x + name.map("_named_" + _ ).getOrElse("_with_no_name")
}
def named(name: String): String = named(Some(name))
}
implicit def uintToBitPat(x: UInt): BitPat = BitPat(x)
implicit def wcToUInt(c: WideCounter): UInt = c.value
implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal {
def sextTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x)
}
def padTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(0.U((n - x.getWidth).W), x)
}
// shifts left by n if n >= 0, or right by -n if n < 0
def << (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << n(w-1, 0)
Mux(n(w), shifted >> (1 << w), shifted)
}
// shifts right by n if n >= 0, or left by -n if n < 0
def >> (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << (1 << w) >> n(w-1, 0)
Mux(n(w), shifted, shifted >> (1 << w))
}
// Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts
def extract(hi: Int, lo: Int): UInt = {
require(hi >= lo-1)
if (hi == lo-1) 0.U
else x(hi, lo)
}
// Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts
def extractOption(hi: Int, lo: Int): Option[UInt] = {
require(hi >= lo-1)
if (hi == lo-1) None
else Some(x(hi, lo))
}
// like x & ~y, but first truncate or zero-extend y to x's width
def andNot(y: UInt): UInt = x & ~(y | (x & 0.U))
def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n)
def rotateRight(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r))
}
}
def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n))
def rotateLeft(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r))
}
}
// compute (this + y) % n, given (this < n) and (y < n)
def addWrap(y: UInt, n: Int): UInt = {
val z = x +& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0)
}
// compute (this - y) % n, given (this < n) and (y < n)
def subWrap(y: UInt, n: Int): UInt = {
val z = x -& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0)
}
def grouped(width: Int): Seq[UInt] =
(0 until x.getWidth by width).map(base => x(base + width - 1, base))
def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds
def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x)
// Like >=, but prevents x-prop for ('x >= 0)
def >== (y: UInt): Bool = x >= y || y === 0.U
}
implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal {
def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y)
def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y)
}
implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal {
def toInt: Int = if (x) 1 else 0
// this one's snagged from scalaz
def option[T](z: => T): Option[T] = if (x) Some(z) else None
}
implicit class IntToAugmentedInt(private val x: Int) extends AnyVal {
// exact log2
def log2: Int = {
require(isPow2(x))
log2Ceil(x)
}
}
def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x)
def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x))
def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0)
def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1)
def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None
// Fill 1s from low bits to high bits
def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth)
def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0))
helper(1, x)(width-1, 0)
}
// Fill 1s from high bits to low bits
def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth)
def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x >> s))
helper(1, x)(width-1, 0)
}
def OptimizationBarrier[T <: Data](in: T): T = {
val barrier = Module(new Module {
val io = IO(new Bundle {
val x = Input(chiselTypeOf(in))
val y = Output(chiselTypeOf(in))
})
io.y := io.x
override def desiredName = s"OptimizationBarrier_${in.typeName}"
})
barrier.io.x := in
barrier.io.y
}
/** Similar to Seq.groupBy except this returns a Seq instead of a Map
* Useful for deterministic code generation
*/
def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = {
val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]]
for (x <- xs) {
val key = f(x)
val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A])
l += x
}
map.view.map({ case (k, vs) => k -> vs.toList }).toList
}
def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match {
case 1 => List.fill(n)(in.head)
case x if x == n => in
case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in")
}
// HeterogeneousBag moved to standalone diplomacy
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts)
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag
}
File NBDcache.scala:
// See LICENSE.Berkeley for license details.
// See LICENSE.SiFive for license details.
package freechips.rocketchip.rocket
import chisel3._
import chisel3.util._
import chisel3.experimental.dataview._
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.tilelink._
import freechips.rocketchip.util._
trait HasMissInfo extends Bundle with HasL1HellaCacheParameters {
val tag_match = Bool()
val old_meta = new L1Metadata
val way_en = Bits(nWays.W)
}
class L1DataReadReq(implicit p: Parameters) extends L1HellaCacheBundle()(p) {
val way_en = Bits(nWays.W)
val addr = Bits(untagBits.W)
}
class L1DataWriteReq(implicit p: Parameters) extends L1DataReadReq()(p) {
val wmask = Bits(rowWords.W)
val data = Bits(encRowBits.W)
}
class L1RefillReq(implicit p: Parameters) extends L1DataReadReq()(p)
class Replay(implicit p: Parameters) extends HellaCacheReqInternal()(p) with HasCoreData
class ReplayInternal(implicit p: Parameters) extends HellaCacheReqInternal()(p)
with HasL1HellaCacheParameters {
val sdq_id = UInt(log2Up(cfg.nSDQ).W)
}
class MSHRReq(implicit p: Parameters) extends Replay()(p) with HasMissInfo
class MSHRReqInternal(implicit p: Parameters) extends ReplayInternal()(p) with HasMissInfo
class WritebackReq(params: TLBundleParameters)(implicit p: Parameters) extends L1HellaCacheBundle()(p) {
val tag = Bits(tagBits.W)
val idx = Bits(idxBits.W)
val source = UInt(params.sourceBits.W)
val param = UInt(TLPermissions.cWidth.W)
val way_en = Bits(nWays.W)
val voluntary = Bool()
}
class IOMSHR(id: Int)(implicit edge: TLEdgeOut, p: Parameters) extends L1HellaCacheModule()(p) {
val io = IO(new Bundle {
val req = Flipped(Decoupled(new HellaCacheReq))
val resp = Decoupled(new HellaCacheResp)
val mem_access = Decoupled(new TLBundleA(edge.bundle))
val mem_ack = Flipped(Valid(new TLBundleD(edge.bundle)))
val replay_next = Output(Bool())
val store_pending = Output(Bool())
})
def beatOffset(addr: UInt) = addr.extract(beatOffBits - 1, wordOffBits)
def wordFromBeat(addr: UInt, dat: UInt) = {
val shift = Cat(beatOffset(addr), 0.U((wordOffBits + log2Up(wordBytes)).W))
(dat >> shift)(wordBits - 1, 0)
}
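// beatOffset extracts the word index within a beat from the address;
// wordFromBeat then shifts the beat data so the addressed word ends up in the
// low wordBits bits (exact widths depend on the cache's word and beat sizes).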
val req = Reg(new HellaCacheReq)
val grant_word = Reg(UInt(wordBits.W))
val s_idle :: s_mem_access :: s_mem_ack :: s_resp_1 :: s_resp_2 :: Nil = Enum(5)
val state = RegInit(s_idle)
io.req.ready := (state === s_idle)
val loadgen = new LoadGen(req.size, req.signed, req.addr, grant_word, false.B, wordBytes)
val a_source = id.U
val a_address = req.addr
val a_size = req.size
val a_data = Fill(beatWords, req.data)
val get = edge.Get(a_source, a_address, a_size)._2
val put = edge.Put(a_source, a_address, a_size, a_data)._2
val atomics = if (edge.manager.anySupportLogical) {
MuxLookup(req.cmd, (0.U).asTypeOf(new TLBundleA(edge.bundle)))(Array(
M_XA_SWAP -> edge.Logical(a_source, a_address, a_size, a_data, TLAtomics.SWAP)._2,
M_XA_XOR -> edge.Logical(a_source, a_address, a_size, a_data, TLAtomics.XOR) ._2,
M_XA_OR -> edge.Logical(a_source, a_address, a_size, a_data, TLAtomics.OR) ._2,
M_XA_AND -> edge.Logical(a_source, a_address, a_size, a_data, TLAtomics.AND) ._2,
M_XA_ADD -> edge.Arithmetic(a_source, a_address, a_size, a_data, TLAtomics.ADD)._2,
M_XA_MIN -> edge.Arithmetic(a_source, a_address, a_size, a_data, TLAtomics.MIN)._2,
M_XA_MAX -> edge.Arithmetic(a_source, a_address, a_size, a_data, TLAtomics.MAX)._2,
M_XA_MINU -> edge.Arithmetic(a_source, a_address, a_size, a_data, TLAtomics.MINU)._2,
M_XA_MAXU -> edge.Arithmetic(a_source, a_address, a_size, a_data, TLAtomics.MAXU)._2))
} else {
// If no managers support atomics, fail an assertion if the processor asks for them
assert(state === s_idle || !isAMO(req.cmd))
(0.U).asTypeOf(new TLBundleA(edge.bundle))
}
assert(state === s_idle || req.cmd =/= M_XSC)
io.mem_access.valid := (state === s_mem_access)
io.mem_access.bits := Mux(isAMO(req.cmd), atomics, Mux(isRead(req.cmd), get, put))
io.replay_next := state === s_resp_1 || (state === s_resp_2 && !io.resp.ready)
io.resp.valid := state === s_resp_2
io.resp.bits.addr := req.addr
io.resp.bits.idx.foreach(_ := req.idx.get)
io.resp.bits.tag := req.tag
io.resp.bits.cmd := req.cmd
io.resp.bits.size := req.size
io.resp.bits.signed := req.signed
io.resp.bits.dprv := req.dprv
io.resp.bits.dv := req.dv
io.resp.bits.mask := req.mask
io.resp.bits.has_data := isRead(req.cmd)
io.resp.bits.data := loadgen.data
io.resp.bits.data_raw := grant_word
io.resp.bits.data_word_bypass := loadgen.wordData
io.resp.bits.store_data := req.data
io.resp.bits.replay := true.B
io.store_pending := state =/= s_idle && isWrite(req.cmd)
when (io.req.fire) {
req := io.req.bits
state := s_mem_access
}
when (io.mem_access.fire) {
state := s_mem_ack
}
when (state === s_mem_ack && io.mem_ack.valid) {
state := Mux(req.no_resp || !isRead(req.cmd), s_idle, s_resp_1)
when (isRead(req.cmd)) {
grant_word := wordFromBeat(req.addr, io.mem_ack.bits.data)
}
}
when (state === s_resp_1) {
state := s_resp_2
}
when (io.resp.fire) {
state := s_idle
}
}
class MSHR(id: Int)(implicit edge: TLEdgeOut, p: Parameters) extends L1HellaCacheModule()(p) {
val io = IO(new Bundle {
val req_pri_val = Input(Bool())
val req_pri_rdy = Output(Bool())
val req_sec_val = Input(Bool())
val req_sec_rdy = Output(Bool())
val req_bits = Input(new MSHRReqInternal())
val idx_match = Output(Bool())
val tag = Output(Bits(tagBits.W))
val mem_acquire = Decoupled(new TLBundleA(edge.bundle))
val mem_grant = Flipped(Valid(new TLBundleD(edge.bundle)))
val mem_finish = Decoupled(new TLBundleE(edge.bundle))
val refill = Output(new L1RefillReq()) // Data is bypassed
val meta_read = Decoupled(new L1MetaReadReq)
val meta_write = Decoupled(new L1MetaWriteReq)
val replay = Decoupled(new ReplayInternal)
val wb_req = Decoupled(new WritebackReq(edge.bundle))
val probe_rdy = Output(Bool())
})
val s_invalid :: s_wb_req :: s_wb_resp :: s_meta_clear :: s_refill_req :: s_refill_resp :: s_meta_write_req :: s_meta_write_resp :: s_drain_rpq :: Nil = Enum(9)
val state = RegInit(s_invalid)
val req = Reg(new MSHRReqInternal)
val req_idx = req.addr(untagBits-1,blockOffBits)
val req_tag = req.addr >> untagBits
val req_block_addr = (req.addr >> blockOffBits) << blockOffBits
val idx_match = req_idx === io.req_bits.addr(untagBits-1,blockOffBits)
val new_coh = RegInit(ClientMetadata.onReset)
val (_, shrink_param, coh_on_clear) = req.old_meta.coh.onCacheControl(M_FLUSH)
val grow_param = new_coh.onAccess(req.cmd)._2
val coh_on_grant = new_coh.onGrant(req.cmd, io.mem_grant.bits.param)
// We only accept secondary misses if we haven't yet sent an Acquire to outer memory
// or if the Acquire that was sent will obtain a Grant with sufficient permissions
// to let us replay this new request. I.e. we don't handle multiple outstanding
// Acquires on the same block for now.
val (cmd_requires_second_acquire, is_hit_again, _, dirtier_coh, dirtier_cmd) =
new_coh.onSecondaryAccess(req.cmd, io.req_bits.cmd)
val states_before_refill = Seq(s_wb_req, s_wb_resp, s_meta_clear)
val (_, _, refill_done, refill_address_inc) = edge.addr_inc(io.mem_grant)
val sec_rdy = idx_match &&
(state.isOneOf(states_before_refill) ||
(state.isOneOf(s_refill_req, s_refill_resp) &&
!cmd_requires_second_acquire && !refill_done))
val rpq = Module(new Queue(new ReplayInternal, cfg.nRPQ))
rpq.io.enq.valid := (io.req_pri_val && io.req_pri_rdy || io.req_sec_val && sec_rdy) && !isPrefetch(io.req_bits.cmd)
rpq.io.enq.bits := io.req_bits
rpq.io.deq.ready := (io.replay.ready && state === s_drain_rpq) || state === s_invalid
val acked = Reg(Bool())
when (io.mem_grant.valid) { acked := true.B }
when (state === s_drain_rpq && !rpq.io.deq.valid) {
state := s_invalid
}
when (state === s_meta_write_resp) {
// this wait state allows us to catch RAW hazards on the tags via nack_victim
state := s_drain_rpq
}
when (state === s_meta_write_req && io.meta_write.ready) {
state := s_meta_write_resp
}
when (state === s_refill_resp && refill_done) {
new_coh := coh_on_grant
state := s_meta_write_req
}
when (io.mem_acquire.fire) { // s_refill_req
state := s_refill_resp
}
when (state === s_meta_clear && io.meta_write.ready) {
state := s_refill_req
}
when (state === s_wb_resp && io.wb_req.ready && acked) {
state := s_meta_clear
}
when (io.wb_req.fire) { // s_wb_req
state := s_wb_resp
}
when (io.req_sec_val && io.req_sec_rdy) { // s_wb_req, s_wb_resp, s_refill_req
//If we get a secondary miss that needs more permissions before we've sent
// out the primary miss's Acquire, we can upgrade the permissions we're
// going to ask for in s_refill_req
req.cmd := dirtier_cmd
when (is_hit_again) {
new_coh := dirtier_coh
}
}
when (io.req_pri_val && io.req_pri_rdy) {
req := io.req_bits
acked := false.B
val old_coh = io.req_bits.old_meta.coh
val needs_wb = old_coh.onCacheControl(M_FLUSH)._1
val (is_hit, _, coh_on_hit) = old_coh.onAccess(io.req_bits.cmd)
when (io.req_bits.tag_match) {
when (is_hit) { // set dirty bit
new_coh := coh_on_hit
state := s_meta_write_req
}.otherwise { // upgrade permissions
new_coh := old_coh
state := s_refill_req
}
}.otherwise { // writeback if necessary and refill
new_coh := ClientMetadata.onReset
state := Mux(needs_wb, s_wb_req, s_meta_clear)
}
}
val grantackq = Module(new Queue(new TLBundleE(edge.bundle), 1))
val can_finish = state.isOneOf(s_invalid, s_refill_req)
grantackq.io.enq.valid := refill_done && edge.isRequest(io.mem_grant.bits)
grantackq.io.enq.bits := edge.GrantAck(io.mem_grant.bits)
io.mem_finish.valid := grantackq.io.deq.valid && can_finish
io.mem_finish.bits := grantackq.io.deq.bits
grantackq.io.deq.ready := io.mem_finish.ready && can_finish
io.idx_match := (state =/= s_invalid) && idx_match
io.refill.way_en := req.way_en
io.refill.addr := req_block_addr | refill_address_inc
io.tag := req_tag
io.req_pri_rdy := state === s_invalid
io.req_sec_rdy := sec_rdy && rpq.io.enq.ready
val meta_hazard = RegInit(0.U(2.W))
when (meta_hazard =/= 0.U) { meta_hazard := meta_hazard + 1.U }
when (io.meta_write.fire) { meta_hazard := 1.U }
io.probe_rdy := !idx_match || (!state.isOneOf(states_before_refill) && meta_hazard === 0.U)
io.meta_write.valid := state.isOneOf(s_meta_write_req, s_meta_clear)
io.meta_write.bits.idx := req_idx
io.meta_write.bits.tag := io.tag
io.meta_write.bits.data.coh := Mux(state === s_meta_clear, coh_on_clear, new_coh)
io.meta_write.bits.data.tag := io.tag
io.meta_write.bits.way_en := req.way_en
io.wb_req.valid := state === s_wb_req
io.wb_req.bits.source := id.U
io.wb_req.bits.tag := req.old_meta.tag
io.wb_req.bits.idx := req_idx
io.wb_req.bits.param := shrink_param
io.wb_req.bits.way_en := req.way_en
io.wb_req.bits.voluntary := true.B
io.mem_acquire.valid := state === s_refill_req && grantackq.io.enq.ready
io.mem_acquire.bits := edge.AcquireBlock(
fromSource = id.U,
toAddress = Cat(io.tag, req_idx) << blockOffBits,
lgSize = lgCacheBlockBytes.U,
growPermissions = grow_param)._2
io.meta_read.valid := state === s_drain_rpq
io.meta_read.bits.idx := req_idx
io.meta_read.bits.tag := io.tag
io.meta_read.bits.way_en := ~(0.U(nWays.W))
io.replay.valid := state === s_drain_rpq && rpq.io.deq.valid
io.replay.bits := rpq.io.deq.bits
io.replay.bits.phys := true.B
io.replay.bits.addr := Cat(io.tag, req_idx, rpq.io.deq.bits.addr(blockOffBits-1,0))
when (!io.meta_read.ready) {
rpq.io.deq.ready := false.B
io.replay.bits.cmd := M_FLUSH_ALL /* nop */
}
}
class MSHRFile(implicit edge: TLEdgeOut, p: Parameters) extends L1HellaCacheModule()(p) {
val io = IO(new Bundle {
val req = Flipped(Decoupled(new MSHRReq))
val resp = Decoupled(new HellaCacheResp)
val secondary_miss = Output(Bool())
val mem_acquire = Decoupled(new TLBundleA(edge.bundle))
val mem_grant = Flipped(Valid(new TLBundleD(edge.bundle)))
val mem_finish = Decoupled(new TLBundleE(edge.bundle))
val refill = Output(new L1RefillReq())
val meta_read = Decoupled(new L1MetaReadReq)
val meta_write = Decoupled(new L1MetaWriteReq)
val replay = Decoupled(new Replay)
val wb_req = Decoupled(new WritebackReq(edge.bundle))
val probe_rdy = Output(Bool())
val fence_rdy = Output(Bool())
val replay_next = Output(Bool())
val store_pending = Output(Bool())
})
// determine if the request is cacheable or not
val cacheable = edge.manager.supportsAcquireBFast(io.req.bits.addr, lgCacheBlockBytes.U)
val sdq_val = RegInit(0.U(cfg.nSDQ.W))
val sdq_alloc_id = PriorityEncoder(~sdq_val(cfg.nSDQ-1,0))
val sdq_rdy = !sdq_val.andR
val sdq_enq = io.req.valid && io.req.ready && cacheable && isWrite(io.req.bits.cmd)
val sdq = Mem(cfg.nSDQ, UInt(coreDataBits.W))
when (sdq_enq) { sdq(sdq_alloc_id) := io.req.bits.data }
val idxMatch = Wire(Vec(cfg.nMSHRs, Bool()))
val tagList = Wire(Vec(cfg.nMSHRs, Bits(tagBits.W)))
val tag_match = Mux1H(idxMatch, tagList) === io.req.bits.addr >> untagBits
val wbTagList = Wire(Vec(cfg.nMSHRs, Bits()))
val refillMux = Wire(Vec(cfg.nMSHRs, new L1RefillReq))
val meta_read_arb = Module(new Arbiter(new L1MetaReadReq, cfg.nMSHRs))
val meta_write_arb = Module(new Arbiter(new L1MetaWriteReq, cfg.nMSHRs))
val wb_req_arb = Module(new Arbiter(new WritebackReq(edge.bundle), cfg.nMSHRs))
val replay_arb = Module(new Arbiter(new ReplayInternal, cfg.nMSHRs))
val alloc_arb = Module(new Arbiter(Bool(), cfg.nMSHRs))
alloc_arb.io.in.foreach(_.bits := DontCare)
var idx_match = false.B
var pri_rdy = false.B
var sec_rdy = false.B
io.fence_rdy := true.B
io.probe_rdy := true.B
val mshrs = (0 until cfg.nMSHRs) map { i =>
val mshr = Module(new MSHR(i))
idxMatch(i) := mshr.io.idx_match
tagList(i) := mshr.io.tag
wbTagList(i) := mshr.io.wb_req.bits.tag
alloc_arb.io.in(i).valid := mshr.io.req_pri_rdy
mshr.io.req_pri_val := alloc_arb.io.in(i).ready
mshr.io.req_sec_val := io.req.valid && sdq_rdy && tag_match
mshr.io.req_bits.viewAsSupertype(new HellaCacheReqInternal) := io.req.bits.viewAsSupertype(new HellaCacheReqInternal)
mshr.io.req_bits.tag_match := io.req.bits.tag_match
mshr.io.req_bits.old_meta := io.req.bits.old_meta
mshr.io.req_bits.way_en := io.req.bits.way_en
mshr.io.req_bits.sdq_id := sdq_alloc_id
meta_read_arb.io.in(i) <> mshr.io.meta_read
meta_write_arb.io.in(i) <> mshr.io.meta_write
wb_req_arb.io.in(i) <> mshr.io.wb_req
replay_arb.io.in(i) <> mshr.io.replay
mshr.io.mem_grant.valid := io.mem_grant.valid && io.mem_grant.bits.source === i.U
mshr.io.mem_grant.bits := io.mem_grant.bits
refillMux(i) := mshr.io.refill
pri_rdy = pri_rdy || mshr.io.req_pri_rdy
sec_rdy = sec_rdy || mshr.io.req_sec_rdy
idx_match = idx_match || mshr.io.idx_match
when (!mshr.io.req_pri_rdy) { io.fence_rdy := false.B }
when (!mshr.io.probe_rdy) { io.probe_rdy := false.B }
mshr
}
alloc_arb.io.out.ready := io.req.valid && sdq_rdy && cacheable && !idx_match
io.meta_read <> meta_read_arb.io.out
io.meta_write <> meta_write_arb.io.out
io.wb_req <> wb_req_arb.io.out
val mmio_alloc_arb = Module(new Arbiter(Bool(), nIOMSHRs))
mmio_alloc_arb.io.in.foreach(_.bits := DontCare)
val resp_arb = Module(new Arbiter(new HellaCacheResp, nIOMSHRs))
var mmio_rdy = false.B
io.replay_next := false.B
val mmios = (0 until nIOMSHRs) map { i =>
val id = cfg.nMSHRs + i
val mshr = Module(new IOMSHR(id))
mmio_alloc_arb.io.in(i).valid := mshr.io.req.ready
mshr.io.req.valid := mmio_alloc_arb.io.in(i).ready
mshr.io.req.bits := io.req.bits
mmio_rdy = mmio_rdy || mshr.io.req.ready
mshr.io.mem_ack.bits := io.mem_grant.bits
mshr.io.mem_ack.valid := io.mem_grant.valid && io.mem_grant.bits.source === id.U
resp_arb.io.in(i) <> mshr.io.resp
when (!mshr.io.req.ready) { io.fence_rdy := false.B }
when (mshr.io.replay_next) { io.replay_next := true.B }
mshr
}
mmio_alloc_arb.io.out.ready := io.req.valid && !cacheable
TLArbiter.lowestFromSeq(edge, io.mem_acquire, mshrs.map(_.io.mem_acquire) ++ mmios.map(_.io.mem_access))
TLArbiter.lowestFromSeq(edge, io.mem_finish, mshrs.map(_.io.mem_finish))
io.store_pending := sdq_val =/= 0.U || mmios.map(_.io.store_pending).orR
io.resp <> resp_arb.io.out
io.req.ready := Mux(!cacheable,
mmio_rdy,
sdq_rdy && Mux(idx_match, tag_match && sec_rdy, pri_rdy))
io.secondary_miss := idx_match
io.refill := refillMux(io.mem_grant.bits.source)
val free_sdq = io.replay.fire && isWrite(io.replay.bits.cmd)
io.replay.bits.data := sdq(RegEnable(replay_arb.io.out.bits.sdq_id, free_sdq))
io.replay.bits.mask := 0.U
io.replay.valid := replay_arb.io.out.valid
replay_arb.io.out.ready := io.replay.ready
io.replay.bits.viewAsSupertype(new HellaCacheReqInternal) <> replay_arb.io.out.bits.viewAsSupertype(new HellaCacheReqInternal)
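// Store data queue (SDQ) bookkeeping: clear the entry freed by a completed
// write replay and/or mark the entry allocated for a newly accepted write.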
when (io.replay.valid || sdq_enq) {
sdq_val := sdq_val & ~(UIntToOH(replay_arb.io.out.bits.sdq_id) & Fill(cfg.nSDQ, free_sdq)) |
PriorityEncoderOH(~sdq_val(cfg.nSDQ-1,0)) & Fill(cfg.nSDQ, sdq_enq)
}
}
class WritebackUnit(implicit edge: TLEdgeOut, p: Parameters) extends L1HellaCacheModule()(p) {
val io = IO(new Bundle {
val req = Flipped(Decoupled(new WritebackReq(edge.bundle)))
val meta_read = Decoupled(new L1MetaReadReq)
val data_req = Decoupled(new L1DataReadReq)
val data_resp = Input(Bits(encRowBits.W))
val release = Decoupled(new TLBundleC(edge.bundle))
})
val req = Reg(new WritebackReq(edge.bundle))
val active = RegInit(false.B)
val r1_data_req_fired = RegInit(false.B)
val r2_data_req_fired = RegInit(false.B)
val data_req_cnt = RegInit(0.U(log2Up(refillCycles+1).W)) //TODO Zero width
val (_, last_beat, all_beats_done, beat_count) = edge.count(io.release)
io.release.valid := false.B
when (active) {
r1_data_req_fired := false.B
r2_data_req_fired := r1_data_req_fired
when (io.data_req.fire && io.meta_read.fire) {
r1_data_req_fired := true.B
data_req_cnt := data_req_cnt + 1.U
}
when (r2_data_req_fired) {
io.release.valid := true.B
when(!io.release.ready) {
r1_data_req_fired := false.B
r2_data_req_fired := false.B
data_req_cnt := data_req_cnt - Mux[UInt]((refillCycles > 1).B && r1_data_req_fired, 2.U, 1.U)
}
when(!r1_data_req_fired) {
// We're done if this is the final data request and the Release can be sent
active := data_req_cnt < refillCycles.U || !io.release.ready
}
}
}
when (io.req.fire) {
active := true.B
data_req_cnt := 0.U
req := io.req.bits
}
io.req.ready := !active
val fire = active && data_req_cnt < refillCycles.U
// We reissue the meta read as it sets up the mux ctrl for s2_data_muxed
io.meta_read.valid := fire
io.meta_read.bits.idx := req.idx
io.meta_read.bits.tag := req.tag
io.meta_read.bits.way_en := ~(0.U(nWays.W))
io.data_req.valid := fire
io.data_req.bits.way_en := req.way_en
io.data_req.bits.addr := (if(refillCycles > 1)
Cat(req.idx, data_req_cnt(log2Up(refillCycles)-1,0))
else req.idx) << rowOffBits
val r_address = Cat(req.tag, req.idx) << blockOffBits
val probeResponse = edge.ProbeAck(
fromSource = req.source,
toAddress = r_address,
lgSize = lgCacheBlockBytes.U,
reportPermissions = req.param,
data = io.data_resp)
val voluntaryRelease = edge.Release(
fromSource = req.source,
toAddress = r_address,
lgSize = lgCacheBlockBytes.U,
shrinkPermissions = req.param,
data = io.data_resp)._2
io.release.bits := Mux(req.voluntary, voluntaryRelease, probeResponse)
}
class ProbeUnit(implicit edge: TLEdgeOut, p: Parameters) extends L1HellaCacheModule()(p) {
val io = IO(new Bundle {
val req = Flipped(Decoupled(new TLBundleB(edge.bundle)))
val rep = Decoupled(new TLBundleC(edge.bundle))
val meta_read = Decoupled(new L1MetaReadReq)
val meta_write = Decoupled(new L1MetaWriteReq)
val wb_req = Decoupled(new WritebackReq(edge.bundle))
val way_en = Input(Bits(nWays.W))
val mshr_rdy = Input(Bool())
val block_state = Input(new ClientMetadata())
})
val (s_invalid :: s_meta_read :: s_meta_resp :: s_mshr_req ::
s_mshr_resp :: s_release :: s_writeback_req :: s_writeback_resp ::
s_meta_write :: Nil) = Enum(9)
val state = RegInit(s_invalid)
val req = Reg(new TLBundleB(edge.bundle))
val req_idx = req.address(idxMSB, idxLSB)
val req_tag = req.address >> untagBits
val way_en = Reg(Bits())
val tag_matches = way_en.orR
val old_coh = Reg(new ClientMetadata)
val miss_coh = ClientMetadata.onReset
val reply_coh = Mux(tag_matches, old_coh, miss_coh)
val (is_dirty, report_param, new_coh) = reply_coh.onProbe(req.param)
io.req.ready := state === s_invalid
io.rep.valid := state === s_release
io.rep.bits := edge.ProbeAck(req, report_param)
assert(!io.rep.valid || !edge.hasData(io.rep.bits),
"ProbeUnit should not send ProbeAcks with data, WritebackUnit should handle it")
io.meta_read.valid := state === s_meta_read
io.meta_read.bits.idx := req_idx
io.meta_read.bits.tag := req_tag
io.meta_read.bits.way_en := ~(0.U(nWays.W))
io.meta_write.valid := state === s_meta_write
io.meta_write.bits.way_en := way_en
io.meta_write.bits.idx := req_idx
io.meta_write.bits.tag := req_tag
io.meta_write.bits.data.tag := req_tag
io.meta_write.bits.data.coh := new_coh
io.wb_req.valid := state === s_writeback_req
io.wb_req.bits.source := req.source
io.wb_req.bits.idx := req_idx
io.wb_req.bits.tag := req_tag
io.wb_req.bits.param := report_param
io.wb_req.bits.way_en := way_en
io.wb_req.bits.voluntary := false.B
// state === s_invalid
when (io.req.fire) {
state := s_meta_read
req := io.req.bits
}
// state === s_meta_read
when (io.meta_read.fire) {
state := s_meta_resp
}
// we need to wait one cycle for the metadata to be read from the array
when (state === s_meta_resp) {
state := s_mshr_req
}
when (state === s_mshr_req) {
old_coh := io.block_state
way_en := io.way_en
// if the read didn't go through, we need to retry
state := Mux(io.mshr_rdy, s_mshr_resp, s_meta_read)
}
when (state === s_mshr_resp) {
state := Mux(tag_matches && is_dirty, s_writeback_req, s_release)
}
when (state === s_release && io.rep.ready) {
state := Mux(tag_matches, s_meta_write, s_invalid)
}
// state === s_writeback_req
when (io.wb_req.fire) {
state := s_writeback_resp
}
// wait for the writeback request to finish before updating the metadata
when (state === s_writeback_resp && io.wb_req.ready) {
state := s_meta_write
}
when (io.meta_write.fire) {
state := s_invalid
}
}
class DataArray(implicit p: Parameters) extends L1HellaCacheModule()(p) {
val io = IO(new Bundle {
val read = Flipped(Decoupled(new L1DataReadReq))
val write = Flipped(Decoupled(new L1DataWriteReq))
val resp = Output(Vec(nWays, Bits(encRowBits.W)))
})
val waddr = io.write.bits.addr >> rowOffBits
val raddr = io.read.bits.addr >> rowOffBits
if (doNarrowRead) {
for (w <- 0 until nWays by rowWords) {
val wway_en = io.write.bits.way_en(w+rowWords-1,w)
val rway_en = io.read.bits.way_en(w+rowWords-1,w)
val resp = Wire(Vec(rowWords, Bits(encRowBits.W)))
val r_raddr = RegEnable(io.read.bits.addr, io.read.valid)
for (i <- 0 until resp.size) {
val array = DescribedSRAM(
name = s"array_${w}_${i}",
desc = "Non-blocking DCache Data Array",
size = nSets * refillCycles,
data = Vec(rowWords, Bits(encDataBits.W))
)
when (wway_en.orR && io.write.valid && io.write.bits.wmask(i)) {
val data = VecInit.fill(rowWords)(io.write.bits.data(encDataBits*(i+1)-1,encDataBits*i))
array.write(waddr, data, wway_en.asBools)
}
resp(i) := array.read(raddr, rway_en.orR && io.read.valid).asUInt
}
for (dw <- 0 until rowWords) {
val r = VecInit(resp.map(_(encDataBits*(dw+1)-1,encDataBits*dw)))
val resp_mux =
if (r.size == 1) r
else VecInit(r(r_raddr(rowOffBits-1,wordOffBits)), r.tail:_*)
io.resp(w+dw) := resp_mux.asUInt
}
}
} else {
for (w <- 0 until nWays) {
val array = DescribedSRAM(
name = s"array_${w}",
desc = "Non-blocking DCache Data Array",
size = nSets * refillCycles,
data = Vec(rowWords, Bits(encDataBits.W))
)
when (io.write.bits.way_en(w) && io.write.valid) {
val data = VecInit.tabulate(rowWords)(i => io.write.bits.data(encDataBits*(i+1)-1,encDataBits*i))
array.write(waddr, data, io.write.bits.wmask.asBools)
}
io.resp(w) := array.read(raddr, io.read.bits.way_en(w) && io.read.valid).asUInt
}
}
io.read.ready := true.B
io.write.ready := true.B
}
class NonBlockingDCache(staticIdForMetadataUseOnly: Int)(implicit p: Parameters) extends HellaCache(staticIdForMetadataUseOnly)(p) {
override lazy val module = new NonBlockingDCacheModule(this)
}
class NonBlockingDCacheModule(outer: NonBlockingDCache) extends HellaCacheModule(outer) {
require(isPow2(nWays)) // TODO: relax this
require(dataScratchpadSize == 0)
require(!usingVM || untagBits <= pgIdxBits, s"untagBits($untagBits) > pgIdxBits($pgIdxBits)")
require(!cacheParams.separateUncachedResp)
// ECC is only supported on the data array
require(cacheParams.tagCode.isInstanceOf[IdentityCode])
val dECC = cacheParams.dataCode
io.cpu := DontCare
io.errors := DontCare
val wb = Module(new WritebackUnit)
val prober = Module(new ProbeUnit)
val mshrs = Module(new MSHRFile)
io.tlb_port.req.ready := true.B
io.cpu.req.ready := true.B
val s1_valid = RegNext(io.cpu.req.fire, false.B)
val s1_tlb_req_valid = RegNext(io.tlb_port.req.fire, false.B)
val s1_tlb_req = RegEnable(io.tlb_port.req.bits, io.tlb_port.req.fire)
val s1_req = Reg(new HellaCacheReq)
val s1_valid_masked = s1_valid && !io.cpu.s1_kill
val s1_replay = RegInit(false.B)
val s1_clk_en = Reg(Bool())
val s1_sfence = s1_req.cmd === M_SFENCE
val s2_valid = RegNext(s1_valid_masked && !s1_sfence, false.B) && !io.cpu.s2_xcpt.asUInt.orR
val s2_tlb_req_valid = RegNext(s1_tlb_req_valid, false.B)
val s2_req = Reg(new HellaCacheReq)
val s2_replay = RegNext(s1_replay, false.B) && s2_req.cmd =/= M_FLUSH_ALL
val s2_recycle = Wire(Bool())
val s2_valid_masked = Wire(Bool())
val s3_valid = RegInit(false.B)
val s3_req = Reg(new HellaCacheReq)
val s3_way = Reg(Bits())
val s1_recycled = RegEnable(s2_recycle, false.B, s1_clk_en)
val s1_read = isRead(s1_req.cmd)
val s1_write = isWrite(s1_req.cmd)
val s1_readwrite = s1_read || s1_write || isPrefetch(s1_req.cmd)
// check for unsupported operations
assert(!s1_valid || !s1_req.cmd.isOneOf(M_PWR))
val dtlb = Module(new TLB(false, log2Ceil(coreDataBytes), TLBConfig(nTLBSets, nTLBWays)))
io.ptw <> dtlb.io.ptw
dtlb.io.kill := io.cpu.s2_kill
dtlb.io.req.valid := s1_valid && !io.cpu.s1_kill && s1_readwrite
dtlb.io.req.bits.passthrough := s1_req.phys
dtlb.io.req.bits.vaddr := s1_req.addr
dtlb.io.req.bits.size := s1_req.size
dtlb.io.req.bits.cmd := s1_req.cmd
dtlb.io.req.bits.prv := s1_req.dprv
dtlb.io.req.bits.v := s1_req.dv
when (s1_tlb_req_valid) { dtlb.io.req.bits := s1_tlb_req }
when (!dtlb.io.req.ready && !io.cpu.req.bits.phys) { io.cpu.req.ready := false.B }
dtlb.io.sfence.valid := s1_valid && !io.cpu.s1_kill && s1_sfence
dtlb.io.sfence.bits.rs1 := s1_req.size(0)
dtlb.io.sfence.bits.rs2 := s1_req.size(1)
dtlb.io.sfence.bits.addr := s1_req.addr
dtlb.io.sfence.bits.asid := io.cpu.s1_data.data
dtlb.io.sfence.bits.hv := s1_req.cmd === M_HFENCEV
dtlb.io.sfence.bits.hg := s1_req.cmd === M_HFENCEG
when (io.cpu.req.valid) {
s1_req := io.cpu.req.bits
}
when (wb.io.meta_read.valid) {
s1_req.addr := Cat(wb.io.meta_read.bits.tag, wb.io.meta_read.bits.idx) << blockOffBits
s1_req.phys := true.B
}
when (prober.io.meta_read.valid) {
s1_req.addr := Cat(prober.io.meta_read.bits.tag, prober.io.meta_read.bits.idx) << blockOffBits
s1_req.phys := true.B
}
when (mshrs.io.replay.valid) {
s1_req := mshrs.io.replay.bits
}
when (s2_recycle) {
s1_req := s2_req
}
val s1_addr = Mux(s1_req.phys, s1_req.addr, dtlb.io.resp.paddr)
io.tlb_port.s1_resp := dtlb.io.resp
when (s1_clk_en) {
s2_req.size := s1_req.size
s2_req.signed := s1_req.signed
s2_req.phys := s1_req.phys
s2_req.addr := s1_addr
s2_req.no_resp := s1_req.no_resp
when (s1_write) {
s2_req.data := Mux(s1_replay, mshrs.io.replay.bits.data, io.cpu.s1_data.data)
}
when (s1_recycled) { s2_req.data := s1_req.data }
s2_req.tag := s1_req.tag
s2_req.cmd := s1_req.cmd
}
// tags
def onReset = L1Metadata(0.U, ClientMetadata.onReset)
val meta = Module(new L1MetadataArray(() => onReset ))
val metaReadArb = Module(new Arbiter(new L1MetaReadReq, 5))
val metaWriteArb = Module(new Arbiter(new L1MetaWriteReq, 2))
meta.io.read <> metaReadArb.io.out
meta.io.write <> metaWriteArb.io.out
// data
val data = Module(new DataArray)
val readArb = Module(new Arbiter(new L1DataReadReq, 4))
val writeArb = Module(new Arbiter(new L1DataWriteReq, 2))
data.io.write.valid := writeArb.io.out.valid
writeArb.io.out.ready := data.io.write.ready
data.io.write.bits := writeArb.io.out.bits
val wdata_encoded = (0 until rowWords).map(i => dECC.encode(writeArb.io.out.bits.data(coreDataBits*(i+1)-1,coreDataBits*i)))
data.io.write.bits.data := wdata_encoded.asUInt
// tag read for new requests
metaReadArb.io.in(4).valid := io.cpu.req.valid
metaReadArb.io.in(4).bits.idx := io.cpu.req.bits.addr >> blockOffBits
metaReadArb.io.in(4).bits.tag := io.cpu.req.bits.addr >> untagBits
metaReadArb.io.in(4).bits.way_en := ~0.U(nWays.W)
when (!metaReadArb.io.in(4).ready) { io.cpu.req.ready := false.B }
// data read for new requests
readArb.io.in(3).valid := io.cpu.req.valid
readArb.io.in(3).bits.addr := io.cpu.req.bits.addr
readArb.io.in(3).bits.way_en := ~0.U(nWays.W)
when (!readArb.io.in(3).ready) { io.cpu.req.ready := false.B }
// recycled requests
metaReadArb.io.in(0).valid := s2_recycle
metaReadArb.io.in(0).bits.idx := s2_req.addr >> blockOffBits
metaReadArb.io.in(0).bits.way_en := ~0.U(nWays.W)
metaReadArb.io.in(0).bits.tag := s2_req.tag
readArb.io.in(0).valid := s2_recycle
readArb.io.in(0).bits.addr := s2_req.addr
readArb.io.in(0).bits.way_en := ~0.U(nWays.W)
// tag check and way muxing
def wayMap[T <: Data](f: Int => T) = VecInit((0 until nWays).map(f))
val s1_tag_eq_way = wayMap((w: Int) => meta.io.resp(w).tag === (s1_addr >> untagBits)).asUInt
val s1_tag_match_way = wayMap((w: Int) => s1_tag_eq_way(w) && meta.io.resp(w).coh.isValid()).asUInt
s1_clk_en := metaReadArb.io.out.valid //TODO: should be metaReadArb.io.out.fire, but triggers Verilog backend bug
val s1_writeback = s1_clk_en && !s1_valid && !s1_replay
val s2_tag_match_way = RegEnable(s1_tag_match_way, s1_clk_en)
val s2_tag_match = s2_tag_match_way.orR
val s2_hit_state = Mux1H(s2_tag_match_way, wayMap((w: Int) => RegEnable(meta.io.resp(w).coh, s1_clk_en)))
val (s2_has_permission, _, s2_new_hit_state) = s2_hit_state.onAccess(s2_req.cmd)
val s2_hit = s2_tag_match && s2_has_permission && s2_hit_state === s2_new_hit_state
// load-reserved/store-conditional
val lrsc_count = RegInit(0.U)
val lrsc_valid = lrsc_count > lrscBackoff.U
val lrsc_addr = Reg(UInt())
val (s2_lr, s2_sc) = (s2_req.cmd === M_XLR, s2_req.cmd === M_XSC)
val s2_lrsc_addr_match = lrsc_valid && lrsc_addr === (s2_req.addr >> blockOffBits)
val s2_sc_fail = s2_sc && !s2_lrsc_addr_match
when (lrsc_count > 0.U) { lrsc_count := lrsc_count - 1.U }
when (s2_valid_masked && s2_hit || s2_replay) {
when (s2_lr) {
lrsc_count := lrscCycles.U - 1.U
lrsc_addr := s2_req.addr >> blockOffBits
}
when (lrsc_count > 0.U) {
lrsc_count := 0.U
}
}
when (s2_valid_masked && !(s2_tag_match && s2_has_permission) && s2_lrsc_addr_match) {
lrsc_count := 0.U
}
val s2_data = Wire(Vec(nWays, Bits(encRowBits.W)))
for (w <- 0 until nWays) {
val regs = Reg(Vec(rowWords, Bits(encDataBits.W)))
val en1 = s1_clk_en && s1_tag_eq_way(w)
for (i <- 0 until regs.size) {
val en = en1 && (((i == 0).B || !doNarrowRead.B) || s1_writeback)
when (en) { regs(i) := data.io.resp(w) >> encDataBits*i }
}
s2_data(w) := regs.asUInt
}
val s2_data_muxed = Mux1H(s2_tag_match_way, s2_data)
val s2_data_decoded = (0 until rowWords).map(i => dECC.decode(s2_data_muxed(encDataBits*(i+1)-1,encDataBits*i)))
val s2_data_corrected = s2_data_decoded.map(_.corrected).asUInt
val s2_data_uncorrected = s2_data_decoded.map(_.uncorrected).asUInt
val s2_word_idx = if(doNarrowRead) 0.U else s2_req.addr(log2Up(rowWords*coreDataBytes)-1,log2Up(wordBytes))
val s2_data_correctable = s2_data_decoded.map(_.correctable).asUInt(s2_word_idx)
// store/amo hits
s3_valid := (s2_valid_masked && s2_hit || s2_replay) && !s2_sc_fail && isWrite(s2_req.cmd)
val amoalu = Module(new AMOALU(xLen))
when ((s2_valid || s2_replay) && (isWrite(s2_req.cmd) || s2_data_correctable)) {
s3_req := s2_req
s3_req.data := Mux(s2_data_correctable, s2_data_corrected, amoalu.io.out)
s3_way := s2_tag_match_way
}
writeArb.io.in(0).bits.addr := s3_req.addr
writeArb.io.in(0).bits.wmask := UIntToOH(s3_req.addr.extract(rowOffBits-1,offsetlsb))
writeArb.io.in(0).bits.data := Fill(rowWords, s3_req.data)
writeArb.io.in(0).valid := s3_valid
writeArb.io.in(0).bits.way_en := s3_way
// replacement policy
val replacer = cacheParams.replacement
val s1_replaced_way_en = UIntToOH(replacer.way)
val s2_replaced_way_en = UIntToOH(RegEnable(replacer.way, s1_clk_en))
val s2_repl_meta = Mux1H(s2_replaced_way_en, wayMap((w: Int) => RegEnable(meta.io.resp(w), s1_clk_en && s1_replaced_way_en(w))).toSeq)
// miss handling
mshrs.io.req.valid := s2_valid_masked && !s2_hit && (isPrefetch(s2_req.cmd) || isRead(s2_req.cmd) || isWrite(s2_req.cmd))
mshrs.io.req.bits.viewAsSupertype(new Replay) := s2_req.viewAsSupertype(new HellaCacheReq)
mshrs.io.req.bits.tag_match := s2_tag_match
mshrs.io.req.bits.old_meta := Mux(s2_tag_match, L1Metadata(s2_repl_meta.tag, s2_hit_state), s2_repl_meta)
mshrs.io.req.bits.way_en := Mux(s2_tag_match, s2_tag_match_way, s2_replaced_way_en)
mshrs.io.req.bits.data := s2_req.data
when (mshrs.io.req.fire) { replacer.miss }
tl_out.a <> mshrs.io.mem_acquire
// replays
readArb.io.in(1).valid := mshrs.io.replay.valid
readArb.io.in(1).bits.addr := mshrs.io.replay.bits.addr
readArb.io.in(1).bits.way_en := ~0.U(nWays.W)
mshrs.io.replay.ready := readArb.io.in(1).ready
s1_replay := mshrs.io.replay.valid && readArb.io.in(1).ready
metaReadArb.io.in(1) <> mshrs.io.meta_read
metaWriteArb.io.in(0) <> mshrs.io.meta_write
// probes and releases
prober.io.req.valid := tl_out.b.valid && !lrsc_valid
tl_out.b.ready := prober.io.req.ready && !lrsc_valid
prober.io.req.bits := tl_out.b.bits
prober.io.way_en := s2_tag_match_way
prober.io.block_state := s2_hit_state
metaReadArb.io.in(2) <> prober.io.meta_read
metaWriteArb.io.in(1) <> prober.io.meta_write
prober.io.mshr_rdy := mshrs.io.probe_rdy
// refills
val grant_has_data = edge.hasData(tl_out.d.bits)
mshrs.io.mem_grant.valid := tl_out.d.fire
mshrs.io.mem_grant.bits := tl_out.d.bits
tl_out.d.ready := writeArb.io.in(1).ready || !grant_has_data
/* The last clause here is necessary in order to prevent the responses for
* the IOMSHRs from being written into the data array. It works because the
* IOMSHR ids start right after the ones for the regular MSHRs. */
writeArb.io.in(1).valid := tl_out.d.valid && grant_has_data &&
tl_out.d.bits.source < cfg.nMSHRs.U
writeArb.io.in(1).bits.addr := mshrs.io.refill.addr
writeArb.io.in(1).bits.way_en := mshrs.io.refill.way_en
writeArb.io.in(1).bits.wmask := ~0.U(rowWords.W)
writeArb.io.in(1).bits.data := tl_out.d.bits.data(encRowBits-1,0)
data.io.read <> readArb.io.out
readArb.io.out.ready := !tl_out.d.valid || tl_out.d.ready // insert bubble if refill gets blocked
tl_out.e <> mshrs.io.mem_finish
// writebacks
val wbArb = Module(new Arbiter(new WritebackReq(edge.bundle), 2))
wbArb.io.in(0) <> prober.io.wb_req
wbArb.io.in(1) <> mshrs.io.wb_req
wb.io.req <> wbArb.io.out
metaReadArb.io.in(3) <> wb.io.meta_read
readArb.io.in(2) <> wb.io.data_req
wb.io.data_resp := s2_data_corrected
TLArbiter.lowest(edge, tl_out.c, wb.io.release, prober.io.rep)
// store->load bypassing
val s4_valid = RegNext(s3_valid, false.B)
val s4_req = RegEnable(s3_req, s3_valid && metaReadArb.io.out.valid)
val bypasses = List(
((s2_valid_masked || s2_replay) && !s2_sc_fail, s2_req, amoalu.io.out),
(s3_valid, s3_req, s3_req.data),
(s4_valid, s4_req, s4_req.data)
).map(r => (r._1 && (s1_addr >> wordOffBits === r._2.addr >> wordOffBits) && isWrite(r._2.cmd), r._3))
val s2_store_bypass_data = Reg(Bits(coreDataBits.W))
val s2_store_bypass = Reg(Bool())
when (s1_clk_en) {
s2_store_bypass := false.B
when (bypasses.map(_._1).reduce(_||_)) {
s2_store_bypass_data := PriorityMux(bypasses)
s2_store_bypass := true.B
}
}
// load data subword mux/sign extension
val s2_data_word_prebypass = s2_data_uncorrected >> Cat(s2_word_idx, 0.U(log2Up(coreDataBits).W))
val s2_data_word = Mux(s2_store_bypass, s2_store_bypass_data, s2_data_word_prebypass)
val loadgen = new LoadGen(s2_req.size, s2_req.signed, s2_req.addr, s2_data_word, s2_sc, wordBytes)
amoalu.io.mask := new StoreGen(s2_req.size, s2_req.addr, 0.U, xLen/8).mask
amoalu.io.cmd := s2_req.cmd
amoalu.io.lhs := s2_data_word
amoalu.io.rhs := s2_req.data
// nack it like it's hot
val s1_nack = dtlb.io.req.valid && dtlb.io.resp.miss || io.cpu.s2_nack || s1_tlb_req_valid ||
s1_req.addr(idxMSB,idxLSB) === prober.io.meta_write.bits.idx && !prober.io.req.ready
val s2_nack_hit = RegEnable(s1_nack, s1_valid || s1_replay)
when (s2_nack_hit) { mshrs.io.req.valid := false.B }
val s2_nack_victim = s2_hit && mshrs.io.secondary_miss
val s2_nack_miss = !s2_hit && !mshrs.io.req.ready
val s2_nack = s2_nack_hit || s2_nack_victim || s2_nack_miss
s2_valid_masked := s2_valid && !s2_nack && !io.cpu.s2_kill
val s2_recycle_ecc = (s2_valid || s2_replay) && s2_hit && s2_data_correctable
val s2_recycle_next = RegInit(false.B)
when (s1_valid || s1_replay) { s2_recycle_next := s2_recycle_ecc }
s2_recycle := s2_recycle_ecc || s2_recycle_next
// after a nack, block until nack condition resolves to save energy
val block_miss = RegInit(false.B)
block_miss := (s2_valid || block_miss) && s2_nack_miss
when (block_miss || s1_nack) {
io.cpu.req.ready := false.B
}
val cache_resp = Wire(Valid(new HellaCacheResp))
cache_resp.valid := (s2_replay || s2_valid_masked && s2_hit) && !s2_data_correctable
cache_resp.bits.addr := s2_req.addr
cache_resp.bits.idx.foreach(_ := s2_req.idx.get)
cache_resp.bits.tag := s2_req.tag
cache_resp.bits.cmd := s2_req.cmd
cache_resp.bits.size := s2_req.size
cache_resp.bits.signed := s2_req.signed
cache_resp.bits.dprv := s2_req.dprv
cache_resp.bits.dv := s2_req.dv
cache_resp.bits.data_word_bypass := loadgen.wordData
cache_resp.bits.data_raw := s2_data_word
cache_resp.bits.mask := s2_req.mask
cache_resp.bits.has_data := isRead(s2_req.cmd)
cache_resp.bits.data := loadgen.data | s2_sc_fail
cache_resp.bits.store_data := s2_req.data
cache_resp.bits.replay := s2_replay
val uncache_resp = Wire(Valid(new HellaCacheResp))
uncache_resp.bits := mshrs.io.resp.bits
uncache_resp.valid := mshrs.io.resp.valid
mshrs.io.resp.ready := RegNext(!(s1_valid || s1_replay))
io.cpu.s2_nack := s2_valid && s2_nack
io.cpu.resp := Mux(mshrs.io.resp.ready, uncache_resp, cache_resp)
io.cpu.resp.bits.data_word_bypass := loadgen.wordData
io.cpu.resp.bits.data_raw := s2_data_word
io.cpu.ordered := mshrs.io.fence_rdy && !s1_valid && !s2_valid
io.cpu.store_pending := mshrs.io.store_pending
io.cpu.replay_next := (s1_replay && s1_read) || mshrs.io.replay_next
val s1_xcpt_valid = dtlb.io.req.valid && !s1_nack
val s1_xcpt = dtlb.io.resp
io.cpu.s2_xcpt := Mux(RegNext(s1_xcpt_valid), RegEnable(s1_xcpt, s1_clk_en), 0.U.asTypeOf(s1_xcpt))
io.cpu.s2_uncached := false.B
io.cpu.s2_paddr := s2_req.addr
// performance events
io.cpu.perf.acquire := edge.done(tl_out.a)
io.cpu.perf.release := edge.done(tl_out.c)
io.cpu.perf.tlbMiss := io.ptw.req.fire
// no clock-gating support
io.cpu.clock_enabled := true.B
}
File Parameters.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy.nodes._
import freechips.rocketchip.diplomacy.{
AddressDecoder, AddressSet, BufferParams, DirectedBuffers, IdMap, IdMapEntry,
IdRange, RegionType, TransferSizes
}
import freechips.rocketchip.resources.{Resource, ResourceAddress, ResourcePermissions}
import freechips.rocketchip.util.{
AsyncQueueParams, BundleField, BundleFieldBase, BundleKeyBase,
CreditedDelay, groupByIntoSeq, RationalDirection, SimpleProduct
}
import scala.math.max
//These transfer sizes describe requests issued from masters on the A channel that will be responded by slaves on the D channel
case class TLMasterToSlaveTransferSizes(
// Supports both Acquire+Release of the following two sizes:
acquireT: TransferSizes = TransferSizes.none,
acquireB: TransferSizes = TransferSizes.none,
arithmetic: TransferSizes = TransferSizes.none,
logical: TransferSizes = TransferSizes.none,
get: TransferSizes = TransferSizes.none,
putFull: TransferSizes = TransferSizes.none,
putPartial: TransferSizes = TransferSizes.none,
hint: TransferSizes = TransferSizes.none)
extends TLCommonTransferSizes {
def intersect(rhs: TLMasterToSlaveTransferSizes) = TLMasterToSlaveTransferSizes(
acquireT = acquireT .intersect(rhs.acquireT),
acquireB = acquireB .intersect(rhs.acquireB),
arithmetic = arithmetic.intersect(rhs.arithmetic),
logical = logical .intersect(rhs.logical),
get = get .intersect(rhs.get),
putFull = putFull .intersect(rhs.putFull),
putPartial = putPartial.intersect(rhs.putPartial),
hint = hint .intersect(rhs.hint))
def mincover(rhs: TLMasterToSlaveTransferSizes) = TLMasterToSlaveTransferSizes(
acquireT = acquireT .mincover(rhs.acquireT),
acquireB = acquireB .mincover(rhs.acquireB),
arithmetic = arithmetic.mincover(rhs.arithmetic),
logical = logical .mincover(rhs.logical),
get = get .mincover(rhs.get),
putFull = putFull .mincover(rhs.putFull),
putPartial = putPartial.mincover(rhs.putPartial),
hint = hint .mincover(rhs.hint))
// Reduce rendering to a simple yes/no per field
override def toString = {
def str(x: TransferSizes, flag: String) = if (x.none) "" else flag
def flags = Vector(
str(acquireT, "T"),
str(acquireB, "B"),
str(arithmetic, "A"),
str(logical, "L"),
str(get, "G"),
str(putFull, "F"),
str(putPartial, "P"),
str(hint, "H"))
flags.mkString
}
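// Illustrative example: a TLMasterToSlaveTransferSizes with only get and
// putFull set renders as "GF".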
// Prints out the actual information in a user readable way
def infoString = {
s"""acquireT = ${acquireT}
|acquireB = ${acquireB}
|arithmetic = ${arithmetic}
|logical = ${logical}
|get = ${get}
|putFull = ${putFull}
|putPartial = ${putPartial}
|hint = ${hint}
|
|""".stripMargin
}
}
object TLMasterToSlaveTransferSizes {
def unknownEmits = TLMasterToSlaveTransferSizes(
acquireT = TransferSizes(1, 4096),
acquireB = TransferSizes(1, 4096),
arithmetic = TransferSizes(1, 4096),
logical = TransferSizes(1, 4096),
get = TransferSizes(1, 4096),
putFull = TransferSizes(1, 4096),
putPartial = TransferSizes(1, 4096),
hint = TransferSizes(1, 4096))
def unknownSupports = TLMasterToSlaveTransferSizes()
}
//These transfer sizes describe requests issued from slaves on the B channel that will be responded by masters on the C channel
case class TLSlaveToMasterTransferSizes(
probe: TransferSizes = TransferSizes.none,
arithmetic: TransferSizes = TransferSizes.none,
logical: TransferSizes = TransferSizes.none,
get: TransferSizes = TransferSizes.none,
putFull: TransferSizes = TransferSizes.none,
putPartial: TransferSizes = TransferSizes.none,
hint: TransferSizes = TransferSizes.none
) extends TLCommonTransferSizes {
def intersect(rhs: TLSlaveToMasterTransferSizes) = TLSlaveToMasterTransferSizes(
probe = probe .intersect(rhs.probe),
arithmetic = arithmetic.intersect(rhs.arithmetic),
logical = logical .intersect(rhs.logical),
get = get .intersect(rhs.get),
putFull = putFull .intersect(rhs.putFull),
putPartial = putPartial.intersect(rhs.putPartial),
hint = hint .intersect(rhs.hint)
)
def mincover(rhs: TLSlaveToMasterTransferSizes) = TLSlaveToMasterTransferSizes(
probe = probe .mincover(rhs.probe),
arithmetic = arithmetic.mincover(rhs.arithmetic),
logical = logical .mincover(rhs.logical),
get = get .mincover(rhs.get),
putFull = putFull .mincover(rhs.putFull),
putPartial = putPartial.mincover(rhs.putPartial),
hint = hint .mincover(rhs.hint)
)
// Reduce rendering to a simple yes/no per field
override def toString = {
def str(x: TransferSizes, flag: String) = if (x.none) "" else flag
def flags = Vector(
str(probe, "P"),
str(arithmetic, "A"),
str(logical, "L"),
str(get, "G"),
str(putFull, "F"),
str(putPartial, "P"),
str(hint, "H"))
flags.mkString
}
// Prints out the actual information in a user readable way
def infoString = {
s"""probe = ${probe}
|arithmetic = ${arithmetic}
|logical = ${logical}
|get = ${get}
|putFull = ${putFull}
|putPartial = ${putPartial}
|hint = ${hint}
|
|""".stripMargin
}
}
object TLSlaveToMasterTransferSizes {
def unknownEmits = TLSlaveToMasterTransferSizes(
arithmetic = TransferSizes(1, 4096),
logical = TransferSizes(1, 4096),
get = TransferSizes(1, 4096),
putFull = TransferSizes(1, 4096),
putPartial = TransferSizes(1, 4096),
hint = TransferSizes(1, 4096),
probe = TransferSizes(1, 4096))
def unknownSupports = TLSlaveToMasterTransferSizes()
}
trait TLCommonTransferSizes {
def arithmetic: TransferSizes
def logical: TransferSizes
def get: TransferSizes
def putFull: TransferSizes
def putPartial: TransferSizes
def hint: TransferSizes
}
class TLSlaveParameters private(
val nodePath: Seq[BaseNode],
val resources: Seq[Resource],
setName: Option[String],
val address: Seq[AddressSet],
val regionType: RegionType.T,
val executable: Boolean,
val fifoId: Option[Int],
val supports: TLMasterToSlaveTransferSizes,
val emits: TLSlaveToMasterTransferSizes,
// By default, slaves are forbidden from issuing 'denied' responses (it prevents Fragmentation)
val alwaysGrantsT: Boolean, // typically only true for CacheCork'd read-write devices; dual: neverReleaseData
// If fifoId=Some, all accesses sent to the same fifoId are executed and ACK'd in FIFO order
// Note: you can only rely on this FIFO behaviour if your TLMasterParameters include requestFifo
val mayDenyGet: Boolean, // applies to: AccessAckData, GrantData
val mayDenyPut: Boolean) // applies to: AccessAck, Grant, HintAck
// ReleaseAck may NEVER be denied
extends SimpleProduct
{
def sortedAddress = address.sorted
override def canEqual(that: Any): Boolean = that.isInstanceOf[TLSlaveParameters]
override def productPrefix = "TLSlaveParameters"
// We intentionally omit nodePath for equality testing / formatting
def productArity: Int = 11
def productElement(n: Int): Any = n match {
case 0 => name
case 1 => address
case 2 => resources
case 3 => regionType
case 4 => executable
case 5 => fifoId
case 6 => supports
case 7 => emits
case 8 => alwaysGrantsT
case 9 => mayDenyGet
case 10 => mayDenyPut
case _ => throw new IndexOutOfBoundsException(n.toString)
}
def supportsAcquireT: TransferSizes = supports.acquireT
def supportsAcquireB: TransferSizes = supports.acquireB
def supportsArithmetic: TransferSizes = supports.arithmetic
def supportsLogical: TransferSizes = supports.logical
def supportsGet: TransferSizes = supports.get
def supportsPutFull: TransferSizes = supports.putFull
def supportsPutPartial: TransferSizes = supports.putPartial
def supportsHint: TransferSizes = supports.hint
require (!address.isEmpty, "Address cannot be empty")
address.foreach { a => require (a.finite, "Address must be finite") }
address.combinations(2).foreach { case Seq(x,y) => require (!x.overlaps(y), s"$x and $y overlap.") }
require (supportsPutFull.contains(supportsPutPartial), s"PutFull($supportsPutFull) < PutPartial($supportsPutPartial)")
require (supportsPutFull.contains(supportsArithmetic), s"PutFull($supportsPutFull) < Arithmetic($supportsArithmetic)")
require (supportsPutFull.contains(supportsLogical), s"PutFull($supportsPutFull) < Logical($supportsLogical)")
require (supportsGet.contains(supportsArithmetic), s"Get($supportsGet) < Arithmetic($supportsArithmetic)")
require (supportsGet.contains(supportsLogical), s"Get($supportsGet) < Logical($supportsLogical)")
require (supportsAcquireB.contains(supportsAcquireT), s"AcquireB($supportsAcquireB) < AcquireT($supportsAcquireT)")
require (!alwaysGrantsT || supportsAcquireT, s"Must supportAcquireT if promising to always grantT")
// Make sure that the regionType agrees with the capabilities
require (!supportsAcquireB || regionType >= RegionType.UNCACHED) // acquire -> uncached, tracked, cached
require (regionType <= RegionType.UNCACHED || supportsAcquireB) // tracked, cached -> acquire
require (regionType != RegionType.UNCACHED || supportsGet) // uncached -> supportsGet
val name = setName.orElse(nodePath.lastOption.map(_.lazyModule.name)).getOrElse("disconnected")
val maxTransfer = List( // Largest supported transfer of all types
supportsAcquireT.max,
supportsAcquireB.max,
supportsArithmetic.max,
supportsLogical.max,
supportsGet.max,
supportsPutFull.max,
supportsPutPartial.max).max
val maxAddress = address.map(_.max).max
val minAlignment = address.map(_.alignment).min
// The device had better not support a transfer larger than its alignment
require (minAlignment >= maxTransfer, s"Bad $address: minAlignment ($minAlignment) must be >= maxTransfer ($maxTransfer)")
def toResource: ResourceAddress = {
ResourceAddress(address, ResourcePermissions(
r = supportsAcquireB || supportsGet,
w = supportsAcquireT || supportsPutFull,
x = executable,
c = supportsAcquireB,
a = supportsArithmetic && supportsLogical))
}
def findTreeViolation() = nodePath.find {
case _: MixedAdapterNode[_, _, _, _, _, _, _, _] => false
case _: SinkNode[_, _, _, _, _] => false
case node => node.inputs.size != 1
}
def isTree = findTreeViolation() == None
def infoString = {
s"""Slave Name = ${name}
|Slave Address = ${address}
|supports = ${supports.infoString}
|
|""".stripMargin
}
def v1copy(
address: Seq[AddressSet] = address,
resources: Seq[Resource] = resources,
regionType: RegionType.T = regionType,
executable: Boolean = executable,
nodePath: Seq[BaseNode] = nodePath,
supportsAcquireT: TransferSizes = supports.acquireT,
supportsAcquireB: TransferSizes = supports.acquireB,
supportsArithmetic: TransferSizes = supports.arithmetic,
supportsLogical: TransferSizes = supports.logical,
supportsGet: TransferSizes = supports.get,
supportsPutFull: TransferSizes = supports.putFull,
supportsPutPartial: TransferSizes = supports.putPartial,
supportsHint: TransferSizes = supports.hint,
mayDenyGet: Boolean = mayDenyGet,
mayDenyPut: Boolean = mayDenyPut,
alwaysGrantsT: Boolean = alwaysGrantsT,
fifoId: Option[Int] = fifoId) =
{
new TLSlaveParameters(
setName = setName,
address = address,
resources = resources,
regionType = regionType,
executable = executable,
nodePath = nodePath,
supports = TLMasterToSlaveTransferSizes(
acquireT = supportsAcquireT,
acquireB = supportsAcquireB,
arithmetic = supportsArithmetic,
logical = supportsLogical,
get = supportsGet,
putFull = supportsPutFull,
putPartial = supportsPutPartial,
hint = supportsHint),
emits = emits,
mayDenyGet = mayDenyGet,
mayDenyPut = mayDenyPut,
alwaysGrantsT = alwaysGrantsT,
fifoId = fifoId)
}
def v2copy(
nodePath: Seq[BaseNode] = nodePath,
resources: Seq[Resource] = resources,
name: Option[String] = setName,
address: Seq[AddressSet] = address,
regionType: RegionType.T = regionType,
executable: Boolean = executable,
fifoId: Option[Int] = fifoId,
supports: TLMasterToSlaveTransferSizes = supports,
emits: TLSlaveToMasterTransferSizes = emits,
alwaysGrantsT: Boolean = alwaysGrantsT,
mayDenyGet: Boolean = mayDenyGet,
mayDenyPut: Boolean = mayDenyPut) =
{
new TLSlaveParameters(
nodePath = nodePath,
resources = resources,
setName = name,
address = address,
regionType = regionType,
executable = executable,
fifoId = fifoId,
supports = supports,
emits = emits,
alwaysGrantsT = alwaysGrantsT,
mayDenyGet = mayDenyGet,
mayDenyPut = mayDenyPut)
}
@deprecated("Use v1copy instead of copy","")
def copy(
address: Seq[AddressSet] = address,
resources: Seq[Resource] = resources,
regionType: RegionType.T = regionType,
executable: Boolean = executable,
nodePath: Seq[BaseNode] = nodePath,
supportsAcquireT: TransferSizes = supports.acquireT,
supportsAcquireB: TransferSizes = supports.acquireB,
supportsArithmetic: TransferSizes = supports.arithmetic,
supportsLogical: TransferSizes = supports.logical,
supportsGet: TransferSizes = supports.get,
supportsPutFull: TransferSizes = supports.putFull,
supportsPutPartial: TransferSizes = supports.putPartial,
supportsHint: TransferSizes = supports.hint,
mayDenyGet: Boolean = mayDenyGet,
mayDenyPut: Boolean = mayDenyPut,
alwaysGrantsT: Boolean = alwaysGrantsT,
fifoId: Option[Int] = fifoId) =
{
v1copy(
address = address,
resources = resources,
regionType = regionType,
executable = executable,
nodePath = nodePath,
supportsAcquireT = supportsAcquireT,
supportsAcquireB = supportsAcquireB,
supportsArithmetic = supportsArithmetic,
supportsLogical = supportsLogical,
supportsGet = supportsGet,
supportsPutFull = supportsPutFull,
supportsPutPartial = supportsPutPartial,
supportsHint = supportsHint,
mayDenyGet = mayDenyGet,
mayDenyPut = mayDenyPut,
alwaysGrantsT = alwaysGrantsT,
fifoId = fifoId)
}
}
object TLSlaveParameters {
def v1(
address: Seq[AddressSet],
resources: Seq[Resource] = Seq(),
regionType: RegionType.T = RegionType.GET_EFFECTS,
executable: Boolean = false,
nodePath: Seq[BaseNode] = Seq(),
supportsAcquireT: TransferSizes = TransferSizes.none,
supportsAcquireB: TransferSizes = TransferSizes.none,
supportsArithmetic: TransferSizes = TransferSizes.none,
supportsLogical: TransferSizes = TransferSizes.none,
supportsGet: TransferSizes = TransferSizes.none,
supportsPutFull: TransferSizes = TransferSizes.none,
supportsPutPartial: TransferSizes = TransferSizes.none,
supportsHint: TransferSizes = TransferSizes.none,
mayDenyGet: Boolean = false,
mayDenyPut: Boolean = false,
alwaysGrantsT: Boolean = false,
fifoId: Option[Int] = None) =
{
new TLSlaveParameters(
setName = None,
address = address,
resources = resources,
regionType = regionType,
executable = executable,
nodePath = nodePath,
supports = TLMasterToSlaveTransferSizes(
acquireT = supportsAcquireT,
acquireB = supportsAcquireB,
arithmetic = supportsArithmetic,
logical = supportsLogical,
get = supportsGet,
putFull = supportsPutFull,
putPartial = supportsPutPartial,
hint = supportsHint),
emits = TLSlaveToMasterTransferSizes.unknownEmits,
mayDenyGet = mayDenyGet,
mayDenyPut = mayDenyPut,
alwaysGrantsT = alwaysGrantsT,
fifoId = fifoId)
}
def v2(
address: Seq[AddressSet],
nodePath: Seq[BaseNode] = Seq(),
resources: Seq[Resource] = Seq(),
name: Option[String] = None,
regionType: RegionType.T = RegionType.GET_EFFECTS,
executable: Boolean = false,
fifoId: Option[Int] = None,
supports: TLMasterToSlaveTransferSizes = TLMasterToSlaveTransferSizes.unknownSupports,
emits: TLSlaveToMasterTransferSizes = TLSlaveToMasterTransferSizes.unknownEmits,
alwaysGrantsT: Boolean = false,
mayDenyGet: Boolean = false,
mayDenyPut: Boolean = false) =
{
new TLSlaveParameters(
nodePath = nodePath,
resources = resources,
setName = name,
address = address,
regionType = regionType,
executable = executable,
fifoId = fifoId,
supports = supports,
emits = emits,
alwaysGrantsT = alwaysGrantsT,
mayDenyGet = mayDenyGet,
mayDenyPut = mayDenyPut)
}
}
object TLManagerParameters {
@deprecated("Use TLSlaveParameters.v1 instead of TLManagerParameters","")
def apply(
address: Seq[AddressSet],
resources: Seq[Resource] = Seq(),
regionType: RegionType.T = RegionType.GET_EFFECTS,
executable: Boolean = false,
nodePath: Seq[BaseNode] = Seq(),
supportsAcquireT: TransferSizes = TransferSizes.none,
supportsAcquireB: TransferSizes = TransferSizes.none,
supportsArithmetic: TransferSizes = TransferSizes.none,
supportsLogical: TransferSizes = TransferSizes.none,
supportsGet: TransferSizes = TransferSizes.none,
supportsPutFull: TransferSizes = TransferSizes.none,
supportsPutPartial: TransferSizes = TransferSizes.none,
supportsHint: TransferSizes = TransferSizes.none,
mayDenyGet: Boolean = false,
mayDenyPut: Boolean = false,
alwaysGrantsT: Boolean = false,
fifoId: Option[Int] = None) =
TLSlaveParameters.v1(
address,
resources,
regionType,
executable,
nodePath,
supportsAcquireT,
supportsAcquireB,
supportsArithmetic,
supportsLogical,
supportsGet,
supportsPutFull,
supportsPutPartial,
supportsHint,
mayDenyGet,
mayDenyPut,
alwaysGrantsT,
fifoId,
)
}
case class TLChannelBeatBytes(a: Option[Int], b: Option[Int], c: Option[Int], d: Option[Int])
{
def members = Seq(a, b, c, d)
members.collect { case Some(beatBytes) =>
require (isPow2(beatBytes), "Data channel width must be a power of 2")
}
}
object TLChannelBeatBytes{
def apply(beatBytes: Int): TLChannelBeatBytes = TLChannelBeatBytes(
Some(beatBytes),
Some(beatBytes),
Some(beatBytes),
Some(beatBytes))
def apply(): TLChannelBeatBytes = TLChannelBeatBytes(
None,
None,
None,
None)
}
class TLSlavePortParameters private(
val slaves: Seq[TLSlaveParameters],
val channelBytes: TLChannelBeatBytes,
val endSinkId: Int,
val minLatency: Int,
val responseFields: Seq[BundleFieldBase],
val requestKeys: Seq[BundleKeyBase]) extends SimpleProduct
{
def sortedSlaves = slaves.sortBy(_.sortedAddress.head)
override def canEqual(that: Any): Boolean = that.isInstanceOf[TLSlavePortParameters]
override def productPrefix = "TLSlavePortParameters"
def productArity: Int = 6
def productElement(n: Int): Any = n match {
case 0 => slaves
case 1 => channelBytes
case 2 => endSinkId
case 3 => minLatency
case 4 => responseFields
case 5 => requestKeys
case _ => throw new IndexOutOfBoundsException(n.toString)
}
require (!slaves.isEmpty, "Slave ports must have slaves")
require (endSinkId >= 0, "Sink ids cannot be negative")
require (minLatency >= 0, "Minimum required latency cannot be negative")
// Using this API implies you cannot handle mixed-width busses
def beatBytes = {
channelBytes.members.foreach { width =>
require (width.isDefined && width == channelBytes.a)
}
channelBytes.a.get
}
// TODO this should be deprecated
def managers = slaves
def requireFifo(policy: TLFIFOFixer.Policy = TLFIFOFixer.allFIFO) = {
val relevant = slaves.filter(m => policy(m))
relevant.foreach { m =>
require(m.fifoId == relevant.head.fifoId, s"${m.name} had fifoId ${m.fifoId}, which was not homogeneous (${slaves.map(s => (s.name, s.fifoId))}) ")
}
}
// Bounds on required sizes
def maxAddress = slaves.map(_.maxAddress).max
def maxTransfer = slaves.map(_.maxTransfer).max
def mayDenyGet = slaves.exists(_.mayDenyGet)
def mayDenyPut = slaves.exists(_.mayDenyPut)
  // Diplomatically determined operation sizes emitted by all outward Slaves,
  // as opposed to the emits* methods, which generate circuitry to check specific addresses
val allEmitClaims = slaves.map(_.emits).reduce( _ intersect _)
  // Operation sizes emitted by at least one outward Slave,
  // as opposed to the emits* methods, which generate circuitry to check specific addresses
val anyEmitClaims = slaves.map(_.emits).reduce(_ mincover _)
  // Diplomatically determined operation sizes supported by all outward Slaves,
  // as opposed to the supports* methods, which generate circuitry to check specific addresses
val allSupportClaims = slaves.map(_.supports).reduce( _ intersect _)
val allSupportAcquireT = allSupportClaims.acquireT
val allSupportAcquireB = allSupportClaims.acquireB
val allSupportArithmetic = allSupportClaims.arithmetic
val allSupportLogical = allSupportClaims.logical
val allSupportGet = allSupportClaims.get
val allSupportPutFull = allSupportClaims.putFull
val allSupportPutPartial = allSupportClaims.putPartial
val allSupportHint = allSupportClaims.hint
  // Operation sizes supported by at least one outward Slave,
  // as opposed to the supports* methods, which generate circuitry to check specific addresses
val anySupportClaims = slaves.map(_.supports).reduce(_ mincover _)
val anySupportAcquireT = !anySupportClaims.acquireT.none
val anySupportAcquireB = !anySupportClaims.acquireB.none
val anySupportArithmetic = !anySupportClaims.arithmetic.none
val anySupportLogical = !anySupportClaims.logical.none
val anySupportGet = !anySupportClaims.get.none
val anySupportPutFull = !anySupportClaims.putFull.none
val anySupportPutPartial = !anySupportClaims.putPartial.none
val anySupportHint = !anySupportClaims.hint.none
// Supporting Acquire means being routable for GrantAck
require ((endSinkId == 0) == !anySupportAcquireB)
// These return Option[TLSlaveParameters] for your convenience
def find(address: BigInt) = slaves.find(_.address.exists(_.contains(address)))
// The safe version will check the entire address
def findSafe(address: UInt) = VecInit(sortedSlaves.map(_.address.map(_.contains(address)).reduce(_ || _)))
// The fast version assumes the address is valid (you probably want fastProperty instead of this function)
def findFast(address: UInt) = {
val routingMask = AddressDecoder(slaves.map(_.address))
VecInit(sortedSlaves.map(_.address.map(_.widen(~routingMask)).distinct.map(_.contains(address)).reduce(_ || _)))
}
// Compute the simplest AddressSets that decide a key
def fastPropertyGroup[K](p: TLSlaveParameters => K): Seq[(K, Seq[AddressSet])] = {
val groups = groupByIntoSeq(sortedSlaves.map(m => (p(m), m.address)))( _._1).map { case (k, vs) =>
k -> vs.flatMap(_._2)
}
val reductionMask = AddressDecoder(groups.map(_._2))
groups.map { case (k, seq) => k -> AddressSet.unify(seq.map(_.widen(~reductionMask)).distinct) }
}
// Select a property
def fastProperty[K, D <: Data](address: UInt, p: TLSlaveParameters => K, d: K => D): D =
Mux1H(fastPropertyGroup(p).map { case (v, a) => (a.map(_.contains(address)).reduce(_||_), d(v)) })
// Note: returns the actual fifoId + 1 or 0 if None
def findFifoIdFast(address: UInt) = fastProperty(address, _.fifoId.map(_+1).getOrElse(0), (i:Int) => i.U)
def hasFifoIdFast(address: UInt) = fastProperty(address, _.fifoId.isDefined, (b:Boolean) => b.B)
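  // A minimal illustration of the fifoId encoding above, assuming a hypothetical port with two slaves:
  // one at 0x0-0xfff with fifoId = Some(0) and one at 0x1000-0x1fff with fifoId = None.
  //   findFifoIdFast(0x0800.U)  // => 1.U  (fifoId 0, encoded as fifoId + 1)
  //   findFifoIdFast(0x1800.U)  // => 0.U  (None encodes to 0)
  //   hasFifoIdFast(0x1800.U)   // => false.B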
// Does this Port manage this ID/address?
def containsSafe(address: UInt) = findSafe(address).reduce(_ || _)
private def addressHelper(
// setting safe to false indicates that all addresses are expected to be legal, which might reduce circuit complexity
safe: Boolean,
// member filters out the sizes being checked based on the opcode being emitted or supported
member: TLSlaveParameters => TransferSizes,
address: UInt,
lgSize: UInt,
// range provides a limit on the sizes that are expected to be evaluated, which might reduce circuit complexity
range: Option[TransferSizes]): Bool = {
// trim reduces circuit complexity by intersecting checked sizes with the range argument
def trim(x: TransferSizes) = range.map(_.intersect(x)).getOrElse(x)
    // groupBy returns an unordered map; convert back to a Seq and sort the result for determinism.
    // groupByIntoSeq turns the slaves into trimmed membership sizes:
    //   - `member` selects the kind of transfer being checked (the opcode family being filtered on)
    //   - `trim` intersects each slave's declared sizes with `range`, so only the sizes of interest remain
    // Slaves are therefore filtered on both whether they support a particular opcode and at what sizes,
    // then grouped by the actual transfer-size range they support.
    // For supportCases, instead of returning the list of slaves, we return a mapping from transfer size
    // to the set of address sets that are supported for that transfer size; for every size there can be
    // multiple address ranges.
    // Safety is a trade-off between checking all possible addresses and checking only the addresses known
    // to have supported sizes: the safe version is a more expensive circuit but always gives the right
    // answer, even for an illegal address, while the fast version is cheaper but presumes address legality
    // and may give a wrong answer for an illegal address.
    // This groupByIntoSeq deterministically groups all address sets for which a given `member` transfer size applies;
    // in the resulting cases, the keys are transfer sizes and the values are all address sets which emit or support that size.
val supportCases = groupByIntoSeq(slaves)(m => trim(member(m))).map { case (k: TransferSizes, vs: Seq[TLSlaveParameters]) =>
k -> vs.flatMap(_.address)
}
// safe produces a circuit that compares against all possible addresses,
// whereas fast presumes that the address is legal but uses an efficient address decoder
val mask = if (safe) ~BigInt(0) else AddressDecoder(supportCases.map(_._2))
    // Simplified creates the most concise possible representation of each case's address sets based on the mask.
val simplified = supportCases.map { case (k, seq) => k -> AddressSet.unify(seq.map(_.widen(~mask)).distinct) }
simplified.map { case (s, a) =>
      // s is a transfer size; a case passes when the operation's size lies in s (or s equals the requested range)
      // and we or-reduce across all cases, checking whether any contains both the dynamic size and dynamic address on the wire.
((Some(s) == range).B || s.containsLg(lgSize)) &&
a.map(_.contains(address)).reduce(_||_)
}.foldLeft(false.B)(_||_)
}
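  // A minimal usage sketch of the helpers below, assuming a hypothetical port `slavePort` whose slaves
  // support Get transfers of 1 to 64 bytes (the signal names `addr` and `size` are placeholders):
  //   val okSafe = slavePort.supportsGetSafe(addr, size)                            // checks every possible address
  //   val okFast = slavePort.supportsGetFast(addr, size)                            // cheaper; presumes addr is legal
  //   val okSmall = slavePort.supportsGetSafe(addr, size, Some(TransferSizes(1, 8))) // only sizes 1..8 are evaluated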
def supportsAcquireTSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.supports.acquireT, address, lgSize, range)
def supportsAcquireBSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.supports.acquireB, address, lgSize, range)
def supportsArithmeticSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.supports.arithmetic, address, lgSize, range)
def supportsLogicalSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.supports.logical, address, lgSize, range)
def supportsGetSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.supports.get, address, lgSize, range)
def supportsPutFullSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.supports.putFull, address, lgSize, range)
def supportsPutPartialSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.supports.putPartial, address, lgSize, range)
def supportsHintSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.supports.hint, address, lgSize, range)
def supportsAcquireTFast (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(false, _.supports.acquireT, address, lgSize, range)
def supportsAcquireBFast (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(false, _.supports.acquireB, address, lgSize, range)
def supportsArithmeticFast (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(false, _.supports.arithmetic, address, lgSize, range)
def supportsLogicalFast (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(false, _.supports.logical, address, lgSize, range)
def supportsGetFast (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(false, _.supports.get, address, lgSize, range)
def supportsPutFullFast (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(false, _.supports.putFull, address, lgSize, range)
def supportsPutPartialFast (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(false, _.supports.putPartial, address, lgSize, range)
def supportsHintFast (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(false, _.supports.hint, address, lgSize, range)
def emitsProbeSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.emits.probe, address, lgSize, range)
def emitsArithmeticSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.emits.arithmetic, address, lgSize, range)
def emitsLogicalSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.emits.logical, address, lgSize, range)
def emitsGetSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.emits.get, address, lgSize, range)
def emitsPutFullSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.emits.putFull, address, lgSize, range)
def emitsPutPartialSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.emits.putPartial, address, lgSize, range)
def emitsHintSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.emits.hint, address, lgSize, range)
def findTreeViolation() = slaves.flatMap(_.findTreeViolation()).headOption
def isTree = !slaves.exists(!_.isTree)
def infoString = "Slave Port Beatbytes = " + beatBytes + "\n" + "Slave Port MinLatency = " + minLatency + "\n\n" + slaves.map(_.infoString).mkString
def v1copy(
managers: Seq[TLSlaveParameters] = slaves,
beatBytes: Int = -1,
endSinkId: Int = endSinkId,
minLatency: Int = minLatency,
responseFields: Seq[BundleFieldBase] = responseFields,
requestKeys: Seq[BundleKeyBase] = requestKeys) =
{
new TLSlavePortParameters(
slaves = managers,
channelBytes = if (beatBytes != -1) TLChannelBeatBytes(beatBytes) else channelBytes,
endSinkId = endSinkId,
minLatency = minLatency,
responseFields = responseFields,
requestKeys = requestKeys)
}
def v2copy(
slaves: Seq[TLSlaveParameters] = slaves,
channelBytes: TLChannelBeatBytes = channelBytes,
endSinkId: Int = endSinkId,
minLatency: Int = minLatency,
responseFields: Seq[BundleFieldBase] = responseFields,
requestKeys: Seq[BundleKeyBase] = requestKeys) =
{
new TLSlavePortParameters(
slaves = slaves,
channelBytes = channelBytes,
endSinkId = endSinkId,
minLatency = minLatency,
responseFields = responseFields,
requestKeys = requestKeys)
}
@deprecated("Use v1copy instead of copy","")
def copy(
managers: Seq[TLSlaveParameters] = slaves,
beatBytes: Int = -1,
endSinkId: Int = endSinkId,
minLatency: Int = minLatency,
responseFields: Seq[BundleFieldBase] = responseFields,
requestKeys: Seq[BundleKeyBase] = requestKeys) =
{
v1copy(
managers,
beatBytes,
endSinkId,
minLatency,
responseFields,
requestKeys)
}
}
object TLSlavePortParameters {
def v1(
managers: Seq[TLSlaveParameters],
beatBytes: Int,
endSinkId: Int = 0,
minLatency: Int = 0,
responseFields: Seq[BundleFieldBase] = Nil,
requestKeys: Seq[BundleKeyBase] = Nil) =
{
new TLSlavePortParameters(
slaves = managers,
channelBytes = TLChannelBeatBytes(beatBytes),
endSinkId = endSinkId,
minLatency = minLatency,
responseFields = responseFields,
requestKeys = requestKeys)
}
}
object TLManagerPortParameters {
@deprecated("Use TLSlavePortParameters.v1 instead of TLManagerPortParameters","")
def apply(
managers: Seq[TLSlaveParameters],
beatBytes: Int,
endSinkId: Int = 0,
minLatency: Int = 0,
responseFields: Seq[BundleFieldBase] = Nil,
requestKeys: Seq[BundleKeyBase] = Nil) =
{
TLSlavePortParameters.v1(
managers,
beatBytes,
endSinkId,
minLatency,
responseFields,
requestKeys)
}
}
class TLMasterParameters private(
val nodePath: Seq[BaseNode],
val resources: Seq[Resource],
val name: String,
val visibility: Seq[AddressSet],
val unusedRegionTypes: Set[RegionType.T],
val executesOnly: Boolean,
  val requestFifo: Boolean, // only a request, not a requirement; applies to A, not C.
val supports: TLSlaveToMasterTransferSizes,
val emits: TLMasterToSlaveTransferSizes,
val neverReleasesData: Boolean,
val sourceId: IdRange) extends SimpleProduct
{
override def canEqual(that: Any): Boolean = that.isInstanceOf[TLMasterParameters]
override def productPrefix = "TLMasterParameters"
// We intentionally omit nodePath for equality testing / formatting
def productArity: Int = 10
def productElement(n: Int): Any = n match {
case 0 => name
case 1 => sourceId
case 2 => resources
case 3 => visibility
case 4 => unusedRegionTypes
case 5 => executesOnly
case 6 => requestFifo
case 7 => supports
case 8 => emits
case 9 => neverReleasesData
case _ => throw new IndexOutOfBoundsException(n.toString)
}
require (!sourceId.isEmpty)
require (!visibility.isEmpty)
require (supports.putFull.contains(supports.putPartial))
// We only support these operations if we support Probe (ie: we're a cache)
require (supports.probe.contains(supports.arithmetic))
require (supports.probe.contains(supports.logical))
require (supports.probe.contains(supports.get))
require (supports.probe.contains(supports.putFull))
require (supports.probe.contains(supports.putPartial))
require (supports.probe.contains(supports.hint))
visibility.combinations(2).foreach { case Seq(x,y) => require (!x.overlaps(y), s"$x and $y overlap.") }
val maxTransfer = List(
supports.probe.max,
supports.arithmetic.max,
supports.logical.max,
supports.get.max,
supports.putFull.max,
supports.putPartial.max).max
def infoString = {
s"""Master Name = ${name}
|visibility = ${visibility}
|emits = ${emits.infoString}
|sourceId = ${sourceId}
|
|""".stripMargin
}
def v1copy(
name: String = name,
sourceId: IdRange = sourceId,
nodePath: Seq[BaseNode] = nodePath,
requestFifo: Boolean = requestFifo,
visibility: Seq[AddressSet] = visibility,
supportsProbe: TransferSizes = supports.probe,
supportsArithmetic: TransferSizes = supports.arithmetic,
supportsLogical: TransferSizes = supports.logical,
supportsGet: TransferSizes = supports.get,
supportsPutFull: TransferSizes = supports.putFull,
supportsPutPartial: TransferSizes = supports.putPartial,
supportsHint: TransferSizes = supports.hint) =
{
new TLMasterParameters(
nodePath = nodePath,
resources = this.resources,
name = name,
visibility = visibility,
unusedRegionTypes = this.unusedRegionTypes,
executesOnly = this.executesOnly,
requestFifo = requestFifo,
supports = TLSlaveToMasterTransferSizes(
probe = supportsProbe,
arithmetic = supportsArithmetic,
logical = supportsLogical,
get = supportsGet,
putFull = supportsPutFull,
putPartial = supportsPutPartial,
hint = supportsHint),
emits = this.emits,
neverReleasesData = this.neverReleasesData,
sourceId = sourceId)
}
def v2copy(
nodePath: Seq[BaseNode] = nodePath,
resources: Seq[Resource] = resources,
name: String = name,
visibility: Seq[AddressSet] = visibility,
unusedRegionTypes: Set[RegionType.T] = unusedRegionTypes,
executesOnly: Boolean = executesOnly,
requestFifo: Boolean = requestFifo,
supports: TLSlaveToMasterTransferSizes = supports,
emits: TLMasterToSlaveTransferSizes = emits,
neverReleasesData: Boolean = neverReleasesData,
sourceId: IdRange = sourceId) =
{
new TLMasterParameters(
nodePath = nodePath,
resources = resources,
name = name,
visibility = visibility,
unusedRegionTypes = unusedRegionTypes,
executesOnly = executesOnly,
requestFifo = requestFifo,
supports = supports,
emits = emits,
neverReleasesData = neverReleasesData,
sourceId = sourceId)
}
@deprecated("Use v1copy instead of copy","")
def copy(
name: String = name,
sourceId: IdRange = sourceId,
nodePath: Seq[BaseNode] = nodePath,
requestFifo: Boolean = requestFifo,
visibility: Seq[AddressSet] = visibility,
supportsProbe: TransferSizes = supports.probe,
supportsArithmetic: TransferSizes = supports.arithmetic,
supportsLogical: TransferSizes = supports.logical,
supportsGet: TransferSizes = supports.get,
supportsPutFull: TransferSizes = supports.putFull,
supportsPutPartial: TransferSizes = supports.putPartial,
supportsHint: TransferSizes = supports.hint) =
{
v1copy(
name = name,
sourceId = sourceId,
nodePath = nodePath,
requestFifo = requestFifo,
visibility = visibility,
supportsProbe = supportsProbe,
supportsArithmetic = supportsArithmetic,
supportsLogical = supportsLogical,
supportsGet = supportsGet,
supportsPutFull = supportsPutFull,
supportsPutPartial = supportsPutPartial,
supportsHint = supportsHint)
}
}
object TLMasterParameters {
def v1(
name: String,
sourceId: IdRange = IdRange(0,1),
nodePath: Seq[BaseNode] = Seq(),
requestFifo: Boolean = false,
visibility: Seq[AddressSet] = Seq(AddressSet(0, ~0)),
supportsProbe: TransferSizes = TransferSizes.none,
supportsArithmetic: TransferSizes = TransferSizes.none,
supportsLogical: TransferSizes = TransferSizes.none,
supportsGet: TransferSizes = TransferSizes.none,
supportsPutFull: TransferSizes = TransferSizes.none,
supportsPutPartial: TransferSizes = TransferSizes.none,
supportsHint: TransferSizes = TransferSizes.none) =
{
new TLMasterParameters(
nodePath = nodePath,
resources = Nil,
name = name,
visibility = visibility,
unusedRegionTypes = Set(),
executesOnly = false,
requestFifo = requestFifo,
supports = TLSlaveToMasterTransferSizes(
probe = supportsProbe,
arithmetic = supportsArithmetic,
logical = supportsLogical,
get = supportsGet,
putFull = supportsPutFull,
putPartial = supportsPutPartial,
hint = supportsHint),
emits = TLMasterToSlaveTransferSizes.unknownEmits,
neverReleasesData = false,
sourceId = sourceId)
}
def v2(
nodePath: Seq[BaseNode] = Seq(),
resources: Seq[Resource] = Nil,
name: String,
visibility: Seq[AddressSet] = Seq(AddressSet(0, ~0)),
unusedRegionTypes: Set[RegionType.T] = Set(),
executesOnly: Boolean = false,
requestFifo: Boolean = false,
supports: TLSlaveToMasterTransferSizes = TLSlaveToMasterTransferSizes.unknownSupports,
emits: TLMasterToSlaveTransferSizes = TLMasterToSlaveTransferSizes.unknownEmits,
neverReleasesData: Boolean = false,
sourceId: IdRange = IdRange(0,1)) =
{
new TLMasterParameters(
nodePath = nodePath,
resources = resources,
name = name,
visibility = visibility,
unusedRegionTypes = unusedRegionTypes,
executesOnly = executesOnly,
requestFifo = requestFifo,
supports = supports,
emits = emits,
neverReleasesData = neverReleasesData,
sourceId = sourceId)
}
}
object TLClientParameters {
@deprecated("Use TLMasterParameters.v1 instead of TLClientParameters","")
def apply(
name: String,
sourceId: IdRange = IdRange(0,1),
nodePath: Seq[BaseNode] = Seq(),
requestFifo: Boolean = false,
visibility: Seq[AddressSet] = Seq(AddressSet.everything),
supportsProbe: TransferSizes = TransferSizes.none,
supportsArithmetic: TransferSizes = TransferSizes.none,
supportsLogical: TransferSizes = TransferSizes.none,
supportsGet: TransferSizes = TransferSizes.none,
supportsPutFull: TransferSizes = TransferSizes.none,
supportsPutPartial: TransferSizes = TransferSizes.none,
supportsHint: TransferSizes = TransferSizes.none) =
{
TLMasterParameters.v1(
name = name,
sourceId = sourceId,
nodePath = nodePath,
requestFifo = requestFifo,
visibility = visibility,
supportsProbe = supportsProbe,
supportsArithmetic = supportsArithmetic,
supportsLogical = supportsLogical,
supportsGet = supportsGet,
supportsPutFull = supportsPutFull,
supportsPutPartial = supportsPutPartial,
supportsHint = supportsHint)
}
}
class TLMasterPortParameters private(
val masters: Seq[TLMasterParameters],
val channelBytes: TLChannelBeatBytes,
val minLatency: Int,
val echoFields: Seq[BundleFieldBase],
val requestFields: Seq[BundleFieldBase],
val responseKeys: Seq[BundleKeyBase]) extends SimpleProduct
{
override def canEqual(that: Any): Boolean = that.isInstanceOf[TLMasterPortParameters]
override def productPrefix = "TLMasterPortParameters"
def productArity: Int = 6
def productElement(n: Int): Any = n match {
case 0 => masters
case 1 => channelBytes
case 2 => minLatency
case 3 => echoFields
case 4 => requestFields
case 5 => responseKeys
case _ => throw new IndexOutOfBoundsException(n.toString)
}
require (!masters.isEmpty)
require (minLatency >= 0)
def clients = masters
// Require disjoint ranges for Ids
IdRange.overlaps(masters.map(_.sourceId)).foreach { case (x, y) =>
require (!x.overlaps(y), s"TLClientParameters.sourceId ${x} overlaps ${y}")
}
// Bounds on required sizes
def endSourceId = masters.map(_.sourceId.end).max
def maxTransfer = masters.map(_.maxTransfer).max
  // The unused source ids below endSourceId
def unusedSources: Seq[Int] = {
val usedSources = masters.map(_.sourceId).sortBy(_.start)
((Seq(0) ++ usedSources.map(_.end)) zip usedSources.map(_.start)) flatMap { case (end, start) =>
end until start
}
}
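  // Worked example (illustrative): with masters owning sourceId ranges [0,2) and [4,6),
  // the zipped (end, start) pairs are (0,0) and (2,4), so unusedSources = Seq(2, 3),
  // i.e. the ids below endSourceId = 6 that no master owns.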
  // Diplomatically determined operation sizes emitted by all inward Masters,
  // as opposed to the emits* methods, which generate circuitry to check specific addresses
val allEmitClaims = masters.map(_.emits).reduce( _ intersect _)
  // Diplomatically determined operation sizes emitted by at least one inward Master,
  // as opposed to the emits* methods, which generate circuitry to check specific addresses
val anyEmitClaims = masters.map(_.emits).reduce(_ mincover _)
  // Diplomatically determined operation sizes supported by all inward Masters,
  // as opposed to the supports* methods, which generate circuitry to check specific addresses
val allSupportProbe = masters.map(_.supports.probe) .reduce(_ intersect _)
val allSupportArithmetic = masters.map(_.supports.arithmetic).reduce(_ intersect _)
val allSupportLogical = masters.map(_.supports.logical) .reduce(_ intersect _)
val allSupportGet = masters.map(_.supports.get) .reduce(_ intersect _)
val allSupportPutFull = masters.map(_.supports.putFull) .reduce(_ intersect _)
val allSupportPutPartial = masters.map(_.supports.putPartial).reduce(_ intersect _)
val allSupportHint = masters.map(_.supports.hint) .reduce(_ intersect _)
  // Diplomatically determined operation sizes supported by at least one inward Master,
  // as opposed to the supports* methods, which generate circuitry to check specific addresses
val anySupportProbe = masters.map(!_.supports.probe.none) .reduce(_ || _)
val anySupportArithmetic = masters.map(!_.supports.arithmetic.none).reduce(_ || _)
val anySupportLogical = masters.map(!_.supports.logical.none) .reduce(_ || _)
val anySupportGet = masters.map(!_.supports.get.none) .reduce(_ || _)
val anySupportPutFull = masters.map(!_.supports.putFull.none) .reduce(_ || _)
val anySupportPutPartial = masters.map(!_.supports.putPartial.none).reduce(_ || _)
val anySupportHint = masters.map(!_.supports.hint.none) .reduce(_ || _)
// These return Option[TLMasterParameters] for your convenience
def find(id: Int) = masters.find(_.sourceId.contains(id))
// Synthesizable lookup methods
def find(id: UInt) = VecInit(masters.map(_.sourceId.contains(id)))
def contains(id: UInt) = find(id).reduce(_ || _)
def requestFifo(id: UInt) = Mux1H(find(id), masters.map(c => c.requestFifo.B))
  // Synthesizable check: is the (id, size) pair supported by the owning master's (client's) diplomatic parameters?
private def sourceIdHelper(member: TLMasterParameters => TransferSizes)(id: UInt, lgSize: UInt) = {
val allSame = masters.map(member(_) == member(masters(0))).reduce(_ && _)
    // This if statement is a coarse generalization of the groupBy in sourceIdHelper2,
    // handling the case where there is only one group.
if (allSame) member(masters(0)).containsLg(lgSize) else {
      // Find the master associated with the id and return whether that master can accept a transaction of lgSize
Mux1H(find(id), masters.map(member(_).containsLg(lgSize)))
}
}
// Check for support of a given operation at a specific id
val supportsProbe = sourceIdHelper(_.supports.probe) _
val supportsArithmetic = sourceIdHelper(_.supports.arithmetic) _
val supportsLogical = sourceIdHelper(_.supports.logical) _
val supportsGet = sourceIdHelper(_.supports.get) _
val supportsPutFull = sourceIdHelper(_.supports.putFull) _
val supportsPutPartial = sourceIdHelper(_.supports.putPartial) _
val supportsHint = sourceIdHelper(_.supports.hint) _
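  // Usage sketch (hypothetical signal names): gate a Probe on whether the addressed client can accept it.
  //   val mayProbe: Bool = clientPort.supportsProbe(probeSource, probeLgSize)
  // When every master declares the same probe TransferSizes this reduces to a static containsLg check;
  // otherwise a Mux1H over the source-id match is generated.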
// TODO: Merge sourceIdHelper2 with sourceIdHelper
private def sourceIdHelper2(
member: TLMasterParameters => TransferSizes,
sourceId: UInt,
lgSize: UInt): Bool = {
// Because sourceIds are uniquely owned by each master, we use them to group the
// cases that have to be checked.
val emitCases = groupByIntoSeq(masters)(m => member(m)).map { case (k, vs) =>
k -> vs.map(_.sourceId)
}
emitCases.map { case (s, a) =>
(s.containsLg(lgSize)) &&
a.map(_.contains(sourceId)).reduce(_||_)
}.foldLeft(false.B)(_||_)
}
// Check for emit of a given operation at a specific id
def emitsAcquireT (sourceId: UInt, lgSize: UInt) = sourceIdHelper2(_.emits.acquireT, sourceId, lgSize)
def emitsAcquireB (sourceId: UInt, lgSize: UInt) = sourceIdHelper2(_.emits.acquireB, sourceId, lgSize)
def emitsArithmetic(sourceId: UInt, lgSize: UInt) = sourceIdHelper2(_.emits.arithmetic, sourceId, lgSize)
def emitsLogical (sourceId: UInt, lgSize: UInt) = sourceIdHelper2(_.emits.logical, sourceId, lgSize)
def emitsGet (sourceId: UInt, lgSize: UInt) = sourceIdHelper2(_.emits.get, sourceId, lgSize)
def emitsPutFull (sourceId: UInt, lgSize: UInt) = sourceIdHelper2(_.emits.putFull, sourceId, lgSize)
def emitsPutPartial(sourceId: UInt, lgSize: UInt) = sourceIdHelper2(_.emits.putPartial, sourceId, lgSize)
def emitsHint (sourceId: UInt, lgSize: UInt) = sourceIdHelper2(_.emits.hint, sourceId, lgSize)
def infoString = masters.map(_.infoString).mkString
def v1copy(
clients: Seq[TLMasterParameters] = masters,
minLatency: Int = minLatency,
echoFields: Seq[BundleFieldBase] = echoFields,
requestFields: Seq[BundleFieldBase] = requestFields,
responseKeys: Seq[BundleKeyBase] = responseKeys) =
{
new TLMasterPortParameters(
masters = clients,
channelBytes = channelBytes,
minLatency = minLatency,
echoFields = echoFields,
requestFields = requestFields,
responseKeys = responseKeys)
}
def v2copy(
masters: Seq[TLMasterParameters] = masters,
channelBytes: TLChannelBeatBytes = channelBytes,
minLatency: Int = minLatency,
echoFields: Seq[BundleFieldBase] = echoFields,
requestFields: Seq[BundleFieldBase] = requestFields,
responseKeys: Seq[BundleKeyBase] = responseKeys) =
{
new TLMasterPortParameters(
masters = masters,
channelBytes = channelBytes,
minLatency = minLatency,
echoFields = echoFields,
requestFields = requestFields,
responseKeys = responseKeys)
}
@deprecated("Use v1copy instead of copy","")
def copy(
clients: Seq[TLMasterParameters] = masters,
minLatency: Int = minLatency,
echoFields: Seq[BundleFieldBase] = echoFields,
requestFields: Seq[BundleFieldBase] = requestFields,
responseKeys: Seq[BundleKeyBase] = responseKeys) =
{
v1copy(
clients,
minLatency,
echoFields,
requestFields,
responseKeys)
}
}
object TLClientPortParameters {
@deprecated("Use TLMasterPortParameters.v1 instead of TLClientPortParameters","")
def apply(
clients: Seq[TLMasterParameters],
minLatency: Int = 0,
echoFields: Seq[BundleFieldBase] = Nil,
requestFields: Seq[BundleFieldBase] = Nil,
responseKeys: Seq[BundleKeyBase] = Nil) =
{
TLMasterPortParameters.v1(
clients,
minLatency,
echoFields,
requestFields,
responseKeys)
}
}
object TLMasterPortParameters {
def v1(
clients: Seq[TLMasterParameters],
minLatency: Int = 0,
echoFields: Seq[BundleFieldBase] = Nil,
requestFields: Seq[BundleFieldBase] = Nil,
responseKeys: Seq[BundleKeyBase] = Nil) =
{
new TLMasterPortParameters(
masters = clients,
channelBytes = TLChannelBeatBytes(),
minLatency = minLatency,
echoFields = echoFields,
requestFields = requestFields,
responseKeys = responseKeys)
}
def v2(
masters: Seq[TLMasterParameters],
channelBytes: TLChannelBeatBytes = TLChannelBeatBytes(),
minLatency: Int = 0,
echoFields: Seq[BundleFieldBase] = Nil,
requestFields: Seq[BundleFieldBase] = Nil,
responseKeys: Seq[BundleKeyBase] = Nil) =
{
new TLMasterPortParameters(
masters = masters,
channelBytes = channelBytes,
minLatency = minLatency,
echoFields = echoFields,
requestFields = requestFields,
responseKeys = responseKeys)
}
}
case class TLBundleParameters(
addressBits: Int,
dataBits: Int,
sourceBits: Int,
sinkBits: Int,
sizeBits: Int,
echoFields: Seq[BundleFieldBase],
requestFields: Seq[BundleFieldBase],
responseFields: Seq[BundleFieldBase],
hasBCE: Boolean)
{
// Chisel has issues with 0-width wires
require (addressBits >= 1)
require (dataBits >= 8)
require (sourceBits >= 1)
require (sinkBits >= 1)
require (sizeBits >= 1)
require (isPow2(dataBits))
echoFields.foreach { f => require (f.key.isControl, s"${f} is not a legal echo field") }
val addrLoBits = log2Up(dataBits/8)
// Used to uniquify bus IP names
def shortName = s"a${addressBits}d${dataBits}s${sourceBits}k${sinkBits}z${sizeBits}" + (if (hasBCE) "c" else "u")
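  // For instance (illustrative): addressBits = 32, dataBits = 64, sourceBits = 4, sinkBits = 3,
  // sizeBits = 6 with hasBCE = true yields the shortName "a32d64s4k3z6c"; an uncached link ends in "u".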
def union(x: TLBundleParameters) =
TLBundleParameters(
max(addressBits, x.addressBits),
max(dataBits, x.dataBits),
max(sourceBits, x.sourceBits),
max(sinkBits, x.sinkBits),
max(sizeBits, x.sizeBits),
echoFields = BundleField.union(echoFields ++ x.echoFields),
requestFields = BundleField.union(requestFields ++ x.requestFields),
responseFields = BundleField.union(responseFields ++ x.responseFields),
hasBCE || x.hasBCE)
}
object TLBundleParameters
{
val emptyBundleParams = TLBundleParameters(
addressBits = 1,
dataBits = 8,
sourceBits = 1,
sinkBits = 1,
sizeBits = 1,
echoFields = Nil,
requestFields = Nil,
responseFields = Nil,
hasBCE = false)
def union(x: Seq[TLBundleParameters]) = x.foldLeft(emptyBundleParams)((x,y) => x.union(y))
def apply(master: TLMasterPortParameters, slave: TLSlavePortParameters) =
new TLBundleParameters(
addressBits = log2Up(slave.maxAddress + 1),
dataBits = slave.beatBytes * 8,
sourceBits = log2Up(master.endSourceId),
sinkBits = log2Up(slave.endSinkId),
sizeBits = log2Up(log2Ceil(max(master.maxTransfer, slave.maxTransfer))+1),
echoFields = master.echoFields,
requestFields = BundleField.accept(master.requestFields, slave.requestKeys),
responseFields = BundleField.accept(slave.responseFields, master.responseKeys),
hasBCE = master.anySupportProbe && slave.anySupportAcquireB)
}
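// Worked example (illustrative): for a slave port with maxAddress 0xffff_ffff and beatBytes = 8,
// and a master port with endSourceId = 16 and maxTransfer = 64, the derived widths are
// addressBits = 32, dataBits = 64, sourceBits = 4, and sizeBits = log2Up(log2Ceil(64) + 1) = 3
// (size values 0..6 encode transfers of 1 to 64 bytes).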
case class TLEdgeParameters(
master: TLMasterPortParameters,
slave: TLSlavePortParameters,
params: Parameters,
sourceInfo: SourceInfo) extends FormatEdge
{
// legacy names:
def manager = slave
def client = master
val maxTransfer = max(master.maxTransfer, slave.maxTransfer)
val maxLgSize = log2Ceil(maxTransfer)
// Sanity check the link...
require (maxTransfer >= slave.beatBytes, s"Link's max transfer (${maxTransfer}) < ${slave.slaves.map(_.name)}'s beatBytes (${slave.beatBytes})")
def diplomaticClaimsMasterToSlave = master.anyEmitClaims.intersect(slave.anySupportClaims)
val bundle = TLBundleParameters(master, slave)
def formatEdge = master.infoString + "\n" + slave.infoString
}
case class TLCreditedDelay(
a: CreditedDelay,
b: CreditedDelay,
c: CreditedDelay,
d: CreditedDelay,
e: CreditedDelay)
{
def + (that: TLCreditedDelay): TLCreditedDelay = TLCreditedDelay(
a = a + that.a,
b = b + that.b,
c = c + that.c,
d = d + that.d,
e = e + that.e)
override def toString = s"(${a}, ${b}, ${c}, ${d}, ${e})"
}
object TLCreditedDelay {
def apply(delay: CreditedDelay): TLCreditedDelay = apply(delay, delay.flip, delay, delay.flip, delay)
}
case class TLCreditedManagerPortParameters(delay: TLCreditedDelay, base: TLSlavePortParameters) {def infoString = base.infoString}
case class TLCreditedClientPortParameters(delay: TLCreditedDelay, base: TLMasterPortParameters) {def infoString = base.infoString}
case class TLCreditedEdgeParameters(client: TLCreditedClientPortParameters, manager: TLCreditedManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends FormatEdge
{
val delay = client.delay + manager.delay
val bundle = TLBundleParameters(client.base, manager.base)
def formatEdge = client.infoString + "\n" + manager.infoString
}
case class TLAsyncManagerPortParameters(async: AsyncQueueParams, base: TLSlavePortParameters) {def infoString = base.infoString}
case class TLAsyncClientPortParameters(base: TLMasterPortParameters) {def infoString = base.infoString}
case class TLAsyncBundleParameters(async: AsyncQueueParams, base: TLBundleParameters)
case class TLAsyncEdgeParameters(client: TLAsyncClientPortParameters, manager: TLAsyncManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends FormatEdge
{
val bundle = TLAsyncBundleParameters(manager.async, TLBundleParameters(client.base, manager.base))
def formatEdge = client.infoString + "\n" + manager.infoString
}
case class TLRationalManagerPortParameters(direction: RationalDirection, base: TLSlavePortParameters) {def infoString = base.infoString}
case class TLRationalClientPortParameters(base: TLMasterPortParameters) {def infoString = base.infoString}
case class TLRationalEdgeParameters(client: TLRationalClientPortParameters, manager: TLRationalManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends FormatEdge
{
val bundle = TLBundleParameters(client.base, manager.base)
def formatEdge = client.infoString + "\n" + manager.infoString
}
// To be unified, devices must agree on all of these terms
case class ManagerUnificationKey(
resources: Seq[Resource],
regionType: RegionType.T,
executable: Boolean,
supportsAcquireT: TransferSizes,
supportsAcquireB: TransferSizes,
supportsArithmetic: TransferSizes,
supportsLogical: TransferSizes,
supportsGet: TransferSizes,
supportsPutFull: TransferSizes,
supportsPutPartial: TransferSizes,
supportsHint: TransferSizes)
object ManagerUnificationKey
{
def apply(x: TLSlaveParameters): ManagerUnificationKey = ManagerUnificationKey(
resources = x.resources,
regionType = x.regionType,
executable = x.executable,
supportsAcquireT = x.supportsAcquireT,
supportsAcquireB = x.supportsAcquireB,
supportsArithmetic = x.supportsArithmetic,
supportsLogical = x.supportsLogical,
supportsGet = x.supportsGet,
supportsPutFull = x.supportsPutFull,
supportsPutPartial = x.supportsPutPartial,
supportsHint = x.supportsHint)
}
object ManagerUnification
{
def apply(slaves: Seq[TLSlaveParameters]): List[TLSlaveParameters] = {
slaves.groupBy(ManagerUnificationKey.apply).values.map { seq =>
val agree = seq.forall(_.fifoId == seq.head.fifoId)
seq(0).v1copy(
address = AddressSet.unify(seq.flatMap(_.address)),
fifoId = if (agree) seq(0).fifoId else None)
}.toList
}
}
case class TLBufferParams(
a: BufferParams = BufferParams.none,
b: BufferParams = BufferParams.none,
c: BufferParams = BufferParams.none,
d: BufferParams = BufferParams.none,
e: BufferParams = BufferParams.none
) extends DirectedBuffers[TLBufferParams] {
def copyIn(x: BufferParams) = this.copy(b = x, d = x)
def copyOut(x: BufferParams) = this.copy(a = x, c = x, e = x)
def copyInOut(x: BufferParams) = this.copyIn(x).copyOut(x)
}
/** Pretty printing of TL source id maps */
class TLSourceIdMap(tl: TLMasterPortParameters) extends IdMap[TLSourceIdMapEntry] {
private val tlDigits = String.valueOf(tl.endSourceId-1).length()
protected val fmt = s"\t[%${tlDigits}d, %${tlDigits}d) %s%s%s"
private val sorted = tl.masters.sortBy(_.sourceId)
val mapping: Seq[TLSourceIdMapEntry] = sorted.map { case c =>
TLSourceIdMapEntry(c.sourceId, c.name, c.supports.probe, c.requestFifo)
}
}
case class TLSourceIdMapEntry(tlId: IdRange, name: String, isCache: Boolean, requestFifo: Boolean)
extends IdMapEntry
{
val from = tlId
val to = tlId
val maxTransactionsInFlight = Some(tlId.size)
}
File Edges.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.util._
class TLEdge(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdgeParameters(client, manager, params, sourceInfo)
{
def isAligned(address: UInt, lgSize: UInt): Bool = {
if (maxLgSize == 0) true.B else {
val mask = UIntToOH1(lgSize, maxLgSize)
(address & mask) === 0.U
}
}
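  // Example (illustrative), assuming maxLgSize >= 3: isAligned(0x14.U, 3.U) computes mask = 0b111
  // and returns false because 0x14 has bit 2 set; isAligned(0x18.U, 3.U) returns true.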
def mask(address: UInt, lgSize: UInt): UInt =
MaskGen(address, lgSize, manager.beatBytes)
def staticHasData(bundle: TLChannel): Option[Boolean] = {
bundle match {
case _:TLBundleA => {
// Do there exist A messages with Data?
val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial
// Do there exist A messages without Data?
val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint
// Statically optimize the case where hasData is a constant
if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None
}
case _:TLBundleB => {
// Do there exist B messages with Data?
val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial
// Do there exist B messages without Data?
val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint
// Statically optimize the case where hasData is a constant
if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None
}
case _:TLBundleC => {
        // Do there exist C messages with Data?
val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe
// Do there exist C messages without Data?
val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe
if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None
}
case _:TLBundleD => {
        // Do there exist D messages with Data?
val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB
// Do there exist D messages without Data?
val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT
if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None
}
case _:TLBundleE => Some(false)
}
}
def isRequest(x: TLChannel): Bool = {
x match {
case a: TLBundleA => true.B
case b: TLBundleB => true.B
case c: TLBundleC => c.opcode(2) && c.opcode(1)
// opcode === TLMessages.Release ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(2) && !d.opcode(1)
// opcode === TLMessages.Grant ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
}
def isResponse(x: TLChannel): Bool = {
x match {
case a: TLBundleA => false.B
case b: TLBundleB => false.B
case c: TLBundleC => !c.opcode(2) || !c.opcode(1)
// opcode =/= TLMessages.Release &&
// opcode =/= TLMessages.ReleaseData
case d: TLBundleD => true.B // Grant isResponse + isRequest
case e: TLBundleE => true.B
}
}
def hasData(x: TLChannel): Bool = {
val opdata = x match {
case a: TLBundleA => !a.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case b: TLBundleB => !b.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case c: TLBundleC => c.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.ProbeAckData ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
staticHasData(x).map(_.B).getOrElse(opdata)
}
def opcode(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.opcode
case b: TLBundleB => b.opcode
case c: TLBundleC => c.opcode
case d: TLBundleD => d.opcode
}
}
def param(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.param
case b: TLBundleB => b.param
case c: TLBundleC => c.param
case d: TLBundleD => d.param
}
}
def size(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.size
case b: TLBundleB => b.size
case c: TLBundleC => c.size
case d: TLBundleD => d.size
}
}
def data(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.data
case b: TLBundleB => b.data
case c: TLBundleC => c.data
case d: TLBundleD => d.data
}
}
def corrupt(x: TLDataChannel): Bool = {
x match {
case a: TLBundleA => a.corrupt
case b: TLBundleB => b.corrupt
case c: TLBundleC => c.corrupt
case d: TLBundleD => d.corrupt
}
}
def mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.mask
case b: TLBundleB => b.mask
case c: TLBundleC => mask(c.address, c.size)
}
}
def full_mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => mask(a.address, a.size)
case b: TLBundleB => mask(b.address, b.size)
case c: TLBundleC => mask(c.address, c.size)
}
}
def address(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.address
case b: TLBundleB => b.address
case c: TLBundleC => c.address
}
}
def source(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.source
case b: TLBundleB => b.source
case c: TLBundleC => c.source
case d: TLBundleD => d.source
}
}
def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes)
def addr_lo(x: UInt): UInt =
if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0)
def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x))
def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x))
def numBeats(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 1.U
case bundle: TLDataChannel => {
val hasData = this.hasData(bundle)
val size = this.size(bundle)
val cutoff = log2Ceil(manager.beatBytes)
val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U
val decode = UIntToOH(size, maxLgSize+1) >> cutoff
Mux(hasData, decode | small.asUInt, 1.U)
}
}
}
def numBeats1(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 0.U
case bundle: TLDataChannel => {
if (maxLgSize == 0) {
0.U
} else {
val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes)
Mux(hasData(bundle), decode, 0.U)
}
}
}
}
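  // Illustrative numbers: with manager.beatBytes = 8, a 32-byte PutFullData (size = 5) occupies
  // 4 beats, so numBeats returns 4.U and numBeats1 returns 3.U; messages without data (e.g. a Get
  // on channel A) always report numBeats = 1 and numBeats1 = 0.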
def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val beats1 = numBeats1(bits)
val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W))
val counter1 = counter - 1.U
val first = counter === 0.U
val last = counter === 1.U || beats1 === 0.U
val done = last && fire
val count = (beats1 & ~counter1)
when (fire) {
counter := Mux(first, beats1, counter1)
}
(first, last, done, count)
}
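  // Behavior sketch for a 4-beat burst (beats1 = 3): on successive fires, (first, last, count)
  // step through (1,0,0), (0,0,1), (0,0,2), (0,1,3); `done` pulses with the final fire and the
  // internal counter returns to zero, ready for the next message.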
def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1
def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire)
def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid)
def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2
def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire)
def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid)
def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3
def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire)
def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid)
def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3)
}
def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire)
def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid)
def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4)
}
def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire)
def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid)
def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes))
}
def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire)
def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid)
// Does the request need T permissions to be executed?
def needT(a: TLBundleA): Bool = {
val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLPermissions.NtoB -> false.B,
TLPermissions.NtoT -> true.B,
TLPermissions.BtoT -> true.B))
MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array(
TLMessages.PutFullData -> true.B,
TLMessages.PutPartialData -> true.B,
TLMessages.ArithmeticData -> true.B,
TLMessages.LogicalData -> true.B,
TLMessages.Get -> false.B,
TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLHints.PREFETCH_READ -> false.B,
TLHints.PREFETCH_WRITE -> true.B)),
TLMessages.AcquireBlock -> acq_needT,
TLMessages.AcquirePerm -> acq_needT))
}
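  // Quick reference (illustrative): needT is true for all Put/Atomic opcodes, for PREFETCH_WRITE
  // hints, and for Acquires growing to Trunk (NtoT or BtoT); a Get or an AcquireBlock with NtoB
  // returns false.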
// This is a very expensive circuit; use only if you really mean it!
def inFlight(x: TLBundle): (UInt, UInt) = {
val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W))
val bce = manager.anySupportAcquireB && client.anySupportProbe
val (a_first, a_last, _) = firstlast(x.a)
val (b_first, b_last, _) = firstlast(x.b)
val (c_first, c_last, _) = firstlast(x.c)
val (d_first, d_last, _) = firstlast(x.d)
val (e_first, e_last, _) = firstlast(x.e)
val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits))
val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits))
val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits))
val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits))
val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits))
val a_inc = x.a.fire && a_first && a_request
val b_inc = x.b.fire && b_first && b_request
val c_inc = x.c.fire && c_first && c_request
val d_inc = x.d.fire && d_first && d_request
val e_inc = x.e.fire && e_first && e_request
val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil))
val a_dec = x.a.fire && a_last && a_response
val b_dec = x.b.fire && b_last && b_response
val c_dec = x.c.fire && c_last && c_response
val d_dec = x.d.fire && d_last && d_response
val e_dec = x.e.fire && e_last && e_response
val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil))
val next_flight = flight + PopCount(inc) - PopCount(dec)
flight := next_flight
(flight, next_flight)
}
def prettySourceMapping(context: String): String = {
s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n"
}
}
class TLEdgeOut(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
// Transfers
def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquireBlock
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquirePerm
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.Release
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ReleaseData
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) =
Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B)
def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAck
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions, data)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAckData
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B)
def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink)
def GrantAck(toSink: UInt): TLBundleE = {
val e = Wire(new TLBundleE(bundle))
e.sink := toSink
e
}
// Accesses
def Get(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsGetFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Get
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutFullFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutFullData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, mask, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutPartialFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutPartialData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = {
require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsArithmeticFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.ArithmeticData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsLogicalFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.LogicalData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = {
require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsHintFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Hint
a.param := param
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data)
def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAckData
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size)
def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.HintAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
}
class TLEdgeIn(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = {
val todo = x.filter(!_.isEmpty)
val heads = todo.map(_.head)
val tails = todo.map(_.tail)
if (todo.isEmpty) Nil else { heads +: myTranspose(tails) }
}
// Transfers
def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = {
require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}")
val legal = client.supportsProbe(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Probe
b.param := capPermissions
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.Grant
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.GrantData
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B)
def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.ReleaseAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
// Accesses
def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = {
require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsGet(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Get
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsPutFull(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutFullData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, mask, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsPutPartial(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutPartialData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsArithmetic(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.ArithmeticData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsLogical(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.LogicalData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = {
require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsHint(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Hint
b.param := param
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size)
def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied)
def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data)
def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAckData
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B)
def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied)
def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B)
def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.HintAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
}
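// A minimal usage sketch, not part of the original file: the channel-D helpers on
// TLEdgeIn above are what a manager typically uses to answer incoming channel-A
// requests. The node/signal names below are hypothetical.
//
//   val (tl, edge) = node.in(0)                 // TLBundle plus its TLEdgeIn
//   val isGet = tl.a.bits.opcode === TLMessages.Get
//   tl.a.ready := tl.d.ready
//   tl.d.valid := tl.a.valid
//   tl.d.bits := Mux(isGet,
//     edge.AccessAck(tl.a.bits, readData),      // AccessAckData carrying read data
//     edge.AccessAck(tl.a.bits))                // plain AccessAck (e.g. for a Put)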
| module WritebackUnit( // @[NBDcache.scala:471:7]
input clock, // @[NBDcache.scala:471:7]
input reset, // @[NBDcache.scala:471:7]
output io_req_ready, // @[NBDcache.scala:472:14]
input io_req_valid, // @[NBDcache.scala:472:14]
input [19:0] io_req_bits_tag, // @[NBDcache.scala:472:14]
input [5:0] io_req_bits_idx, // @[NBDcache.scala:472:14]
input [1:0] io_req_bits_source, // @[NBDcache.scala:472:14]
input [2:0] io_req_bits_param, // @[NBDcache.scala:472:14]
input [7:0] io_req_bits_way_en, // @[NBDcache.scala:472:14]
input io_req_bits_voluntary, // @[NBDcache.scala:472:14]
input io_meta_read_ready, // @[NBDcache.scala:472:14]
output io_meta_read_valid, // @[NBDcache.scala:472:14]
output [5:0] io_meta_read_bits_idx, // @[NBDcache.scala:472:14]
output [19:0] io_meta_read_bits_tag, // @[NBDcache.scala:472:14]
input io_data_req_ready, // @[NBDcache.scala:472:14]
output io_data_req_valid, // @[NBDcache.scala:472:14]
output [7:0] io_data_req_bits_way_en, // @[NBDcache.scala:472:14]
output [11:0] io_data_req_bits_addr, // @[NBDcache.scala:472:14]
input [63:0] io_data_resp, // @[NBDcache.scala:472:14]
input io_release_ready, // @[NBDcache.scala:472:14]
output io_release_valid, // @[NBDcache.scala:472:14]
output [2:0] io_release_bits_opcode, // @[NBDcache.scala:472:14]
output [2:0] io_release_bits_param, // @[NBDcache.scala:472:14]
output [1:0] io_release_bits_source, // @[NBDcache.scala:472:14]
output [31:0] io_release_bits_address, // @[NBDcache.scala:472:14]
output [63:0] io_release_bits_data // @[NBDcache.scala:472:14]
);
wire io_req_valid_0 = io_req_valid; // @[NBDcache.scala:471:7]
wire [19:0] io_req_bits_tag_0 = io_req_bits_tag; // @[NBDcache.scala:471:7]
wire [5:0] io_req_bits_idx_0 = io_req_bits_idx; // @[NBDcache.scala:471:7]
wire [1:0] io_req_bits_source_0 = io_req_bits_source; // @[NBDcache.scala:471:7]
wire [2:0] io_req_bits_param_0 = io_req_bits_param; // @[NBDcache.scala:471:7]
wire [7:0] io_req_bits_way_en_0 = io_req_bits_way_en; // @[NBDcache.scala:471:7]
wire io_req_bits_voluntary_0 = io_req_bits_voluntary; // @[NBDcache.scala:471:7]
wire io_meta_read_ready_0 = io_meta_read_ready; // @[NBDcache.scala:471:7]
wire io_data_req_ready_0 = io_data_req_ready; // @[NBDcache.scala:471:7]
wire [63:0] io_data_resp_0 = io_data_resp; // @[NBDcache.scala:471:7]
wire io_release_ready_0 = io_release_ready; // @[NBDcache.scala:471:7]
wire [26:0] _r_beats1_decode_T = 27'h3FFC0; // @[package.scala:243:71]
wire [11:0] _r_beats1_decode_T_1 = 12'hFC0; // @[package.scala:243:76]
wire [11:0] _r_beats1_decode_T_2 = 12'h3F; // @[package.scala:243:46]
wire [8:0] r_beats1_decode = 9'h7; // @[Edges.scala:220:59]
wire [2:0] probeResponse_opcode = 3'h5; // @[Edges.scala:433:17]
wire _voluntaryRelease_legal_T_19 = 1'h1; // @[Parameters.scala:91:44]
wire _voluntaryRelease_legal_T_20 = 1'h1; // @[Parameters.scala:684:29]
wire [2:0] voluntaryRelease_opcode = 3'h7; // @[Edges.scala:396:17]
wire io_release_bits_corrupt = 1'h0; // @[NBDcache.scala:471:7]
wire probeResponse_corrupt = 1'h0; // @[Edges.scala:433:17]
wire _voluntaryRelease_legal_T = 1'h0; // @[Parameters.scala:684:29]
wire _voluntaryRelease_legal_T_18 = 1'h0; // @[Parameters.scala:684:54]
wire _voluntaryRelease_legal_T_33 = 1'h0; // @[Parameters.scala:686:26]
wire voluntaryRelease_corrupt = 1'h0; // @[Edges.scala:396:17]
wire _io_release_bits_T_corrupt = 1'h0; // @[NBDcache.scala:545:25]
wire [3:0] io_release_bits_size = 4'h6; // @[NBDcache.scala:471:7]
wire [3:0] probeResponse_size = 4'h6; // @[Edges.scala:433:17]
wire [3:0] voluntaryRelease_size = 4'h6; // @[Edges.scala:396:17]
wire [3:0] _io_release_bits_T_size = 4'h6; // @[NBDcache.scala:545:25]
wire [7:0] io_meta_read_bits_way_en = 8'hFF; // @[NBDcache.scala:471:7]
wire _io_req_ready_T; // @[NBDcache.scala:514:19]
wire [7:0] _io_meta_read_bits_way_en_T = 8'hFF; // @[NBDcache.scala:522:31]
wire fire; // @[NBDcache.scala:516:21]
wire [11:0] _io_data_req_bits_addr_T_2; // @[NBDcache.scala:528:43]
wire [63:0] probeResponse_data = io_data_resp_0; // @[Edges.scala:433:17]
wire [63:0] voluntaryRelease_data = io_data_resp_0; // @[Edges.scala:396:17]
wire [2:0] _io_release_bits_T_opcode; // @[NBDcache.scala:545:25]
wire [2:0] _io_release_bits_T_param; // @[NBDcache.scala:545:25]
wire [1:0] _io_release_bits_T_source; // @[NBDcache.scala:545:25]
wire [31:0] _io_release_bits_T_address; // @[NBDcache.scala:545:25]
wire [63:0] _io_release_bits_T_data; // @[NBDcache.scala:545:25]
wire io_req_ready_0; // @[NBDcache.scala:471:7]
wire [5:0] io_meta_read_bits_idx_0; // @[NBDcache.scala:471:7]
wire [19:0] io_meta_read_bits_tag_0; // @[NBDcache.scala:471:7]
wire io_meta_read_valid_0; // @[NBDcache.scala:471:7]
wire [7:0] io_data_req_bits_way_en_0; // @[NBDcache.scala:471:7]
wire [11:0] io_data_req_bits_addr_0; // @[NBDcache.scala:471:7]
wire io_data_req_valid_0; // @[NBDcache.scala:471:7]
wire [2:0] io_release_bits_opcode_0; // @[NBDcache.scala:471:7]
wire [2:0] io_release_bits_param_0; // @[NBDcache.scala:471:7]
wire [1:0] io_release_bits_source_0; // @[NBDcache.scala:471:7]
wire [31:0] io_release_bits_address_0; // @[NBDcache.scala:471:7]
wire [63:0] io_release_bits_data_0; // @[NBDcache.scala:471:7]
wire io_release_valid_0; // @[NBDcache.scala:471:7]
reg [19:0] req_tag; // @[NBDcache.scala:480:16]
assign io_meta_read_bits_tag_0 = req_tag; // @[NBDcache.scala:471:7, :480:16]
reg [5:0] req_idx; // @[NBDcache.scala:480:16]
assign io_meta_read_bits_idx_0 = req_idx; // @[NBDcache.scala:471:7, :480:16]
reg [1:0] req_source; // @[NBDcache.scala:480:16]
wire [1:0] probeResponse_source = req_source; // @[Edges.scala:433:17]
wire [1:0] voluntaryRelease_source = req_source; // @[Edges.scala:396:17]
reg [2:0] req_param; // @[NBDcache.scala:480:16]
wire [2:0] probeResponse_param = req_param; // @[Edges.scala:433:17]
wire [2:0] voluntaryRelease_param = req_param; // @[Edges.scala:396:17]
reg [7:0] req_way_en; // @[NBDcache.scala:480:16]
assign io_data_req_bits_way_en_0 = req_way_en; // @[NBDcache.scala:471:7, :480:16]
reg req_voluntary; // @[NBDcache.scala:480:16]
reg active; // @[NBDcache.scala:481:23]
reg r1_data_req_fired; // @[NBDcache.scala:482:34]
wire _data_req_cnt_T_2 = r1_data_req_fired; // @[NBDcache.scala:482:34, :500:71]
reg r2_data_req_fired; // @[NBDcache.scala:483:34]
reg [3:0] data_req_cnt; // @[NBDcache.scala:484:29]
wire _T = io_release_ready_0 & io_release_valid_0; // @[Decoupled.scala:51:35]
wire r_beats1_opdata = io_release_bits_opcode_0[0]; // @[Edges.scala:102:36]
wire [8:0] r_beats1 = r_beats1_opdata ? 9'h7 : 9'h0; // @[Edges.scala:102:36, :220:59, :221:14]
reg [8:0] r_counter; // @[Edges.scala:229:27]
wire [9:0] _r_counter1_T = {1'h0, r_counter} - 10'h1; // @[Edges.scala:229:27, :230:28]
wire [8:0] r_counter1 = _r_counter1_T[8:0]; // @[Edges.scala:230:28]
wire r_1 = r_counter == 9'h0; // @[Edges.scala:229:27, :231:25]
wire _r_last_T = r_counter == 9'h1; // @[Edges.scala:229:27, :232:25]
wire _r_last_T_1 = r_beats1 == 9'h0; // @[Edges.scala:221:14, :232:43]
wire last_beat = _r_last_T | _r_last_T_1; // @[Edges.scala:232:{25,33,43}]
wire all_beats_done = last_beat & _T; // @[Decoupled.scala:51:35]
wire [8:0] _r_count_T = ~r_counter1; // @[Edges.scala:230:28, :234:27]
wire [8:0] beat_count = r_beats1 & _r_count_T; // @[Edges.scala:221:14, :234:{25,27}]
wire [8:0] _r_counter_T = r_1 ? r_beats1 : r_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
wire [4:0] _GEN = {1'h0, data_req_cnt}; // @[NBDcache.scala:484:29, :493:36]
wire [4:0] _data_req_cnt_T = _GEN + 5'h1; // @[NBDcache.scala:493:36]
wire [3:0] _data_req_cnt_T_1 = _data_req_cnt_T[3:0]; // @[NBDcache.scala:493:36]
assign io_release_valid_0 = active & r2_data_req_fired; // @[NBDcache.scala:471:7, :481:23, :483:34, :487:20, :488:17, :495:30]
wire [1:0] _data_req_cnt_T_3 = _data_req_cnt_T_2 ? 2'h2 : 2'h1; // @[NBDcache.scala:500:{49,71}]
wire [4:0] _data_req_cnt_T_4 = _GEN - {3'h0, _data_req_cnt_T_3}; // @[NBDcache.scala:493:36, :500:{38,49}]
wire [3:0] _data_req_cnt_T_5 = _data_req_cnt_T_4[3:0]; // @[NBDcache.scala:500:38]
wire _active_T = ~(data_req_cnt[3]); // @[NBDcache.scala:484:29, :504:32]
wire _active_T_1 = ~io_release_ready_0; // @[NBDcache.scala:471:7, :497:12, :504:52]
wire _active_T_2 = _active_T | _active_T_1; // @[NBDcache.scala:504:{32,49,52}]
assign _io_req_ready_T = ~active; // @[NBDcache.scala:481:23, :514:19]
assign io_req_ready_0 = _io_req_ready_T; // @[NBDcache.scala:471:7, :514:19]
wire _fire_T = ~(data_req_cnt[3]); // @[NBDcache.scala:484:29, :504:32, :516:37]
assign fire = active & _fire_T; // @[NBDcache.scala:481:23, :516:{21,37}]
assign io_meta_read_valid_0 = fire; // @[NBDcache.scala:471:7, :516:21]
assign io_data_req_valid_0 = fire; // @[NBDcache.scala:471:7, :516:21]
wire [2:0] _io_data_req_bits_addr_T = data_req_cnt[2:0]; // @[NBDcache.scala:484:29, :527:56]
wire [8:0] _io_data_req_bits_addr_T_1 = {req_idx, _io_data_req_bits_addr_T}; // @[NBDcache.scala:480:16, :527:{34,56}]
assign _io_data_req_bits_addr_T_2 = {_io_data_req_bits_addr_T_1, 3'h0}; // @[NBDcache.scala:500:38, :527:34, :528:43]
assign io_data_req_bits_addr_0 = _io_data_req_bits_addr_T_2; // @[NBDcache.scala:471:7, :528:43]
wire [25:0] _r_address_T = {req_tag, req_idx}; // @[NBDcache.scala:480:16, :530:22]
wire [31:0] r_address = {_r_address_T, 6'h0}; // @[NBDcache.scala:530:{22,41}]
wire [31:0] probeResponse_address = r_address; // @[Edges.scala:433:17]
wire [31:0] _voluntaryRelease_legal_T_1 = r_address; // @[NBDcache.scala:530:41]
wire [31:0] voluntaryRelease_address = r_address; // @[Edges.scala:396:17]
wire [32:0] _voluntaryRelease_legal_T_2 = {1'h0, _voluntaryRelease_legal_T_1}; // @[Parameters.scala:137:{31,41}]
wire [32:0] _voluntaryRelease_legal_T_3 = _voluntaryRelease_legal_T_2 & 33'h8C000000; // @[Parameters.scala:137:{41,46}]
wire [32:0] _voluntaryRelease_legal_T_4 = _voluntaryRelease_legal_T_3; // @[Parameters.scala:137:46]
wire _voluntaryRelease_legal_T_5 = _voluntaryRelease_legal_T_4 == 33'h0; // @[Parameters.scala:137:{46,59}]
wire [31:0] _voluntaryRelease_legal_T_6 = {r_address[31:17], r_address[16:0] ^ 17'h10000}; // @[NBDcache.scala:530:41]
wire [32:0] _voluntaryRelease_legal_T_7 = {1'h0, _voluntaryRelease_legal_T_6}; // @[Parameters.scala:137:{31,41}]
wire [32:0] _voluntaryRelease_legal_T_8 = _voluntaryRelease_legal_T_7 & 33'h8C011000; // @[Parameters.scala:137:{41,46}]
wire [32:0] _voluntaryRelease_legal_T_9 = _voluntaryRelease_legal_T_8; // @[Parameters.scala:137:46]
wire _voluntaryRelease_legal_T_10 = _voluntaryRelease_legal_T_9 == 33'h0; // @[Parameters.scala:137:{46,59}]
wire [31:0] _voluntaryRelease_legal_T_11 = {r_address[31:28], r_address[27:0] ^ 28'hC000000}; // @[NBDcache.scala:530:41]
wire [32:0] _voluntaryRelease_legal_T_12 = {1'h0, _voluntaryRelease_legal_T_11}; // @[Parameters.scala:137:{31,41}]
wire [32:0] _voluntaryRelease_legal_T_13 = _voluntaryRelease_legal_T_12 & 33'h8C000000; // @[Parameters.scala:137:{41,46}]
wire [32:0] _voluntaryRelease_legal_T_14 = _voluntaryRelease_legal_T_13; // @[Parameters.scala:137:46]
wire _voluntaryRelease_legal_T_15 = _voluntaryRelease_legal_T_14 == 33'h0; // @[Parameters.scala:137:{46,59}]
wire _voluntaryRelease_legal_T_16 = _voluntaryRelease_legal_T_5 | _voluntaryRelease_legal_T_10; // @[Parameters.scala:685:42]
wire _voluntaryRelease_legal_T_17 = _voluntaryRelease_legal_T_16 | _voluntaryRelease_legal_T_15; // @[Parameters.scala:685:42]
wire [31:0] _voluntaryRelease_legal_T_21 = {r_address[31:28], r_address[27:0] ^ 28'h8000000}; // @[NBDcache.scala:530:41]
wire [32:0] _voluntaryRelease_legal_T_22 = {1'h0, _voluntaryRelease_legal_T_21}; // @[Parameters.scala:137:{31,41}]
wire [32:0] _voluntaryRelease_legal_T_23 = _voluntaryRelease_legal_T_22 & 33'h8C010000; // @[Parameters.scala:137:{41,46}]
wire [32:0] _voluntaryRelease_legal_T_24 = _voluntaryRelease_legal_T_23; // @[Parameters.scala:137:46]
wire _voluntaryRelease_legal_T_25 = _voluntaryRelease_legal_T_24 == 33'h0; // @[Parameters.scala:137:{46,59}]
wire [31:0] _voluntaryRelease_legal_T_26 = r_address ^ 32'h80000000; // @[NBDcache.scala:530:41]
wire [32:0] _voluntaryRelease_legal_T_27 = {1'h0, _voluntaryRelease_legal_T_26}; // @[Parameters.scala:137:{31,41}]
wire [32:0] _voluntaryRelease_legal_T_28 = _voluntaryRelease_legal_T_27 & 33'h80000000; // @[Parameters.scala:137:{41,46}]
wire [32:0] _voluntaryRelease_legal_T_29 = _voluntaryRelease_legal_T_28; // @[Parameters.scala:137:46]
wire _voluntaryRelease_legal_T_30 = _voluntaryRelease_legal_T_29 == 33'h0; // @[Parameters.scala:137:{46,59}]
wire _voluntaryRelease_legal_T_31 = _voluntaryRelease_legal_T_25 | _voluntaryRelease_legal_T_30; // @[Parameters.scala:685:42]
wire _voluntaryRelease_legal_T_32 = _voluntaryRelease_legal_T_31; // @[Parameters.scala:684:54, :685:42]
wire voluntaryRelease_legal = _voluntaryRelease_legal_T_32; // @[Parameters.scala:684:54, :686:26]
assign _io_release_bits_T_opcode = {1'h1, req_voluntary, 1'h1}; // @[NBDcache.scala:480:16, :545:25]
assign _io_release_bits_T_param = req_voluntary ? voluntaryRelease_param : probeResponse_param; // @[Edges.scala:396:17, :433:17]
assign _io_release_bits_T_source = req_voluntary ? voluntaryRelease_source : probeResponse_source; // @[Edges.scala:396:17, :433:17]
assign _io_release_bits_T_address = req_voluntary ? voluntaryRelease_address : probeResponse_address; // @[Edges.scala:396:17, :433:17]
assign _io_release_bits_T_data = req_voluntary ? voluntaryRelease_data : probeResponse_data; // @[Edges.scala:396:17, :433:17]
assign io_release_bits_opcode_0 = _io_release_bits_T_opcode; // @[NBDcache.scala:471:7, :545:25]
assign io_release_bits_param_0 = _io_release_bits_T_param; // @[NBDcache.scala:471:7, :545:25]
assign io_release_bits_source_0 = _io_release_bits_T_source; // @[NBDcache.scala:471:7, :545:25]
assign io_release_bits_address_0 = _io_release_bits_T_address; // @[NBDcache.scala:471:7, :545:25]
assign io_release_bits_data_0 = _io_release_bits_T_data; // @[NBDcache.scala:471:7, :545:25]
wire _T_3 = io_data_req_ready_0 & io_data_req_valid_0 & io_meta_read_ready_0 & io_meta_read_valid_0; // @[Decoupled.scala:51:35]
wire _GEN_0 = r2_data_req_fired & ~io_release_ready_0; // @[NBDcache.scala:471:7, :483:34, :491:50, :495:30, :497:{12,31}, :498:27]
wire _T_6 = io_req_ready_0 & io_req_valid_0; // @[Decoupled.scala:51:35]
always @(posedge clock) begin // @[NBDcache.scala:471:7]
if (_T_6) begin // @[Decoupled.scala:51:35]
req_tag <= io_req_bits_tag_0; // @[NBDcache.scala:471:7, :480:16]
req_idx <= io_req_bits_idx_0; // @[NBDcache.scala:471:7, :480:16]
req_source <= io_req_bits_source_0; // @[NBDcache.scala:471:7, :480:16]
req_param <= io_req_bits_param_0; // @[NBDcache.scala:471:7, :480:16]
req_way_en <= io_req_bits_way_en_0; // @[NBDcache.scala:471:7, :480:16]
req_voluntary <= io_req_bits_voluntary_0; // @[NBDcache.scala:471:7, :480:16]
end
if (reset) begin // @[NBDcache.scala:471:7]
active <= 1'h0; // @[NBDcache.scala:481:23]
r1_data_req_fired <= 1'h0; // @[NBDcache.scala:482:34]
r2_data_req_fired <= 1'h0; // @[NBDcache.scala:483:34]
data_req_cnt <= 4'h0; // @[NBDcache.scala:484:29]
r_counter <= 9'h0; // @[Edges.scala:229:27]
end
else begin // @[NBDcache.scala:471:7]
active <= _T_6 | (active & r2_data_req_fired & ~r1_data_req_fired ? _active_T_2 : active); // @[Decoupled.scala:51:35]
if (active) begin // @[NBDcache.scala:481:23]
r1_data_req_fired <= ~_GEN_0 & _T_3; // @[Decoupled.scala:51:35]
r2_data_req_fired <= ~_GEN_0 & r1_data_req_fired; // @[NBDcache.scala:482:34, :483:34, :490:23, :491:50, :495:30, :497:31, :498:27, :499:27]
end
if (_T_6) // @[Decoupled.scala:51:35]
data_req_cnt <= 4'h0; // @[NBDcache.scala:484:29]
else if (active) begin // @[NBDcache.scala:481:23]
if (_GEN_0) // @[NBDcache.scala:491:50, :495:30, :497:31, :498:27]
data_req_cnt <= _data_req_cnt_T_5; // @[NBDcache.scala:484:29, :500:38]
else if (_T_3) // @[Decoupled.scala:51:35]
data_req_cnt <= _data_req_cnt_T_1; // @[NBDcache.scala:484:29, :493:36]
end
if (_T) // @[Decoupled.scala:51:35]
r_counter <= _r_counter_T; // @[Edges.scala:229:27, :236:21]
end
  end // always @(posedge clock)
assign io_req_ready = io_req_ready_0; // @[NBDcache.scala:471:7]
assign io_meta_read_valid = io_meta_read_valid_0; // @[NBDcache.scala:471:7]
assign io_meta_read_bits_idx = io_meta_read_bits_idx_0; // @[NBDcache.scala:471:7]
assign io_meta_read_bits_tag = io_meta_read_bits_tag_0; // @[NBDcache.scala:471:7]
assign io_data_req_valid = io_data_req_valid_0; // @[NBDcache.scala:471:7]
assign io_data_req_bits_way_en = io_data_req_bits_way_en_0; // @[NBDcache.scala:471:7]
assign io_data_req_bits_addr = io_data_req_bits_addr_0; // @[NBDcache.scala:471:7]
assign io_release_valid = io_release_valid_0; // @[NBDcache.scala:471:7]
assign io_release_bits_opcode = io_release_bits_opcode_0; // @[NBDcache.scala:471:7]
assign io_release_bits_param = io_release_bits_param_0; // @[NBDcache.scala:471:7]
assign io_release_bits_source = io_release_bits_source_0; // @[NBDcache.scala:471:7]
assign io_release_bits_address = io_release_bits_address_0; // @[NBDcache.scala:471:7]
assign io_release_bits_data = io_release_bits_data_0; // @[NBDcache.scala:471:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File PE.scala:
// See README.md for license details.
package gemmini
import chisel3._
import chisel3.util._
class PEControl[T <: Data : Arithmetic](accType: T) extends Bundle {
val dataflow = UInt(1.W) // TODO make this an Enum
val propagate = UInt(1.W) // Which register should be propagated (and which should be accumulated)?
val shift = UInt(log2Up(accType.getWidth).W) // TODO this isn't correct for Floats
}
class MacUnit[T <: Data](inputType: T, cType: T, dType: T) (implicit ev: Arithmetic[T]) extends Module {
import ev._
val io = IO(new Bundle {
val in_a = Input(inputType)
val in_b = Input(inputType)
val in_c = Input(cType)
val out_d = Output(dType)
})
io.out_d := io.in_c.mac(io.in_a, io.in_b)
}
// TODO update documentation
/**
* A PE implementing a MAC operation. Configured as fully combinational when integrated into a Mesh.
* @param width Data width of operands
*/
class PE[T <: Data](inputType: T, outputType: T, accType: T, df: Dataflow.Value, max_simultaneous_matmuls: Int)
(implicit ev: Arithmetic[T]) extends Module { // Debugging variables
import ev._
val io = IO(new Bundle {
val in_a = Input(inputType)
val in_b = Input(outputType)
val in_d = Input(outputType)
val out_a = Output(inputType)
val out_b = Output(outputType)
val out_c = Output(outputType)
val in_control = Input(new PEControl(accType))
val out_control = Output(new PEControl(accType))
val in_id = Input(UInt(log2Up(max_simultaneous_matmuls).W))
val out_id = Output(UInt(log2Up(max_simultaneous_matmuls).W))
val in_last = Input(Bool())
val out_last = Output(Bool())
val in_valid = Input(Bool())
val out_valid = Output(Bool())
val bad_dataflow = Output(Bool())
})
val cType = if (df == Dataflow.WS) inputType else accType
// When creating PEs that support multiple dataflows, the
// elaboration/synthesis tools often fail to consolidate and de-duplicate
// MAC units. To force mac circuitry to be re-used, we create a "mac_unit"
// module here which just performs a single MAC operation
val mac_unit = Module(new MacUnit(inputType,
if (df == Dataflow.WS) outputType else accType, outputType))
val a = io.in_a
val b = io.in_b
val d = io.in_d
val c1 = Reg(cType)
val c2 = Reg(cType)
val dataflow = io.in_control.dataflow
val prop = io.in_control.propagate
val shift = io.in_control.shift
val id = io.in_id
val last = io.in_last
val valid = io.in_valid
io.out_a := a
io.out_control.dataflow := dataflow
io.out_control.propagate := prop
io.out_control.shift := shift
io.out_id := id
io.out_last := last
io.out_valid := valid
mac_unit.io.in_a := a
val last_s = RegEnable(prop, valid)
val flip = last_s =/= prop
val shift_offset = Mux(flip, shift, 0.U)
// Which dataflow are we using?
val OUTPUT_STATIONARY = Dataflow.OS.id.U(1.W)
val WEIGHT_STATIONARY = Dataflow.WS.id.U(1.W)
// Is c1 being computed on, or propagated forward (in the output-stationary dataflow)?
val COMPUTE = 0.U(1.W)
val PROPAGATE = 1.U(1.W)
io.bad_dataflow := false.B
when ((df == Dataflow.OS).B || ((df == Dataflow.BOTH).B && dataflow === OUTPUT_STATIONARY)) {
when(prop === PROPAGATE) {
io.out_c := (c1 >> shift_offset).clippedToWidthOf(outputType)
io.out_b := b
mac_unit.io.in_b := b.asTypeOf(inputType)
mac_unit.io.in_c := c2
c2 := mac_unit.io.out_d
c1 := d.withWidthOf(cType)
}.otherwise {
io.out_c := (c2 >> shift_offset).clippedToWidthOf(outputType)
io.out_b := b
mac_unit.io.in_b := b.asTypeOf(inputType)
mac_unit.io.in_c := c1
c1 := mac_unit.io.out_d
c2 := d.withWidthOf(cType)
}
}.elsewhen ((df == Dataflow.WS).B || ((df == Dataflow.BOTH).B && dataflow === WEIGHT_STATIONARY)) {
when(prop === PROPAGATE) {
io.out_c := c1
mac_unit.io.in_b := c2.asTypeOf(inputType)
mac_unit.io.in_c := b
io.out_b := mac_unit.io.out_d
c1 := d
}.otherwise {
io.out_c := c2
mac_unit.io.in_b := c1.asTypeOf(inputType)
mac_unit.io.in_c := b
io.out_b := mac_unit.io.out_d
c2 := d
}
}.otherwise {
io.bad_dataflow := true.B
//assert(false.B, "unknown dataflow")
io.out_c := DontCare
io.out_b := DontCare
mac_unit.io.in_b := b.asTypeOf(inputType)
mac_unit.io.in_c := c2
}
when (!valid) {
c1 := c1
c2 := c2
mac_unit.io.in_b := DontCare
mac_unit.io.in_c := DontCare
}
}
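// A hedged instantiation sketch (types, sizes, and signal names are illustrative, not from the original file):
//   val pe = Module(new PE(SInt(8.W), SInt(20.W), SInt(32.W), Dataflow.BOTH, max_simultaneous_matmuls = 4))
//   pe.io.in_a := a; pe.io.in_b := b; pe.io.in_d := d
//   pe.io.in_control.dataflow := Dataflow.WS.id.U
//   pe.io.in_valid := true.B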
File Arithmetic.scala:
// A simple type class for Chisel datatypes that can add and multiply. To add your own type, simply create your own:
// implicit MyTypeArithmetic extends Arithmetic[MyType] { ... }
package gemmini
import chisel3._
import chisel3.util._
import hardfloat._
// Bundles that represent the raw bits of custom datatypes
case class Float(expWidth: Int, sigWidth: Int) extends Bundle {
val bits = UInt((expWidth + sigWidth).W)
val bias: Int = (1 << (expWidth-1)) - 1
}
case class DummySInt(w: Int) extends Bundle {
val bits = UInt(w.W)
def dontCare: DummySInt = {
val o = Wire(new DummySInt(w))
o.bits := 0.U
o
}
}
// The Arithmetic typeclass which implements various arithmetic operations on custom datatypes
abstract class Arithmetic[T <: Data] {
implicit def cast(t: T): ArithmeticOps[T]
}
abstract class ArithmeticOps[T <: Data](self: T) {
def *(t: T): T
def mac(m1: T, m2: T): T // Returns (m1 * m2 + self)
def +(t: T): T
def -(t: T): T
def >>(u: UInt): T // This is a rounding shift! Rounds away from 0
def >(t: T): Bool
def identity: T
def withWidthOf(t: T): T
def clippedToWidthOf(t: T): T // Like "withWidthOf", except that it saturates
def relu: T
def zero: T
def minimum: T
// Optional parameters, which only need to be defined if you want to enable various optimizations for transformers
def divider(denom_t: UInt, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[T])] = None
def sqrt: Option[(DecoupledIO[UInt], DecoupledIO[T])] = None
def reciprocal[U <: Data](u: U, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[U])] = None
def mult_with_reciprocal[U <: Data](reciprocal: U) = self
}
object Arithmetic {
implicit object UIntArithmetic extends Arithmetic[UInt] {
override implicit def cast(self: UInt) = new ArithmeticOps(self) {
override def *(t: UInt) = self * t
override def mac(m1: UInt, m2: UInt) = m1 * m2 + self
override def +(t: UInt) = self + t
override def -(t: UInt) = self - t
override def >>(u: UInt) = {
// The equation we use can be found here: https://riscv.github.io/documents/riscv-v-spec/#_vector_fixed_point_rounding_mode_register_vxrm
// TODO Do we need to explicitly handle the cases where "u" is a small number (like 0)? What is the default behavior here?
val point_five = Mux(u === 0.U, 0.U, self(u - 1.U))
val zeros = Mux(u <= 1.U, 0.U, self.asUInt & ((1.U << (u - 1.U)).asUInt - 1.U)) =/= 0.U
val ones_digit = self(u)
val r = point_five & (zeros | ones_digit)
(self >> u).asUInt + r
}
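// Worked example of the rounding above (by my reading this matches the vxrm
// round-to-nearest-even rule, not a strict round-away-from-zero):
//   self = 11 (b1011), u = 2: point_five = 1, zeros = 1                  -> r = 1, result = 2 + 1 = 3  (2.75 rounds up)
//   self = 6  (b0110), u = 2: point_five = 1, zeros = 0, ones_digit = 1  -> r = 1, result = 1 + 1 = 2  (1.5 ties to even)
//   self = 2  (b0010), u = 2: point_five = 1, zeros = 0, ones_digit = 0  -> r = 0, result = 0          (0.5 ties to even)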
override def >(t: UInt): Bool = self > t
override def withWidthOf(t: UInt) = self.asTypeOf(t)
override def clippedToWidthOf(t: UInt) = {
val sat = ((1 << (t.getWidth-1))-1).U
Mux(self > sat, sat, self)(t.getWidth-1, 0)
}
override def relu: UInt = self
override def zero: UInt = 0.U
override def identity: UInt = 1.U
override def minimum: UInt = 0.U
}
}
implicit object SIntArithmetic extends Arithmetic[SInt] {
override implicit def cast(self: SInt) = new ArithmeticOps(self) {
override def *(t: SInt) = self * t
override def mac(m1: SInt, m2: SInt) = m1 * m2 + self
override def +(t: SInt) = self + t
override def -(t: SInt) = self - t
override def >>(u: UInt) = {
// The equation we use can be found here: https://riscv.github.io/documents/riscv-v-spec/#_vector_fixed_point_rounding_mode_register_vxrm
// TODO Do we need to explicitly handle the cases where "u" is a small number (like 0)? What is the default behavior here?
val point_five = Mux(u === 0.U, 0.U, self(u - 1.U))
val zeros = Mux(u <= 1.U, 0.U, self.asUInt & ((1.U << (u - 1.U)).asUInt - 1.U)) =/= 0.U
val ones_digit = self(u)
val r = (point_five & (zeros | ones_digit)).asBool
(self >> u).asSInt + Mux(r, 1.S, 0.S)
}
override def >(t: SInt): Bool = self > t
override def withWidthOf(t: SInt) = {
if (self.getWidth >= t.getWidth)
self(t.getWidth-1, 0).asSInt
else {
val sign_bits = t.getWidth - self.getWidth
val sign = self(self.getWidth-1)
Cat(Cat(Seq.fill(sign_bits)(sign)), self).asTypeOf(t)
}
}
override def clippedToWidthOf(t: SInt): SInt = {
val maxsat = ((1 << (t.getWidth-1))-1).S
val minsat = (-(1 << (t.getWidth-1))).S
MuxCase(self, Seq((self > maxsat) -> maxsat, (self < minsat) -> minsat))(t.getWidth-1, 0).asSInt
}
override def relu: SInt = Mux(self >= 0.S, self, 0.S)
override def zero: SInt = 0.S
override def identity: SInt = 1.S
override def minimum: SInt = (-(1 << (self.getWidth-1))).S
override def divider(denom_t: UInt, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[SInt])] = {
// TODO this uses a floating point divider, but we should use an integer divider instead
val input = Wire(Decoupled(denom_t.cloneType))
val output = Wire(Decoupled(self.cloneType))
// We translate our integer to floating-point form so that we can use the hardfloat divider
val expWidth = log2Up(self.getWidth) + 1
val sigWidth = self.getWidth
def sin_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def uin_to_float(x: UInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := false.B
in_to_rec_fn.io.in := x
in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def float_to_in(x: UInt) = {
val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth))
rec_fn_to_in.io.signedOut := true.B
rec_fn_to_in.io.in := x
rec_fn_to_in.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
rec_fn_to_in.io.out.asSInt
}
val self_rec = sin_to_float(self)
val denom_rec = uin_to_float(input.bits)
// Instantiate the hardfloat divider
val divider = Module(new DivSqrtRecFN_small(expWidth, sigWidth, options))
input.ready := divider.io.inReady
divider.io.inValid := input.valid
divider.io.sqrtOp := false.B
divider.io.a := self_rec
divider.io.b := denom_rec
divider.io.roundingMode := consts.round_minMag
divider.io.detectTininess := consts.tininess_afterRounding
output.valid := divider.io.outValid_div
output.bits := float_to_in(divider.io.out)
assert(!output.valid || output.ready)
Some((input, output))
}
override def sqrt: Option[(DecoupledIO[UInt], DecoupledIO[SInt])] = {
// TODO this uses a floating point square-root unit, but we should use an integer one instead
val input = Wire(Decoupled(UInt(0.W)))
val output = Wire(Decoupled(self.cloneType))
input.bits := DontCare
// We translate our integer to floating-point form so that we can use the hardfloat divider
val expWidth = log2Up(self.getWidth) + 1
val sigWidth = self.getWidth
def in_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def float_to_in(x: UInt) = {
val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth))
rec_fn_to_in.io.signedOut := true.B
rec_fn_to_in.io.in := x
rec_fn_to_in.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
rec_fn_to_in.io.out.asSInt
}
val self_rec = in_to_float(self)
// Instantiate the hardfloat sqrt
val sqrter = Module(new DivSqrtRecFN_small(expWidth, sigWidth, 0))
input.ready := sqrter.io.inReady
sqrter.io.inValid := input.valid
sqrter.io.sqrtOp := true.B
sqrter.io.a := self_rec
sqrter.io.b := DontCare
sqrter.io.roundingMode := consts.round_minMag
sqrter.io.detectTininess := consts.tininess_afterRounding
output.valid := sqrter.io.outValid_sqrt
output.bits := float_to_in(sqrter.io.out)
assert(!output.valid || output.ready)
Some((input, output))
}
override def reciprocal[U <: Data](u: U, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[U])] = u match {
case Float(expWidth, sigWidth) =>
val input = Wire(Decoupled(UInt(0.W)))
val output = Wire(Decoupled(u.cloneType))
input.bits := DontCare
// We translate our integer to floating-point form so that we can use the hardfloat divider
def in_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
val self_rec = in_to_float(self)
val one_rec = in_to_float(1.S)
// Instantiate the hardfloat divider
val divider = Module(new DivSqrtRecFN_small(expWidth, sigWidth, options))
input.ready := divider.io.inReady
divider.io.inValid := input.valid
divider.io.sqrtOp := false.B
divider.io.a := one_rec
divider.io.b := self_rec
divider.io.roundingMode := consts.round_near_even
divider.io.detectTininess := consts.tininess_afterRounding
output.valid := divider.io.outValid_div
output.bits := fNFromRecFN(expWidth, sigWidth, divider.io.out).asTypeOf(u)
assert(!output.valid || output.ready)
Some((input, output))
case _ => None
}
override def mult_with_reciprocal[U <: Data](reciprocal: U): SInt = reciprocal match {
case recip @ Float(expWidth, sigWidth) =>
def in_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def float_to_in(x: UInt) = {
val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth))
rec_fn_to_in.io.signedOut := true.B
rec_fn_to_in.io.in := x
rec_fn_to_in.io.roundingMode := consts.round_minMag
rec_fn_to_in.io.out.asSInt
}
val self_rec = in_to_float(self)
val reciprocal_rec = recFNFromFN(expWidth, sigWidth, recip.bits)
// Instantiate the hardfloat multiplier
val muladder = Module(new MulRecFN(expWidth, sigWidth))
muladder.io.roundingMode := consts.round_near_even
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := self_rec
muladder.io.b := reciprocal_rec
float_to_in(muladder.io.out)
case _ => self
}
}
}
implicit object FloatArithmetic extends Arithmetic[Float] {
// TODO Floating point arithmetic currently switches between recoded and standard formats for every operation. However, it should stay in the recoded format as it travels through the systolic array
override implicit def cast(self: Float): ArithmeticOps[Float] = new ArithmeticOps(self) {
override def *(t: Float): Float = {
val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth))
t_resizer.io.in := t_rec
t_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
t_resizer.io.detectTininess := consts.tininess_afterRounding
val t_rec_resized = t_resizer.io.out
val muladder = Module(new MulRecFN(self.expWidth, self.sigWidth))
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := self_rec
muladder.io.b := t_rec_resized
val out = Wire(Float(self.expWidth, self.sigWidth))
out.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
out
}
override def mac(m1: Float, m2: Float): Float = {
// Recode all operands
val m1_rec = recFNFromFN(m1.expWidth, m1.sigWidth, m1.bits)
val m2_rec = recFNFromFN(m2.expWidth, m2.sigWidth, m2.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Resize m1 to self's width
val m1_resizer = Module(new RecFNToRecFN(m1.expWidth, m1.sigWidth, self.expWidth, self.sigWidth))
m1_resizer.io.in := m1_rec
m1_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
m1_resizer.io.detectTininess := consts.tininess_afterRounding
val m1_rec_resized = m1_resizer.io.out
// Resize m2 to self's width
val m2_resizer = Module(new RecFNToRecFN(m2.expWidth, m2.sigWidth, self.expWidth, self.sigWidth))
m2_resizer.io.in := m2_rec
m2_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
m2_resizer.io.detectTininess := consts.tininess_afterRounding
val m2_rec_resized = m2_resizer.io.out
// Perform multiply-add
val muladder = Module(new MulAddRecFN(self.expWidth, self.sigWidth))
muladder.io.op := 0.U
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := m1_rec_resized
muladder.io.b := m2_rec_resized
muladder.io.c := self_rec
// Convert result to standard format // TODO remove these intermediate recodings
val out = Wire(Float(self.expWidth, self.sigWidth))
out.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
out
}
override def +(t: Float): Float = {
require(self.getWidth >= t.getWidth) // This just makes it easier to write the resizing code
// Recode all operands
val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Generate 1 as a float
val in_to_rec_fn = Module(new INToRecFN(1, self.expWidth, self.sigWidth))
in_to_rec_fn.io.signedIn := false.B
in_to_rec_fn.io.in := 1.U
in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
val one_rec = in_to_rec_fn.io.out
// Resize t
val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth))
t_resizer.io.in := t_rec
t_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
t_resizer.io.detectTininess := consts.tininess_afterRounding
val t_rec_resized = t_resizer.io.out
// Perform addition
val muladder = Module(new MulAddRecFN(self.expWidth, self.sigWidth))
muladder.io.op := 0.U
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := t_rec_resized
muladder.io.b := one_rec
muladder.io.c := self_rec
val result = Wire(Float(self.expWidth, self.sigWidth))
result.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
result
}
override def -(t: Float): Float = {
val t_sgn = t.bits(t.getWidth-1)
val neg_t = Cat(~t_sgn, t.bits(t.getWidth-2,0)).asTypeOf(t)
self + neg_t
}
override def >>(u: UInt): Float = {
// Recode self
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Get 2^(-u) as a recoded float
val shift_exp = Wire(UInt(self.expWidth.W))
shift_exp := self.bias.U - u
val shift_fn = Cat(0.U(1.W), shift_exp, 0.U((self.sigWidth-1).W))
val shift_rec = recFNFromFN(self.expWidth, self.sigWidth, shift_fn)
assert(shift_exp =/= 0.U, "scaling by denormalized numbers is not currently supported")
// Multiply self and 2^(-u)
val muladder = Module(new MulRecFN(self.expWidth, self.sigWidth))
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := self_rec
muladder.io.b := shift_rec
val result = Wire(Float(self.expWidth, self.sigWidth))
result.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
result
}
override def >(t: Float): Bool = {
// Recode all operands
val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Resize t to self's width
val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth))
t_resizer.io.in := t_rec
t_resizer.io.roundingMode := consts.round_near_even
t_resizer.io.detectTininess := consts.tininess_afterRounding
val t_rec_resized = t_resizer.io.out
val comparator = Module(new CompareRecFN(self.expWidth, self.sigWidth))
comparator.io.a := self_rec
comparator.io.b := t_rec_resized
comparator.io.signaling := false.B
comparator.io.gt
}
override def withWidthOf(t: Float): Float = {
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
val resizer = Module(new RecFNToRecFN(self.expWidth, self.sigWidth, t.expWidth, t.sigWidth))
resizer.io.in := self_rec
resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
resizer.io.detectTininess := consts.tininess_afterRounding
val result = Wire(Float(t.expWidth, t.sigWidth))
result.bits := fNFromRecFN(t.expWidth, t.sigWidth, resizer.io.out)
result
}
override def clippedToWidthOf(t: Float): Float = {
// TODO check for overflow. Right now, we just assume that overflow doesn't happen
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
val resizer = Module(new RecFNToRecFN(self.expWidth, self.sigWidth, t.expWidth, t.sigWidth))
resizer.io.in := self_rec
resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
resizer.io.detectTininess := consts.tininess_afterRounding
val result = Wire(Float(t.expWidth, t.sigWidth))
result.bits := fNFromRecFN(t.expWidth, t.sigWidth, resizer.io.out)
result
}
override def relu: Float = {
val raw = rawFloatFromFN(self.expWidth, self.sigWidth, self.bits)
val result = Wire(Float(self.expWidth, self.sigWidth))
result.bits := Mux(!raw.isZero && raw.sign, 0.U, self.bits)
result
}
override def zero: Float = 0.U.asTypeOf(self)
override def identity: Float = Cat(0.U(2.W), ~(0.U((self.expWidth-1).W)), 0.U((self.sigWidth-1).W)).asTypeOf(self)
override def minimum: Float = Cat(1.U, ~(0.U(self.expWidth.W)), 0.U((self.sigWidth-1).W)).asTypeOf(self)
}
}
implicit object DummySIntArithmetic extends Arithmetic[DummySInt] {
override implicit def cast(self: DummySInt) = new ArithmeticOps(self) {
override def *(t: DummySInt) = self.dontCare
override def mac(m1: DummySInt, m2: DummySInt) = self.dontCare
override def +(t: DummySInt) = self.dontCare
override def -(t: DummySInt) = self.dontCare
override def >>(t: UInt) = self.dontCare
override def >(t: DummySInt): Bool = false.B
override def identity = self.dontCare
override def withWidthOf(t: DummySInt) = self.dontCare
override def clippedToWidthOf(t: DummySInt) = self.dontCare
override def relu = self.dontCare
override def zero = self.dontCare
override def minimum: DummySInt = self.dontCare
}
}
}
| module MacUnit_51( // @[PE.scala:14:7]
input clock, // @[PE.scala:14:7]
input reset, // @[PE.scala:14:7]
input [7:0] io_in_a, // @[PE.scala:16:14]
input [7:0] io_in_b, // @[PE.scala:16:14]
input [19:0] io_in_c, // @[PE.scala:16:14]
output [19:0] io_out_d // @[PE.scala:16:14]
);
wire [7:0] io_in_a_0 = io_in_a; // @[PE.scala:14:7]
wire [7:0] io_in_b_0 = io_in_b; // @[PE.scala:14:7]
wire [19:0] io_in_c_0 = io_in_c; // @[PE.scala:14:7]
wire [19:0] _io_out_d_T_3; // @[Arithmetic.scala:93:54]
wire [19:0] io_out_d_0; // @[PE.scala:14:7]
wire [15:0] _io_out_d_T = {{8{io_in_a_0[7]}}, io_in_a_0} * {{8{io_in_b_0[7]}}, io_in_b_0}; // @[PE.scala:14:7]
wire [20:0] _io_out_d_T_1 = {{5{_io_out_d_T[15]}}, _io_out_d_T} + {io_in_c_0[19], io_in_c_0}; // @[PE.scala:14:7]
wire [19:0] _io_out_d_T_2 = _io_out_d_T_1[19:0]; // @[Arithmetic.scala:93:54]
assign _io_out_d_T_3 = _io_out_d_T_2; // @[Arithmetic.scala:93:54]
assign io_out_d_0 = _io_out_d_T_3; // @[PE.scala:14:7]
assign io_out_d = io_out_d_0; // @[PE.scala:14:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File PlusArg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.experimental._
import chisel3.util.HasBlackBoxResource
@deprecated("This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05")
case class PlusArgInfo(default: BigInt, docstring: String)
/** Case class for PlusArg information
*
* @tparam A scala type of the PlusArg value
* @param default optional default value
* @param docstring text to include in the help
* @param doctype description of the Verilog type of the PlusArg value (e.g. STRING, INT)
*/
private case class PlusArgContainer[A](default: Option[A], docstring: String, doctype: String)
/** Typeclass for converting a type to a doctype string
* @tparam A some type
*/
trait Doctypeable[A] {
/** Return the doctype string for some option */
def toDoctype(a: Option[A]): String
}
/** Object containing implementations of the Doctypeable typeclass */
object Doctypes {
/** Converts an Int => "INT" */
implicit val intToDoctype = new Doctypeable[Int] { def toDoctype(a: Option[Int]) = "INT" }
/** Converts a BigInt => "INT" */
implicit val bigIntToDoctype = new Doctypeable[BigInt] { def toDoctype(a: Option[BigInt]) = "INT" }
/** Converts a String => "STRING" */
implicit val stringToDoctype = new Doctypeable[String] { def toDoctype(a: Option[String]) = "STRING" }
}
class plusarg_reader(val format: String, val default: BigInt, val docstring: String, val width: Int) extends BlackBox(Map(
"FORMAT" -> StringParam(format),
"DEFAULT" -> IntParam(default),
"WIDTH" -> IntParam(width)
)) with HasBlackBoxResource {
val io = IO(new Bundle {
val out = Output(UInt(width.W))
})
addResource("/vsrc/plusarg_reader.v")
}
/* This wrapper class has no outputs, making it clear it is a simulation-only construct */
class PlusArgTimeout(val format: String, val default: BigInt, val docstring: String, val width: Int) extends Module {
val io = IO(new Bundle {
val count = Input(UInt(width.W))
})
val max = Module(new plusarg_reader(format, default, docstring, width)).io.out
when (max > 0.U) {
assert (io.count < max, s"Timeout exceeded: $docstring")
}
}
import Doctypes._
object PlusArg
{
/** PlusArg("foo") will return 42.U if the simulation is run with +foo=42
* Do not use this as an initial register value. The value is set in an
* initial block and thus accessing it from another initial is racey.
* Add a docstring to document the arg, which can be dumped in an elaboration
* pass.
*/
def apply(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32): UInt = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new plusarg_reader(name + "=%d", default, docstring, width)).io.out
}
/** PlusArg.timeout(name, default, docstring)(count) will use chisel.assert
* to kill the simulation when count exceeds the specified integer argument.
* Default 0 will never assert.
*/
def timeout(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32)(count: UInt): Unit = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new PlusArgTimeout(name + "=%d", default, docstring, width)).io.count := count
}
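  /* A hedged usage sketch (signal and plusarg names hypothetical, not from the original file):
   *   val cycles = RegInit(0.U(32.W))
   *   cycles := cycles + 1.U
   *   PlusArg.timeout("max_cycles", docstring = "kill the simulation after this many cycles")(cycles)
   * Run with +max_cycles=1000000 to arm the assertion; the default of 0 never fires.
   */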
}
object PlusArgArtefacts {
private var artefacts: Map[String, PlusArgContainer[_]] = Map.empty
/* Add a new PlusArg */
@deprecated(
"Use `Some(BigInt)` to specify a `default` value. This will be removed in Rocket Chip 2020.08",
"Rocket Chip 2020.05"
)
def append(name: String, default: BigInt, docstring: String): Unit = append(name, Some(default), docstring)
/** Add a new PlusArg
*
* @tparam A scala type of the PlusArg value
* @param name name for the PlusArg
* @param default optional default value
* @param docstring text to include in the help
*/
def append[A : Doctypeable](name: String, default: Option[A], docstring: String): Unit =
artefacts = artefacts ++
Map(name -> PlusArgContainer(default, docstring, implicitly[Doctypeable[A]].toDoctype(default)))
/* From plus args, generate help text */
private def serializeHelp_cHeader(tab: String = ""): String = artefacts
.map{ case(arg, info) =>
s"""|$tab+$arg=${info.doctype}\\n\\
|$tab${" "*20}${info.docstring}\\n\\
|""".stripMargin ++ info.default.map{ case default =>
s"$tab${" "*22}(default=${default})\\n\\\n"}.getOrElse("")
}.toSeq.mkString("\\n\\\n") ++ "\""
/* From plus args, generate a char array of their names */
private def serializeArray_cHeader(tab: String = ""): String = {
val prettyTab = tab + " " * 44 // Length of 'static const ...'
s"${tab}static const char * verilog_plusargs [] = {\\\n" ++
artefacts
.map{ case(arg, _) => s"""$prettyTab"$arg",\\\n""" }
.mkString("")++
s"${prettyTab}0};"
}
/* Generate C code to be included in emulator.cc that helps with
* argument parsing based on available Verilog PlusArgs */
def serialize_cHeader(): String =
s"""|#define PLUSARG_USAGE_OPTIONS \"EMULATOR VERILOG PLUSARGS\\n\\
|${serializeHelp_cHeader(" "*7)}
|${serializeArray_cHeader()}
|""".stripMargin
}
File Nodes.scala:
package constellation.channel
import chisel3._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.{Parameters, Field}
import freechips.rocketchip.diplomacy._
case class EmptyParams()
case class ChannelEdgeParams(cp: ChannelParams, p: Parameters)
object ChannelImp extends SimpleNodeImp[EmptyParams, ChannelParams, ChannelEdgeParams, Channel] {
def edge(pd: EmptyParams, pu: ChannelParams, p: Parameters, sourceInfo: SourceInfo) = {
ChannelEdgeParams(pu, p)
}
def bundle(e: ChannelEdgeParams) = new Channel(e.cp)(e.p)
def render(e: ChannelEdgeParams) = if (e.cp.possibleFlows.size == 0) {
RenderedEdge(colour = "ffffff", label = "X")
} else {
RenderedEdge(colour = "#0000ff", label = e.cp.payloadBits.toString)
}
override def monitor(bundle: Channel, edge: ChannelEdgeParams): Unit = {
val monitor = Module(new NoCMonitor(edge.cp)(edge.p))
monitor.io.in := bundle
}
// TODO: Add nodepath stuff? override def mixO, override def mixI
}
case class ChannelSourceNode(val destId: Int)(implicit valName: ValName) extends SourceNode(ChannelImp)(Seq(EmptyParams()))
case class ChannelDestNode(val destParams: ChannelParams)(implicit valName: ValName) extends SinkNode(ChannelImp)(Seq(destParams))
case class ChannelAdapterNode(
slaveFn: ChannelParams => ChannelParams = { d => d })(
implicit valName: ValName) extends AdapterNode(ChannelImp)((e: EmptyParams) => e, slaveFn)
case class ChannelIdentityNode()(implicit valName: ValName) extends IdentityNode(ChannelImp)()
case class ChannelEphemeralNode()(implicit valName: ValName) extends EphemeralNode(ChannelImp)()
case class IngressChannelEdgeParams(cp: IngressChannelParams, p: Parameters)
case class EgressChannelEdgeParams(cp: EgressChannelParams, p: Parameters)
object IngressChannelImp extends SimpleNodeImp[EmptyParams, IngressChannelParams, IngressChannelEdgeParams, IngressChannel] {
def edge(pd: EmptyParams, pu: IngressChannelParams, p: Parameters, sourceInfo: SourceInfo) = {
IngressChannelEdgeParams(pu, p)
}
def bundle(e: IngressChannelEdgeParams) = new IngressChannel(e.cp)(e.p)
def render(e: IngressChannelEdgeParams) = if (e.cp.possibleFlows.size == 0) {
RenderedEdge(colour = "ffffff", label = "X")
} else {
RenderedEdge(colour = "#00ff00", label = e.cp.payloadBits.toString)
}
}
object EgressChannelImp extends SimpleNodeImp[EmptyParams, EgressChannelParams, EgressChannelEdgeParams, EgressChannel] {
def edge(pd: EmptyParams, pu: EgressChannelParams, p: Parameters, sourceInfo: SourceInfo) = {
EgressChannelEdgeParams(pu, p)
}
def bundle(e: EgressChannelEdgeParams) = new EgressChannel(e.cp)(e.p)
def render(e: EgressChannelEdgeParams) = if (e.cp.possibleFlows.size == 0) {
RenderedEdge(colour = "ffffff", label = "X")
} else {
RenderedEdge(colour = "#ff0000", label = e.cp.payloadBits.toString)
}
}
case class IngressChannelSourceNode(val destId: Int)(implicit valName: ValName) extends SourceNode(IngressChannelImp)(Seq(EmptyParams()))
case class IngressChannelDestNode(val destParams: IngressChannelParams)(implicit valName: ValName) extends SinkNode(IngressChannelImp)(Seq(destParams))
case class EgressChannelSourceNode(val egressId: Int)(implicit valName: ValName) extends SourceNode(EgressChannelImp)(Seq(EmptyParams()))
case class EgressChannelDestNode(val destParams: EgressChannelParams)(implicit valName: ValName) extends SinkNode(EgressChannelImp)(Seq(destParams))
case class IngressChannelAdapterNode(
slaveFn: IngressChannelParams => IngressChannelParams = { d => d })(
implicit valName: ValName) extends AdapterNode(IngressChannelImp)(m => m, slaveFn)
case class EgressChannelAdapterNode(
slaveFn: EgressChannelParams => EgressChannelParams = { d => d })(
implicit valName: ValName) extends AdapterNode(EgressChannelImp)(m => m, slaveFn)
case class IngressChannelIdentityNode()(implicit valName: ValName) extends IdentityNode(IngressChannelImp)()
case class EgressChannelIdentityNode()(implicit valName: ValName) extends IdentityNode(EgressChannelImp)()
case class IngressChannelEphemeralNode()(implicit valName: ValName) extends EphemeralNode(IngressChannelImp)()
case class EgressChannelEphemeralNode()(implicit valName: ValName) extends EphemeralNode(EgressChannelImp)()
File Router.scala:
package constellation.router
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.{Field, Parameters}
import freechips.rocketchip.diplomacy._
import freechips.rocketchip.util._
import constellation.channel._
import constellation.routing.{RoutingRelation}
import constellation.noc.{HasNoCParams}
case class UserRouterParams(
// Payload width. Must match payload width on all channels attached to this routing node
payloadBits: Int = 64,
// Combines SA and ST stages (removes pipeline register)
combineSAST: Boolean = false,
// Combines RC and VA stages (removes pipeline register)
combineRCVA: Boolean = false,
// Adds combinational path from SA to VA
coupleSAVA: Boolean = false,
vcAllocator: VCAllocatorParams => Parameters => VCAllocator = (vP) => (p) => new RotatingSingleVCAllocator(vP)(p)
)
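// Hedged example (not part of the original file): a user-level configuration that keeps the
// default rotating VC allocator but folds the SA and ST stages into one cycle. The values are
// illustrative, not taken from any particular NoC configuration.
object ExampleRouterUserParams {
  val cfg = UserRouterParams(
    payloadBits = 64,
    combineSAST = true,
    vcAllocator = (vP) => (p) => new RotatingSingleVCAllocator(vP)(p)
  )
}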
case class RouterParams(
nodeId: Int,
nIngress: Int,
nEgress: Int,
user: UserRouterParams
)
trait HasRouterOutputParams {
def outParams: Seq[ChannelParams]
def egressParams: Seq[EgressChannelParams]
def allOutParams = outParams ++ egressParams
def nOutputs = outParams.size
def nEgress = egressParams.size
def nAllOutputs = allOutParams.size
}
trait HasRouterInputParams {
def inParams: Seq[ChannelParams]
def ingressParams: Seq[IngressChannelParams]
def allInParams = inParams ++ ingressParams
def nInputs = inParams.size
def nIngress = ingressParams.size
def nAllInputs = allInParams.size
}
trait HasRouterParams
{
def routerParams: RouterParams
def nodeId = routerParams.nodeId
def payloadBits = routerParams.user.payloadBits
}
class DebugBundle(val nIn: Int) extends Bundle {
val va_stall = Vec(nIn, UInt())
val sa_stall = Vec(nIn, UInt())
}
class Router(
val routerParams: RouterParams,
preDiplomaticInParams: Seq[ChannelParams],
preDiplomaticIngressParams: Seq[IngressChannelParams],
outDests: Seq[Int],
egressIds: Seq[Int]
)(implicit p: Parameters) extends LazyModule with HasNoCParams with HasRouterParams {
val allPreDiplomaticInParams = preDiplomaticInParams ++ preDiplomaticIngressParams
val destNodes = preDiplomaticInParams.map(u => ChannelDestNode(u))
val sourceNodes = outDests.map(u => ChannelSourceNode(u))
val ingressNodes = preDiplomaticIngressParams.map(u => IngressChannelDestNode(u))
val egressNodes = egressIds.map(u => EgressChannelSourceNode(u))
val debugNode = BundleBridgeSource(() => new DebugBundle(allPreDiplomaticInParams.size))
val ctrlNode = if (hasCtrl) Some(BundleBridgeSource(() => new RouterCtrlBundle)) else None
def inParams = module.inParams
def outParams = module.outParams
def ingressParams = module.ingressParams
def egressParams = module.egressParams
lazy val module = new LazyModuleImp(this) with HasRouterInputParams with HasRouterOutputParams {
val (io_in, edgesIn) = destNodes.map(_.in(0)).unzip
val (io_out, edgesOut) = sourceNodes.map(_.out(0)).unzip
val (io_ingress, edgesIngress) = ingressNodes.map(_.in(0)).unzip
val (io_egress, edgesEgress) = egressNodes.map(_.out(0)).unzip
val io_debug = debugNode.out(0)._1
val inParams = edgesIn.map(_.cp)
val outParams = edgesOut.map(_.cp)
val ingressParams = edgesIngress.map(_.cp)
val egressParams = edgesEgress.map(_.cp)
allOutParams.foreach(u => require(u.srcId == nodeId && u.payloadBits == routerParams.user.payloadBits))
allInParams.foreach(u => require(u.destId == nodeId && u.payloadBits == routerParams.user.payloadBits))
require(nIngress == routerParams.nIngress)
require(nEgress == routerParams.nEgress)
require(nAllInputs >= 1)
require(nAllOutputs >= 1)
require(nodeId < (1 << nodeIdBits))
val input_units = inParams.zipWithIndex.map { case (u,i) =>
Module(new InputUnit(u, outParams, egressParams,
routerParams.user.combineRCVA, routerParams.user.combineSAST))
.suggestName(s"input_unit_${i}_from_${u.srcId}") }
val ingress_units = ingressParams.zipWithIndex.map { case (u,i) =>
Module(new IngressUnit(i, u, outParams, egressParams,
routerParams.user.combineRCVA, routerParams.user.combineSAST))
.suggestName(s"ingress_unit_${i+nInputs}_from_${u.ingressId}") }
val all_input_units = input_units ++ ingress_units
val output_units = outParams.zipWithIndex.map { case (u,i) =>
Module(new OutputUnit(inParams, ingressParams, u))
.suggestName(s"output_unit_${i}_to_${u.destId}")}
val egress_units = egressParams.zipWithIndex.map { case (u,i) =>
Module(new EgressUnit(routerParams.user.coupleSAVA && all_input_units.size == 1,
routerParams.user.combineSAST,
inParams, ingressParams, u))
.suggestName(s"egress_unit_${i+nOutputs}_to_${u.egressId}")}
val all_output_units = output_units ++ egress_units
val switch = Module(new Switch(routerParams, inParams, outParams, ingressParams, egressParams))
val switch_allocator = Module(new SwitchAllocator(routerParams, inParams, outParams, ingressParams, egressParams))
val vc_allocator = Module(routerParams.user.vcAllocator(
VCAllocatorParams(routerParams, inParams, outParams, ingressParams, egressParams)
)(p))
val route_computer = Module(new RouteComputer(routerParams, inParams, outParams, ingressParams, egressParams))
val fires_count = WireInit(PopCount(vc_allocator.io.req.map(_.fire)))
dontTouch(fires_count)
(io_in zip input_units ).foreach { case (i,u) => u.io.in <> i }
(io_ingress zip ingress_units).foreach { case (i,u) => u.io.in <> i.flit }
(output_units zip io_out ).foreach { case (u,o) => o <> u.io.out }
(egress_units zip io_egress).foreach { case (u,o) => o.flit <> u.io.out }
(route_computer.io.req zip all_input_units).foreach {
case (i,u) => i <> u.io.router_req }
(all_input_units zip route_computer.io.resp).foreach {
case (u,o) => u.io.router_resp <> o }
(vc_allocator.io.req zip all_input_units).foreach {
case (i,u) => i <> u.io.vcalloc_req }
(all_input_units zip vc_allocator.io.resp).foreach {
case (u,o) => u.io.vcalloc_resp <> o }
(all_output_units zip vc_allocator.io.out_allocs).foreach {
case (u,a) => u.io.allocs <> a }
(vc_allocator.io.channel_status zip all_output_units).foreach {
case (a,u) => a := u.io.channel_status }
all_input_units.foreach(in => all_output_units.zipWithIndex.foreach { case (out,outIdx) =>
in.io.out_credit_available(outIdx) := out.io.credit_available
})
(all_input_units zip switch_allocator.io.req).foreach {
case (u,r) => r <> u.io.salloc_req }
(all_output_units zip switch_allocator.io.credit_alloc).foreach {
case (u,a) => u.io.credit_alloc := a }
(switch.io.in zip all_input_units).foreach {
case (i,u) => i <> u.io.out }
(all_output_units zip switch.io.out).foreach {
case (u,o) => u.io.in <> o }
switch.io.sel := (if (routerParams.user.combineSAST) {
switch_allocator.io.switch_sel
} else {
RegNext(switch_allocator.io.switch_sel)
})
if (hasCtrl) {
val io_ctrl = ctrlNode.get.out(0)._1
val ctrl = Module(new RouterControlUnit(routerParams, inParams, outParams, ingressParams, egressParams))
io_ctrl <> ctrl.io.ctrl
(all_input_units zip ctrl.io.in_block ).foreach { case (l,r) => l.io.block := r }
(all_input_units zip ctrl.io.in_fire ).foreach { case (l,r) => r := l.io.out.map(_.valid) }
} else {
input_units.foreach(_.io.block := false.B)
ingress_units.foreach(_.io.block := false.B)
}
(io_debug.va_stall zip all_input_units.map(_.io.debug.va_stall)).map { case (l,r) => l := r }
(io_debug.sa_stall zip all_input_units.map(_.io.debug.sa_stall)).map { case (l,r) => l := r }
val debug_tsc = RegInit(0.U(64.W))
debug_tsc := debug_tsc + 1.U
val debug_sample = RegInit(0.U(64.W))
debug_sample := debug_sample + 1.U
val sample_rate = PlusArg("noc_util_sample_rate", width=20)
when (debug_sample === sample_rate - 1.U) { debug_sample := 0.U }
def sample(fire: Bool, s: String) = {
val util_ctr = RegInit(0.U(64.W))
val fired = RegInit(false.B)
util_ctr := util_ctr + fire
fired := fired || fire
when (sample_rate =/= 0.U && debug_sample === sample_rate - 1.U && fired) {
val fmtStr = s"nocsample %d $s %d\n"
printf(fmtStr, debug_tsc, util_ctr)
fired := fire
}
}
destNodes.map(_.in(0)).foreach { case (in, edge) => in.flit.map { f =>
sample(f.fire, s"${edge.cp.srcId} $nodeId")
} }
ingressNodes.map(_.in(0)).foreach { case (in, edge) =>
sample(in.flit.fire, s"i${edge.cp.asInstanceOf[IngressChannelParams].ingressId} $nodeId")
}
egressNodes.map(_.out(0)).foreach { case (out, edge) =>
sample(out.flit.fire, s"$nodeId e${edge.cp.asInstanceOf[EgressChannelParams].egressId}")
}
}
}
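// Hedged sketch (illustrative only, not part of the original file): the surrounding NoC generator
// constructs each router diplomatically, roughly like the helper below. The empty channel sequences
// are placeholders; a real instantiation passes the per-port parameters derived from the global
// NoC configuration.
object ExampleRouterFactory {
  def apply()(implicit p: Parameters): Router = LazyModule(new Router(
    RouterParams(nodeId = 0, nIngress = 0, nEgress = 0, UserRouterParams()),
    preDiplomaticInParams = Nil,
    preDiplomaticIngressParams = Nil,
    outDests = Nil,
    egressIds = Nil
  ))
}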
File LazyModuleImp.scala:
package org.chipsalliance.diplomacy.lazymodule
import chisel3.{withClockAndReset, Module, RawModule, Reset, _}
import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo}
import firrtl.passes.InlineAnnotation
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.nodes.Dangle
import scala.collection.immutable.SortedMap
/** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]].
*
* This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy.
*/
sealed trait LazyModuleImpLike extends RawModule {
/** [[LazyModule]] that contains this instance. */
val wrapper: LazyModule
/** IOs that will be automatically "punched" for this instance. */
val auto: AutoBundle
/** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */
protected[diplomacy] val dangles: Seq[Dangle]
// [[wrapper.module]] had better not be accessed while LazyModules are still being built!
require(
LazyModule.scope.isEmpty,
s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}"
)
/** Set module name. Defaults to the containing LazyModule's desiredName. */
override def desiredName: String = wrapper.desiredName
suggestName(wrapper.suggestedName)
/** [[Parameters]] for chisel [[Module]]s. */
implicit val p: Parameters = wrapper.p
/** Instantiate this [[LazyModule]], returning its [[AutoBundle]] and the unconnected [[Dangle]]s
 * from this module and its submodules.
 */
protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = {
// 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]],
// 2. return [[Dangle]]s from each module.
val childDangles = wrapper.children.reverse.flatMap { c =>
implicit val sourceInfo: SourceInfo = c.info
c.cloneProto.map { cp =>
// If the child is a clone, then recursively set cloneProto of its children as well
def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = {
require(bases.size == clones.size)
(bases.zip(clones)).map { case (l, r) =>
require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}")
l.cloneProto = Some(r)
assignCloneProtos(l.children, r.children)
}
}
assignCloneProtos(c.children, cp.children)
// Clone the child module as a record, and get its [[AutoBundle]]
val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName)
val clonedAuto = clone("auto").asInstanceOf[AutoBundle]
// Get the empty [[Dangle]]'s of the cloned child
val rawDangles = c.cloneDangles()
require(rawDangles.size == clonedAuto.elements.size)
// Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s
val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) }
dangles
}.getOrElse {
// For non-clones, instantiate the child module
val mod = try {
Module(c.module)
} catch {
case e: ChiselException => {
println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}")
throw e
}
}
mod.dangles
}
}
// Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]].
// This will result in a sequence of [[Dangle]] from these [[BaseNode]]s.
val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate())
// Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]]
val allDangles = nodeDangles ++ childDangles
// Group [[allDangles]] by their [[source]].
val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*)
// For each [[source]] set of [[Dangle]]s of size 2, ensure that these
// can be connected as a source-sink pair (have opposite flipped value).
// Make the connection and mark them as [[done]].
val done = Set() ++ pairing.values.filter(_.size == 2).map {
case Seq(a, b) =>
require(a.flipped != b.flipped)
// @todo <> in chisel3 makes directionless connection.
if (a.flipped) {
a.data <> b.data
} else {
b.data <> a.data
}
a.source
case _ => None
}
// Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module.
val forward = allDangles.filter(d => !done(d.source))
// Generate [[AutoBundle]] IO from [[forward]].
val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*))
// Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]]
val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) =>
if (d.flipped) {
d.data <> io
} else {
io <> d.data
}
d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name)
}
// Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]].
wrapper.inModuleBody.reverse.foreach {
_()
}
if (wrapper.shouldBeInlined) {
chisel3.experimental.annotate(new ChiselAnnotation {
def toFirrtl = InlineAnnotation(toNamed)
})
}
// Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]].
(auto, dangles)
}
}
/** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike {
/** Instantiate hardware of this `Module`. */
val (auto, dangles) = instantiate()
}
/** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike {
// These wires are the default clock+reset for all LazyModule children.
// It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the
// [[LazyRawModuleImp]] children.
// Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly.
/** drive clock explicitly. */
val childClock: Clock = Wire(Clock())
/** drive reset explicitly. */
val childReset: Reset = Wire(Reset())
// the default is that these are disabled
childClock := false.B.asClock
childReset := chisel3.DontCare
def provideImplicitClockToLazyChildren: Boolean = false
val (auto, dangles) =
if (provideImplicitClockToLazyChildren) {
withClockAndReset(childClock, childReset) { instantiate() }
} else {
instantiate()
}
}
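/** Hedged usage sketch (not part of the original file): a raw-module imp that forwards an
  * externally supplied clock and reset to its lazy children. The port names `io_clock` and
  * `io_reset` are illustrative.
  */
class ExampleClockedRawImp(wrapper: LazyModule) extends LazyRawModuleImp(wrapper) {
  override def provideImplicitClockToLazyChildren: Boolean = true
  val io_clock = IO(Input(Clock()))
  val io_reset = IO(Input(Reset()))
  // Last connection wins: these override the disabled defaults assigned in LazyRawModuleImp.
  childClock := io_clock
  childReset := io_reset
}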
| module Router_48( // @[Router.scala:89:25]
input clock, // @[Router.scala:89:25]
input reset, // @[Router.scala:89:25]
output [3:0] auto_debug_out_va_stall_0, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_debug_out_va_stall_1, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_debug_out_sa_stall_0, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_debug_out_sa_stall_1, // @[LazyModuleImp.scala:107:25]
input auto_egress_nodes_out_1_flit_ready, // @[LazyModuleImp.scala:107:25]
output auto_egress_nodes_out_1_flit_valid, // @[LazyModuleImp.scala:107:25]
output auto_egress_nodes_out_1_flit_bits_head, // @[LazyModuleImp.scala:107:25]
output auto_egress_nodes_out_1_flit_bits_tail, // @[LazyModuleImp.scala:107:25]
output [72:0] auto_egress_nodes_out_1_flit_bits_payload, // @[LazyModuleImp.scala:107:25]
input auto_egress_nodes_out_0_flit_ready, // @[LazyModuleImp.scala:107:25]
output auto_egress_nodes_out_0_flit_valid, // @[LazyModuleImp.scala:107:25]
output auto_egress_nodes_out_0_flit_bits_head, // @[LazyModuleImp.scala:107:25]
output auto_egress_nodes_out_0_flit_bits_tail, // @[LazyModuleImp.scala:107:25]
output auto_ingress_nodes_in_0_flit_ready, // @[LazyModuleImp.scala:107:25]
input auto_ingress_nodes_in_0_flit_valid, // @[LazyModuleImp.scala:107:25]
input auto_ingress_nodes_in_0_flit_bits_head, // @[LazyModuleImp.scala:107:25]
input auto_ingress_nodes_in_0_flit_bits_tail, // @[LazyModuleImp.scala:107:25]
input [72:0] auto_ingress_nodes_in_0_flit_bits_payload, // @[LazyModuleImp.scala:107:25]
input [4:0] auto_ingress_nodes_in_0_flit_bits_egress_id, // @[LazyModuleImp.scala:107:25]
output auto_source_nodes_out_flit_0_valid, // @[LazyModuleImp.scala:107:25]
output auto_source_nodes_out_flit_0_bits_head, // @[LazyModuleImp.scala:107:25]
output auto_source_nodes_out_flit_0_bits_tail, // @[LazyModuleImp.scala:107:25]
output [72:0] auto_source_nodes_out_flit_0_bits_payload, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_source_nodes_out_flit_0_bits_flow_vnet_id, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_source_nodes_out_flit_0_bits_flow_ingress_node, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_source_nodes_out_flit_0_bits_flow_ingress_node_id, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_source_nodes_out_flit_0_bits_flow_egress_node, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_source_nodes_out_flit_0_bits_flow_egress_node_id, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_source_nodes_out_flit_0_bits_virt_channel_id, // @[LazyModuleImp.scala:107:25]
input [9:0] auto_source_nodes_out_credit_return, // @[LazyModuleImp.scala:107:25]
input [9:0] auto_source_nodes_out_vc_free, // @[LazyModuleImp.scala:107:25]
input auto_dest_nodes_in_flit_0_valid, // @[LazyModuleImp.scala:107:25]
input auto_dest_nodes_in_flit_0_bits_head, // @[LazyModuleImp.scala:107:25]
input auto_dest_nodes_in_flit_0_bits_tail, // @[LazyModuleImp.scala:107:25]
input [72:0] auto_dest_nodes_in_flit_0_bits_payload, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_dest_nodes_in_flit_0_bits_flow_vnet_id, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_dest_nodes_in_flit_0_bits_flow_ingress_node, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_dest_nodes_in_flit_0_bits_flow_ingress_node_id, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_dest_nodes_in_flit_0_bits_flow_egress_node, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_dest_nodes_in_flit_0_bits_flow_egress_node_id, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_dest_nodes_in_flit_0_bits_virt_channel_id, // @[LazyModuleImp.scala:107:25]
output [9:0] auto_dest_nodes_in_credit_return, // @[LazyModuleImp.scala:107:25]
output [9:0] auto_dest_nodes_in_vc_free // @[LazyModuleImp.scala:107:25]
);
wire [19:0] _plusarg_reader_out; // @[PlusArg.scala:80:11]
wire _vc_allocator_io_req_1_ready; // @[Router.scala:133:30]
wire _vc_allocator_io_req_0_ready; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_1_vc_sel_2_0; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_1_vc_sel_1_0; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_1_vc_sel_0_0; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_1_vc_sel_0_1; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_1_vc_sel_0_2; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_1_vc_sel_0_3; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_1_vc_sel_0_4; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_1_vc_sel_0_5; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_1_vc_sel_0_6; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_1_vc_sel_0_7; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_1_vc_sel_0_8; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_1_vc_sel_0_9; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_0_vc_sel_2_0; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_0_vc_sel_1_0; // @[Router.scala:133:30]
wire _vc_allocator_io_out_allocs_2_0_alloc; // @[Router.scala:133:30]
wire _vc_allocator_io_out_allocs_1_0_alloc; // @[Router.scala:133:30]
wire _vc_allocator_io_out_allocs_0_8_alloc; // @[Router.scala:133:30]
wire _vc_allocator_io_out_allocs_0_9_alloc; // @[Router.scala:133:30]
wire _switch_allocator_io_req_1_0_ready; // @[Router.scala:132:34]
wire _switch_allocator_io_req_0_0_ready; // @[Router.scala:132:34]
wire _switch_allocator_io_credit_alloc_2_0_alloc; // @[Router.scala:132:34]
wire _switch_allocator_io_credit_alloc_2_0_tail; // @[Router.scala:132:34]
wire _switch_allocator_io_credit_alloc_1_0_alloc; // @[Router.scala:132:34]
wire _switch_allocator_io_credit_alloc_1_0_tail; // @[Router.scala:132:34]
wire _switch_allocator_io_credit_alloc_0_8_alloc; // @[Router.scala:132:34]
wire _switch_allocator_io_credit_alloc_0_9_alloc; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_2_0_1_0; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_2_0_0_0; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_1_0_1_0; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_1_0_0_0; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_0_0_1_0; // @[Router.scala:132:34]
wire _switch_io_out_2_0_valid; // @[Router.scala:131:24]
wire _switch_io_out_2_0_bits_head; // @[Router.scala:131:24]
wire _switch_io_out_2_0_bits_tail; // @[Router.scala:131:24]
wire [72:0] _switch_io_out_2_0_bits_payload; // @[Router.scala:131:24]
wire [3:0] _switch_io_out_2_0_bits_flow_ingress_node; // @[Router.scala:131:24]
wire [1:0] _switch_io_out_2_0_bits_flow_ingress_node_id; // @[Router.scala:131:24]
wire _switch_io_out_1_0_valid; // @[Router.scala:131:24]
wire _switch_io_out_1_0_bits_head; // @[Router.scala:131:24]
wire _switch_io_out_1_0_bits_tail; // @[Router.scala:131:24]
wire [72:0] _switch_io_out_1_0_bits_payload; // @[Router.scala:131:24]
wire _switch_io_out_0_0_valid; // @[Router.scala:131:24]
wire _switch_io_out_0_0_bits_head; // @[Router.scala:131:24]
wire _switch_io_out_0_0_bits_tail; // @[Router.scala:131:24]
wire [72:0] _switch_io_out_0_0_bits_payload; // @[Router.scala:131:24]
wire [2:0] _switch_io_out_0_0_bits_flow_vnet_id; // @[Router.scala:131:24]
wire [3:0] _switch_io_out_0_0_bits_flow_ingress_node; // @[Router.scala:131:24]
wire [1:0] _switch_io_out_0_0_bits_flow_ingress_node_id; // @[Router.scala:131:24]
wire [3:0] _switch_io_out_0_0_bits_flow_egress_node; // @[Router.scala:131:24]
wire [2:0] _switch_io_out_0_0_bits_flow_egress_node_id; // @[Router.scala:131:24]
wire [3:0] _switch_io_out_0_0_bits_virt_channel_id; // @[Router.scala:131:24]
wire _egress_unit_2_to_1_io_credit_available_0; // @[Router.scala:125:13]
wire _egress_unit_2_to_1_io_channel_status_0_occupied; // @[Router.scala:125:13]
wire _egress_unit_2_to_1_io_out_valid; // @[Router.scala:125:13]
wire _egress_unit_1_to_0_io_credit_available_0; // @[Router.scala:125:13]
wire _egress_unit_1_to_0_io_channel_status_0_occupied; // @[Router.scala:125:13]
wire _egress_unit_1_to_0_io_out_valid; // @[Router.scala:125:13]
wire _output_unit_0_to_9_io_credit_available_8; // @[Router.scala:122:13]
wire _output_unit_0_to_9_io_credit_available_9; // @[Router.scala:122:13]
wire _output_unit_0_to_9_io_channel_status_8_occupied; // @[Router.scala:122:13]
wire _output_unit_0_to_9_io_channel_status_9_occupied; // @[Router.scala:122:13]
wire _ingress_unit_1_from_0_io_vcalloc_req_valid; // @[Router.scala:116:13]
wire _ingress_unit_1_from_0_io_vcalloc_req_bits_vc_sel_2_0; // @[Router.scala:116:13]
wire _ingress_unit_1_from_0_io_vcalloc_req_bits_vc_sel_1_0; // @[Router.scala:116:13]
wire _ingress_unit_1_from_0_io_vcalloc_req_bits_vc_sel_0_0; // @[Router.scala:116:13]
wire _ingress_unit_1_from_0_io_vcalloc_req_bits_vc_sel_0_1; // @[Router.scala:116:13]
wire _ingress_unit_1_from_0_io_vcalloc_req_bits_vc_sel_0_2; // @[Router.scala:116:13]
wire _ingress_unit_1_from_0_io_vcalloc_req_bits_vc_sel_0_3; // @[Router.scala:116:13]
wire _ingress_unit_1_from_0_io_vcalloc_req_bits_vc_sel_0_4; // @[Router.scala:116:13]
wire _ingress_unit_1_from_0_io_vcalloc_req_bits_vc_sel_0_5; // @[Router.scala:116:13]
wire _ingress_unit_1_from_0_io_vcalloc_req_bits_vc_sel_0_6; // @[Router.scala:116:13]
wire _ingress_unit_1_from_0_io_vcalloc_req_bits_vc_sel_0_7; // @[Router.scala:116:13]
wire _ingress_unit_1_from_0_io_vcalloc_req_bits_vc_sel_0_8; // @[Router.scala:116:13]
wire _ingress_unit_1_from_0_io_vcalloc_req_bits_vc_sel_0_9; // @[Router.scala:116:13]
wire _ingress_unit_1_from_0_io_salloc_req_0_valid; // @[Router.scala:116:13]
wire _ingress_unit_1_from_0_io_salloc_req_0_bits_vc_sel_2_0; // @[Router.scala:116:13]
wire _ingress_unit_1_from_0_io_salloc_req_0_bits_vc_sel_1_0; // @[Router.scala:116:13]
wire _ingress_unit_1_from_0_io_salloc_req_0_bits_vc_sel_0_0; // @[Router.scala:116:13]
wire _ingress_unit_1_from_0_io_salloc_req_0_bits_vc_sel_0_1; // @[Router.scala:116:13]
wire _ingress_unit_1_from_0_io_salloc_req_0_bits_vc_sel_0_2; // @[Router.scala:116:13]
wire _ingress_unit_1_from_0_io_salloc_req_0_bits_vc_sel_0_3; // @[Router.scala:116:13]
wire _ingress_unit_1_from_0_io_salloc_req_0_bits_vc_sel_0_4; // @[Router.scala:116:13]
wire _ingress_unit_1_from_0_io_salloc_req_0_bits_vc_sel_0_5; // @[Router.scala:116:13]
wire _ingress_unit_1_from_0_io_salloc_req_0_bits_vc_sel_0_6; // @[Router.scala:116:13]
wire _ingress_unit_1_from_0_io_salloc_req_0_bits_vc_sel_0_7; // @[Router.scala:116:13]
wire _ingress_unit_1_from_0_io_salloc_req_0_bits_vc_sel_0_8; // @[Router.scala:116:13]
wire _ingress_unit_1_from_0_io_salloc_req_0_bits_vc_sel_0_9; // @[Router.scala:116:13]
wire _ingress_unit_1_from_0_io_salloc_req_0_bits_tail; // @[Router.scala:116:13]
wire _ingress_unit_1_from_0_io_out_0_valid; // @[Router.scala:116:13]
wire _ingress_unit_1_from_0_io_out_0_bits_flit_head; // @[Router.scala:116:13]
wire _ingress_unit_1_from_0_io_out_0_bits_flit_tail; // @[Router.scala:116:13]
wire [72:0] _ingress_unit_1_from_0_io_out_0_bits_flit_payload; // @[Router.scala:116:13]
wire [2:0] _ingress_unit_1_from_0_io_out_0_bits_flit_flow_vnet_id; // @[Router.scala:116:13]
wire [3:0] _ingress_unit_1_from_0_io_out_0_bits_flit_flow_ingress_node; // @[Router.scala:116:13]
wire [1:0] _ingress_unit_1_from_0_io_out_0_bits_flit_flow_ingress_node_id; // @[Router.scala:116:13]
wire [3:0] _ingress_unit_1_from_0_io_out_0_bits_flit_flow_egress_node; // @[Router.scala:116:13]
wire [2:0] _ingress_unit_1_from_0_io_out_0_bits_flit_flow_egress_node_id; // @[Router.scala:116:13]
wire [3:0] _ingress_unit_1_from_0_io_out_0_bits_out_virt_channel; // @[Router.scala:116:13]
wire _ingress_unit_1_from_0_io_in_ready; // @[Router.scala:116:13]
wire _input_unit_0_from_9_io_vcalloc_req_valid; // @[Router.scala:112:13]
wire _input_unit_0_from_9_io_vcalloc_req_bits_vc_sel_2_0; // @[Router.scala:112:13]
wire _input_unit_0_from_9_io_vcalloc_req_bits_vc_sel_1_0; // @[Router.scala:112:13]
wire _input_unit_0_from_9_io_salloc_req_0_valid; // @[Router.scala:112:13]
wire _input_unit_0_from_9_io_salloc_req_0_bits_vc_sel_2_0; // @[Router.scala:112:13]
wire _input_unit_0_from_9_io_salloc_req_0_bits_vc_sel_1_0; // @[Router.scala:112:13]
wire _input_unit_0_from_9_io_salloc_req_0_bits_tail; // @[Router.scala:112:13]
wire _input_unit_0_from_9_io_out_0_valid; // @[Router.scala:112:13]
wire _input_unit_0_from_9_io_out_0_bits_flit_head; // @[Router.scala:112:13]
wire _input_unit_0_from_9_io_out_0_bits_flit_tail; // @[Router.scala:112:13]
wire [72:0] _input_unit_0_from_9_io_out_0_bits_flit_payload; // @[Router.scala:112:13]
wire [2:0] _input_unit_0_from_9_io_out_0_bits_flit_flow_vnet_id; // @[Router.scala:112:13]
wire [3:0] _input_unit_0_from_9_io_out_0_bits_flit_flow_ingress_node; // @[Router.scala:112:13]
wire [1:0] _input_unit_0_from_9_io_out_0_bits_flit_flow_ingress_node_id; // @[Router.scala:112:13]
wire [3:0] _input_unit_0_from_9_io_out_0_bits_flit_flow_egress_node; // @[Router.scala:112:13]
wire [2:0] _input_unit_0_from_9_io_out_0_bits_flit_flow_egress_node_id; // @[Router.scala:112:13]
wire [2:0] fires_count = {1'h0, {1'h0, _vc_allocator_io_req_0_ready & _input_unit_0_from_9_io_vcalloc_req_valid} + {1'h0, _vc_allocator_io_req_1_ready & _ingress_unit_1_from_0_io_vcalloc_req_valid}}; // @[Decoupled.scala:51:35]
reg REG_2_0_1_0; // @[Router.scala:178:14]
reg REG_2_0_0_0; // @[Router.scala:178:14]
reg REG_1_0_1_0; // @[Router.scala:178:14]
reg REG_1_0_0_0; // @[Router.scala:178:14]
reg REG_0_0_1_0; // @[Router.scala:178:14]
reg [63:0] debug_tsc; // @[Router.scala:195:28]
reg [63:0] debug_sample; // @[Router.scala:197:31]
wire _GEN = debug_sample == {44'h0, _plusarg_reader_out - 20'h1}; // @[PlusArg.scala:80:11]
reg [63:0] util_ctr; // @[Router.scala:203:29]
reg fired; // @[Router.scala:204:26]
wire _GEN_0 = (|_plusarg_reader_out) & _GEN; // @[PlusArg.scala:80:11]
wire _GEN_1 = _GEN_0 & fired; // @[Router.scala:204:26, :207:{33,71}]
reg [63:0] util_ctr_1; // @[Router.scala:203:29]
reg fired_1; // @[Router.scala:204:26]
wire _GEN_2 = _GEN_0 & fired_1; // @[Router.scala:204:26, :207:{33,71}]
reg [63:0] util_ctr_4; // @[Router.scala:203:29]
reg fired_4; // @[Router.scala:204:26]
wire _GEN_3 = _GEN_0 & fired_4; // @[Router.scala:204:26, :207:{33,71}]
reg [63:0] util_ctr_5; // @[Router.scala:203:29]
reg fired_5; // @[Router.scala:204:26]
wire _GEN_4 = _GEN_0 & fired_5; // @[Router.scala:204:26, :207:{33,71}] |
Generate the Verilog code corresponding to the following Chisel files.
File Monitor.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceLine
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import freechips.rocketchip.diplomacy.EnableMonitors
import freechips.rocketchip.formal.{MonitorDirection, IfThen, Property, PropertyClass, TestplanTestType, TLMonitorStrictMode}
import freechips.rocketchip.util.PlusArg
case class TLMonitorArgs(edge: TLEdge)
abstract class TLMonitorBase(args: TLMonitorArgs) extends Module
{
val io = IO(new Bundle {
val in = Input(new TLBundle(args.edge.bundle))
})
def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit
legalize(io.in, args.edge, reset)
}
object TLMonitor {
def apply(enable: Boolean, node: TLNode)(implicit p: Parameters): TLNode = {
if (enable) {
EnableMonitors { implicit p => node := TLEphemeralNode()(ValName("monitor")) }
} else { node }
}
}
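// Hedged usage sketch (not part of the original file): interposing a monitor on a TileLink node
// only when monitoring is enabled; `node` stands for any TLNode being wired inside a LazyModule.
object TLMonitorExample {
  def monitored(node: TLNode)(implicit p: Parameters): TLNode =
    TLMonitor(enable = true, node)
}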
class TLMonitor(args: TLMonitorArgs, monitorDir: MonitorDirection = MonitorDirection.Monitor) extends TLMonitorBase(args)
{
require (args.edge.params(TLMonitorStrictMode) || (! args.edge.params(TestplanTestType).formal))
val cover_prop_class = PropertyClass.Default
//Like assert but can flip to being an assumption for formal verification
def monAssert(cond: Bool, message: String): Unit =
if (monitorDir == MonitorDirection.Monitor) {
assert(cond, message)
} else {
Property(monitorDir, cond, message, PropertyClass.Default)
}
def assume(cond: Bool, message: String): Unit =
if (monitorDir == MonitorDirection.Monitor) {
assert(cond, message)
} else {
Property(monitorDir.flip, cond, message, PropertyClass.Default)
}
def extra = {
args.edge.sourceInfo match {
case SourceLine(filename, line, col) => s" (connected at $filename:$line:$col)"
case _ => ""
}
}
def visible(address: UInt, source: UInt, edge: TLEdge) =
edge.client.clients.map { c =>
!c.sourceId.contains(source) ||
c.visibility.map(_.contains(address)).reduce(_ || _)
}.reduce(_ && _)
def legalizeFormatA(bundle: TLBundleA, edge: TLEdge): Unit = {
//switch this flag to turn on diplomacy in error messages
def diplomacyInfo = if (true) "" else "\nThe diplomacy information for the edge is as follows:\n" + edge.formatEdge + "\n"
monAssert (TLMessages.isA(bundle.opcode), "'A' channel has invalid opcode" + extra)
// Reuse these subexpressions to save some firrtl lines
val source_ok = edge.client.contains(bundle.source)
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val mask = edge.full_mask(bundle)
monAssert (visible(edge.address(bundle), bundle.source, edge), "'A' channel carries an address illegal for the specified bank visibility")
//The monitor doesn't check for acquire T vs acquire B; it assumes that acquire B implies acquire T and only checks for acquire B
//TODO: check for acquireT?
when (bundle.opcode === TLMessages.AcquireBlock) {
monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock from a client which does not support Probe" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel AcquireBlock carries invalid source ID" + diplomacyInfo + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquireBlock smaller than a beat" + extra)
monAssert (is_aligned, "'A' channel AcquireBlock address not aligned to size" + extra)
monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquireBlock carries invalid grow param" + extra)
monAssert (~bundle.mask === 0.U, "'A' channel AcquireBlock contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel AcquireBlock is corrupt" + extra)
}
when (bundle.opcode === TLMessages.AcquirePerm) {
monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm from a client which does not support Probe" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel AcquirePerm carries invalid source ID" + diplomacyInfo + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquirePerm smaller than a beat" + extra)
monAssert (is_aligned, "'A' channel AcquirePerm address not aligned to size" + extra)
monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquirePerm carries invalid grow param" + extra)
monAssert (bundle.param =/= TLPermissions.NtoB, "'A' channel AcquirePerm requests NtoB" + extra)
monAssert (~bundle.mask === 0.U, "'A' channel AcquirePerm contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel AcquirePerm is corrupt" + extra)
}
when (bundle.opcode === TLMessages.Get) {
monAssert (edge.master.emitsGet(bundle.source, bundle.size), "'A' channel carries Get type which master claims it can't emit" + diplomacyInfo + extra)
monAssert (edge.slave.supportsGetSafe(edge.address(bundle), bundle.size, None), "'A' channel carries Get type which slave claims it can't support" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel Get carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Get address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel Get carries invalid param" + extra)
monAssert (bundle.mask === mask, "'A' channel Get contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel Get is corrupt" + extra)
}
when (bundle.opcode === TLMessages.PutFullData) {
monAssert (edge.master.emitsPutFull(bundle.source, bundle.size) && edge.slave.supportsPutFullSafe(edge.address(bundle), bundle.size), "'A' channel carries PutFull type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel PutFull carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel PutFull address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel PutFull carries invalid param" + extra)
monAssert (bundle.mask === mask, "'A' channel PutFull contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.PutPartialData) {
monAssert (edge.master.emitsPutPartial(bundle.source, bundle.size) && edge.slave.supportsPutPartialSafe(edge.address(bundle), bundle.size), "'A' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel PutPartial carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel PutPartial address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel PutPartial carries invalid param" + extra)
monAssert ((bundle.mask & ~mask) === 0.U, "'A' channel PutPartial contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.ArithmeticData) {
monAssert (edge.master.emitsArithmetic(bundle.source, bundle.size) && edge.slave.supportsArithmeticSafe(edge.address(bundle), bundle.size), "'A' channel carries Arithmetic type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Arithmetic carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Arithmetic address not aligned to size" + extra)
monAssert (TLAtomics.isArithmetic(bundle.param), "'A' channel Arithmetic carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Arithmetic contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.LogicalData) {
monAssert (edge.master.emitsLogical(bundle.source, bundle.size) && edge.slave.supportsLogicalSafe(edge.address(bundle), bundle.size), "'A' channel carries Logical type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Logical carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Logical address not aligned to size" + extra)
monAssert (TLAtomics.isLogical(bundle.param), "'A' channel Logical carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Logical contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.Hint) {
monAssert (edge.master.emitsHint(bundle.source, bundle.size) && edge.slave.supportsHintSafe(edge.address(bundle), bundle.size), "'A' channel carries Hint type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Hint carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Hint address not aligned to size" + extra)
monAssert (TLHints.isHints(bundle.param), "'A' channel Hint carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Hint contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel Hint is corrupt" + extra)
}
}
def legalizeFormatB(bundle: TLBundleB, edge: TLEdge): Unit = {
monAssert (TLMessages.isB(bundle.opcode), "'B' channel has invalid opcode" + extra)
monAssert (visible(edge.address(bundle), bundle.source, edge), "'B' channel carries an address illegal for the specified bank visibility")
// Reuse these subexpressions to save some firrtl lines
val address_ok = edge.manager.containsSafe(edge.address(bundle))
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val mask = edge.full_mask(bundle)
val legal_source = Mux1H(edge.client.find(bundle.source), edge.client.clients.map(c => c.sourceId.start.U)) === bundle.source
when (bundle.opcode === TLMessages.Probe) {
assume (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'B' channel carries Probe type which is unexpected using diplomatic parameters" + extra)
assume (address_ok, "'B' channel Probe carries unmanaged address" + extra)
assume (legal_source, "'B' channel Probe carries source that is not first source" + extra)
assume (is_aligned, "'B' channel Probe address not aligned to size" + extra)
assume (TLPermissions.isCap(bundle.param), "'B' channel Probe carries invalid cap param" + extra)
assume (bundle.mask === mask, "'B' channel Probe contains invalid mask" + extra)
assume (!bundle.corrupt, "'B' channel Probe is corrupt" + extra)
}
when (bundle.opcode === TLMessages.Get) {
monAssert (edge.master.supportsGet(edge.source(bundle), bundle.size) && edge.slave.emitsGetSafe(edge.address(bundle), bundle.size), "'B' channel carries Get type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel Get carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Get carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Get address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel Get carries invalid param" + extra)
monAssert (bundle.mask === mask, "'B' channel Get contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'B' channel Get is corrupt" + extra)
}
when (bundle.opcode === TLMessages.PutFullData) {
monAssert (edge.master.supportsPutFull(edge.source(bundle), bundle.size) && edge.slave.emitsPutFullSafe(edge.address(bundle), bundle.size), "'B' channel carries PutFull type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel PutFull carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel PutFull carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel PutFull address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel PutFull carries invalid param" + extra)
monAssert (bundle.mask === mask, "'B' channel PutFull contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.PutPartialData) {
monAssert (edge.master.supportsPutPartial(edge.source(bundle), bundle.size) && edge.slave.emitsPutPartialSafe(edge.address(bundle), bundle.size), "'B' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel PutPartial carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel PutPartial carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel PutPartial address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel PutPartial carries invalid param" + extra)
monAssert ((bundle.mask & ~mask) === 0.U, "'B' channel PutPartial contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.ArithmeticData) {
monAssert (edge.master.supportsArithmetic(edge.source(bundle), bundle.size) && edge.slave.emitsArithmeticSafe(edge.address(bundle), bundle.size), "'B' channel carries Arithmetic type unsupported by master" + extra)
monAssert (address_ok, "'B' channel Arithmetic carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Arithmetic carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Arithmetic address not aligned to size" + extra)
monAssert (TLAtomics.isArithmetic(bundle.param), "'B' channel Arithmetic carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'B' channel Arithmetic contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.LogicalData) {
monAssert (edge.master.supportsLogical(edge.source(bundle), bundle.size) && edge.slave.emitsLogicalSafe(edge.address(bundle), bundle.size), "'B' channel carries Logical type unsupported by client" + extra)
monAssert (address_ok, "'B' channel Logical carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Logical carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Logical address not aligned to size" + extra)
monAssert (TLAtomics.isLogical(bundle.param), "'B' channel Logical carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'B' channel Logical contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.Hint) {
monAssert (edge.master.supportsHint(edge.source(bundle), bundle.size) && edge.slave.emitsHintSafe(edge.address(bundle), bundle.size), "'B' channel carries Hint type unsupported by client" + extra)
monAssert (address_ok, "'B' channel Hint carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Hint carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Hint address not aligned to size" + extra)
monAssert (bundle.mask === mask, "'B' channel Hint contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'B' channel Hint is corrupt" + extra)
}
}
def legalizeFormatC(bundle: TLBundleC, edge: TLEdge): Unit = {
monAssert (TLMessages.isC(bundle.opcode), "'C' channel has invalid opcode" + extra)
val source_ok = edge.client.contains(bundle.source)
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val address_ok = edge.manager.containsSafe(edge.address(bundle))
monAssert (visible(edge.address(bundle), bundle.source, edge), "'C' channel carries an address illegal for the specified bank visibility")
when (bundle.opcode === TLMessages.ProbeAck) {
monAssert (address_ok, "'C' channel ProbeAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel ProbeAck carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAck smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ProbeAck address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAck carries invalid report param" + extra)
monAssert (!bundle.corrupt, "'C' channel ProbeAck is corrupt" + extra)
}
when (bundle.opcode === TLMessages.ProbeAckData) {
monAssert (address_ok, "'C' channel ProbeAckData carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel ProbeAckData carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAckData smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ProbeAckData address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAckData carries invalid report param" + extra)
}
when (bundle.opcode === TLMessages.Release) {
monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries Release type unsupported by manager" + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra)
monAssert (source_ok, "'C' channel Release carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel Release smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel Release address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel Release carries invalid report param" + extra)
monAssert (!bundle.corrupt, "'C' channel Release is corrupt" + extra)
}
when (bundle.opcode === TLMessages.ReleaseData) {
monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData type unsupported by manager" + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra)
monAssert (source_ok, "'C' channel ReleaseData carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ReleaseData smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ReleaseData address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ReleaseData carries invalid report param" + extra)
}
when (bundle.opcode === TLMessages.AccessAck) {
monAssert (address_ok, "'C' channel AccessAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel AccessAck carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel AccessAck address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel AccessAck carries invalid param" + extra)
monAssert (!bundle.corrupt, "'C' channel AccessAck is corrupt" + extra)
}
when (bundle.opcode === TLMessages.AccessAckData) {
monAssert (address_ok, "'C' channel AccessAckData carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel AccessAckData carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel AccessAckData address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel AccessAckData carries invalid param" + extra)
}
when (bundle.opcode === TLMessages.HintAck) {
monAssert (address_ok, "'C' channel HintAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel HintAck carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel HintAck address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel HintAck carries invalid param" + extra)
monAssert (!bundle.corrupt, "'C' channel HintAck is corrupt" + extra)
}
}
def legalizeFormatD(bundle: TLBundleD, edge: TLEdge): Unit = {
assume (TLMessages.isD(bundle.opcode), "'D' channel has invalid opcode" + extra)
val source_ok = edge.client.contains(bundle.source)
val sink_ok = bundle.sink < edge.manager.endSinkId.U
val deny_put_ok = edge.manager.mayDenyPut.B
val deny_get_ok = edge.manager.mayDenyGet.B
when (bundle.opcode === TLMessages.ReleaseAck) {
assume (source_ok, "'D' channel ReleaseAck carries invalid source ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel ReleaseAck smaller than a beat" + extra)
assume (bundle.param === 0.U, "'D' channel ReleaseAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel ReleaseAck is corrupt" + extra)
assume (!bundle.denied, "'D' channel ReleaseAck is denied" + extra)
}
when (bundle.opcode === TLMessages.Grant) {
assume (source_ok, "'D' channel Grant carries invalid source ID" + extra)
assume (sink_ok, "'D' channel Grant carries invalid sink ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel Grant smaller than a beat" + extra)
assume (TLPermissions.isCap(bundle.param), "'D' channel Grant carries invalid cap param" + extra)
assume (bundle.param =/= TLPermissions.toN, "'D' channel Grant carries toN param" + extra)
assume (!bundle.corrupt, "'D' channel Grant is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel Grant is denied" + extra)
}
when (bundle.opcode === TLMessages.GrantData) {
assume (source_ok, "'D' channel GrantData carries invalid source ID" + extra)
assume (sink_ok, "'D' channel GrantData carries invalid sink ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel GrantData smaller than a beat" + extra)
assume (TLPermissions.isCap(bundle.param), "'D' channel GrantData carries invalid cap param" + extra)
assume (bundle.param =/= TLPermissions.toN, "'D' channel GrantData carries toN param" + extra)
assume (!bundle.denied || bundle.corrupt, "'D' channel GrantData is denied but not corrupt" + extra)
assume (deny_get_ok || !bundle.denied, "'D' channel GrantData is denied" + extra)
}
when (bundle.opcode === TLMessages.AccessAck) {
assume (source_ok, "'D' channel AccessAck carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel AccessAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel AccessAck is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel AccessAck is denied" + extra)
}
when (bundle.opcode === TLMessages.AccessAckData) {
assume (source_ok, "'D' channel AccessAckData carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel AccessAckData carries invalid param" + extra)
assume (!bundle.denied || bundle.corrupt, "'D' channel AccessAckData is denied but not corrupt" + extra)
assume (deny_get_ok || !bundle.denied, "'D' channel AccessAckData is denied" + extra)
}
when (bundle.opcode === TLMessages.HintAck) {
assume (source_ok, "'D' channel HintAck carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel HintAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel HintAck is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel HintAck is denied" + extra)
}
}
def legalizeFormatE(bundle: TLBundleE, edge: TLEdge): Unit = {
val sink_ok = bundle.sink < edge.manager.endSinkId.U
monAssert (sink_ok, "'E' channel carries invalid sink ID" + extra)
}
def legalizeFormat(bundle: TLBundle, edge: TLEdge) = {
when (bundle.a.valid) { legalizeFormatA(bundle.a.bits, edge) }
when (bundle.d.valid) { legalizeFormatD(bundle.d.bits, edge) }
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
when (bundle.b.valid) { legalizeFormatB(bundle.b.bits, edge) }
when (bundle.c.valid) { legalizeFormatC(bundle.c.bits, edge) }
when (bundle.e.valid) { legalizeFormatE(bundle.e.bits, edge) }
} else {
monAssert (!bundle.b.valid, "'B' channel valid and not TL-C" + extra)
monAssert (!bundle.c.valid, "'C' channel valid and not TL-C" + extra)
monAssert (!bundle.e.valid, "'E' channel valid and not TL-C" + extra)
}
}
def legalizeMultibeatA(a: DecoupledIO[TLBundleA], edge: TLEdge): Unit = {
val a_first = edge.first(a.bits, a.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (a.valid && !a_first) {
monAssert (a.bits.opcode === opcode, "'A' channel opcode changed within multibeat operation" + extra)
monAssert (a.bits.param === param, "'A' channel param changed within multibeat operation" + extra)
monAssert (a.bits.size === size, "'A' channel size changed within multibeat operation" + extra)
monAssert (a.bits.source === source, "'A' channel source changed within multibeat operation" + extra)
monAssert (a.bits.address === address, "'A' channel address changed within multibeat operation" + extra)
}
when (a.fire && a_first) {
opcode := a.bits.opcode
param := a.bits.param
size := a.bits.size
source := a.bits.source
address := a.bits.address
}
}
def legalizeMultibeatB(b: DecoupledIO[TLBundleB], edge: TLEdge): Unit = {
val b_first = edge.first(b.bits, b.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (b.valid && !b_first) {
monAssert (b.bits.opcode === opcode, "'B' channel opcode changed within multibeat operation" + extra)
monAssert (b.bits.param === param, "'B' channel param changed within multibeat operation" + extra)
monAssert (b.bits.size === size, "'B' channel size changed within multibeat operation" + extra)
monAssert (b.bits.source === source, "'B' channel source changed within multibeat operation" + extra)
monAssert (b.bits.address === address, "'B' channel address changed within multibeat operation" + extra)
}
when (b.fire && b_first) {
opcode := b.bits.opcode
param := b.bits.param
size := b.bits.size
source := b.bits.source
address := b.bits.address
}
}
def legalizeADSourceFormal(bundle: TLBundle, edge: TLEdge): Unit = {
// Symbolic variable
val sym_source = Wire(UInt(edge.client.endSourceId.W))
// TODO: Connect sym_source to a fixed value for simulation and to a
// free wire in formal
sym_source := 0.U
// Type casting Int to UInt
val maxSourceId = Wire(UInt(edge.client.endSourceId.W))
maxSourceId := edge.client.endSourceId.U
// Delayed version of sym_source
val sym_source_d = Reg(UInt(edge.client.endSourceId.W))
sym_source_d := sym_source
// These will be constraints for FV setup
Property(
MonitorDirection.Monitor,
(sym_source === sym_source_d),
"sym_source should remain stable",
PropertyClass.Default)
Property(
MonitorDirection.Monitor,
(sym_source <= maxSourceId),
"sym_source should take legal value",
PropertyClass.Default)
val my_resp_pend = RegInit(false.B)
val my_opcode = Reg(UInt())
val my_size = Reg(UInt())
val a_first = bundle.a.valid && edge.first(bundle.a.bits, bundle.a.fire)
val d_first = bundle.d.valid && edge.first(bundle.d.bits, bundle.d.fire)
val my_a_first_beat = a_first && (bundle.a.bits.source === sym_source)
val my_d_first_beat = d_first && (bundle.d.bits.source === sym_source)
val my_clr_resp_pend = (bundle.d.fire && my_d_first_beat)
val my_set_resp_pend = (bundle.a.fire && my_a_first_beat && !my_clr_resp_pend)
when (my_set_resp_pend) {
my_resp_pend := true.B
} .elsewhen (my_clr_resp_pend) {
my_resp_pend := false.B
}
when (my_a_first_beat) {
my_opcode := bundle.a.bits.opcode
my_size := bundle.a.bits.size
}
val my_resp_size = Mux(my_a_first_beat, bundle.a.bits.size, my_size)
val my_resp_opcode = Mux(my_a_first_beat, bundle.a.bits.opcode, my_opcode)
val my_resp_opcode_legal = Wire(Bool())
when ((my_resp_opcode === TLMessages.Get) || (my_resp_opcode === TLMessages.ArithmeticData) ||
(my_resp_opcode === TLMessages.LogicalData)) {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAckData)
} .elsewhen ((my_resp_opcode === TLMessages.PutFullData) || (my_resp_opcode === TLMessages.PutPartialData)) {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAck)
} .otherwise {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.HintAck)
}
monAssert (IfThen(my_resp_pend, !my_a_first_beat),
"Request message should not be sent with a source ID, for which a response message" +
"is already pending (not received until current cycle) for a prior request message" +
"with the same source ID" + extra)
assume (IfThen(my_clr_resp_pend, (my_set_resp_pend || my_resp_pend)),
"Response message should be accepted with a source ID only if a request message with the" +
"same source ID has been accepted or is being accepted in the current cycle" + extra)
assume (IfThen(my_d_first_beat, (my_a_first_beat || my_resp_pend)),
"Response message should be sent with a source ID only if a request message with the" +
"same source ID has been accepted or is being sent in the current cycle" + extra)
assume (IfThen(my_d_first_beat, (bundle.d.bits.size === my_resp_size)),
"If d_valid is 1, then d_size should be same as a_size of the corresponding request" +
"message" + extra)
assume (IfThen(my_d_first_beat, my_resp_opcode_legal),
"If d_valid is 1, then d_opcode should correspond with a_opcode of the corresponding" +
"request message" + extra)
}
def legalizeMultibeatC(c: DecoupledIO[TLBundleC], edge: TLEdge): Unit = {
val c_first = edge.first(c.bits, c.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (c.valid && !c_first) {
monAssert (c.bits.opcode === opcode, "'C' channel opcode changed within multibeat operation" + extra)
monAssert (c.bits.param === param, "'C' channel param changed within multibeat operation" + extra)
monAssert (c.bits.size === size, "'C' channel size changed within multibeat operation" + extra)
monAssert (c.bits.source === source, "'C' channel source changed within multibeat operation" + extra)
      monAssert (c.bits.address === address, "'C' channel address changed within multibeat operation" + extra)
}
when (c.fire && c_first) {
opcode := c.bits.opcode
param := c.bits.param
size := c.bits.size
source := c.bits.source
address := c.bits.address
}
}
def legalizeMultibeatD(d: DecoupledIO[TLBundleD], edge: TLEdge): Unit = {
val d_first = edge.first(d.bits, d.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val sink = Reg(UInt())
val denied = Reg(Bool())
when (d.valid && !d_first) {
assume (d.bits.opcode === opcode, "'D' channel opcode changed within multibeat operation" + extra)
assume (d.bits.param === param, "'D' channel param changed within multibeat operation" + extra)
assume (d.bits.size === size, "'D' channel size changed within multibeat operation" + extra)
assume (d.bits.source === source, "'D' channel source changed within multibeat operation" + extra)
      assume (d.bits.sink === sink, "'D' channel sink changed within multibeat operation" + extra)
      assume (d.bits.denied === denied, "'D' channel denied changed within multibeat operation" + extra)
}
when (d.fire && d_first) {
opcode := d.bits.opcode
param := d.bits.param
size := d.bits.size
source := d.bits.source
sink := d.bits.sink
denied := d.bits.denied
}
}
def legalizeMultibeat(bundle: TLBundle, edge: TLEdge): Unit = {
legalizeMultibeatA(bundle.a, edge)
legalizeMultibeatD(bundle.d, edge)
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
legalizeMultibeatB(bundle.b, edge)
legalizeMultibeatC(bundle.c, edge)
}
}
  // This is left in for almond, which doesn't adhere to the TileLink protocol
@deprecated("Use legalizeADSource instead if possible","")
def legalizeADSourceOld(bundle: TLBundle, edge: TLEdge): Unit = {
val inflight = RegInit(0.U(edge.client.endSourceId.W))
val a_first = edge.first(bundle.a.bits, bundle.a.fire)
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
val a_set = WireInit(0.U(edge.client.endSourceId.W))
when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) {
a_set := UIntToOH(bundle.a.bits.source)
assert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra)
}
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
assume((a_set | inflight)(bundle.d.bits.source), "'D' channel acknowledged for nothing inflight" + extra)
}
if (edge.manager.minLatency > 0) {
assume(a_set =/= d_clr || !a_set.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra)
}
inflight := (inflight | a_set) & ~d_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
assert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeADSource(bundle: TLBundle, edge: TLEdge): Unit = {
    val a_size_bus_size = edge.bundle.sizeBits + 1 // add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset)
    val a_opcode_bus_size = 3 + 1 // opcode size is 3, but add one so that 0 is not mapped to anything
val log_a_opcode_bus_size = log2Ceil(a_opcode_bus_size)
val log_a_size_bus_size = log2Ceil(a_size_bus_size)
def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits
val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) // size up to avoid width error
inflight.suggestName("inflight")
val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
inflight_opcodes.suggestName("inflight_opcodes")
val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
inflight_sizes.suggestName("inflight_sizes")
val a_first = edge.first(bundle.a.bits, bundle.a.fire)
a_first.suggestName("a_first")
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
d_first.suggestName("d_first")
val a_set = WireInit(0.U(edge.client.endSourceId.W))
val a_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
a_set.suggestName("a_set")
a_set_wo_ready.suggestName("a_set_wo_ready")
val a_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
a_opcodes_set.suggestName("a_opcodes_set")
val a_sizes_set = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
a_sizes_set.suggestName("a_sizes_set")
val a_opcode_lookup = WireInit(0.U((a_opcode_bus_size - 1).W))
a_opcode_lookup.suggestName("a_opcode_lookup")
a_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_a_opcode_bus_size.U) & size_to_numfullbits(1.U << log_a_opcode_bus_size.U)) >> 1.U
val a_size_lookup = WireInit(0.U((1 << log_a_size_bus_size).W))
a_size_lookup.suggestName("a_size_lookup")
a_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_a_size_bus_size.U) & size_to_numfullbits(1.U << log_a_size_bus_size.U)) >> 1.U
val responseMap = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.Grant, TLMessages.Grant))
val responseMapSecondOption = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.GrantData, TLMessages.Grant))
val a_opcodes_set_interm = WireInit(0.U(a_opcode_bus_size.W))
a_opcodes_set_interm.suggestName("a_opcodes_set_interm")
val a_sizes_set_interm = WireInit(0.U(a_size_bus_size.W))
a_sizes_set_interm.suggestName("a_sizes_set_interm")
when (bundle.a.valid && a_first && edge.isRequest(bundle.a.bits)) {
a_set_wo_ready := UIntToOH(bundle.a.bits.source)
}
when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) {
a_set := UIntToOH(bundle.a.bits.source)
a_opcodes_set_interm := (bundle.a.bits.opcode << 1.U) | 1.U
a_sizes_set_interm := (bundle.a.bits.size << 1.U) | 1.U
a_opcodes_set := (a_opcodes_set_interm) << (bundle.a.bits.source << log_a_opcode_bus_size.U)
a_sizes_set := (a_sizes_set_interm) << (bundle.a.bits.source << log_a_size_bus_size.U)
monAssert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra)
}
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
d_clr.suggestName("d_clr")
d_clr_wo_ready.suggestName("d_clr_wo_ready")
val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
d_opcodes_clr.suggestName("d_opcodes_clr")
val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
d_sizes_clr.suggestName("d_sizes_clr")
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr_wo_ready := UIntToOH(bundle.d.bits.source)
}
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
d_opcodes_clr := size_to_numfullbits(1.U << log_a_opcode_bus_size.U) << (bundle.d.bits.source << log_a_opcode_bus_size.U)
d_sizes_clr := size_to_numfullbits(1.U << log_a_size_bus_size.U) << (bundle.d.bits.source << log_a_size_bus_size.U)
}
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
val same_cycle_resp = bundle.a.valid && a_first && edge.isRequest(bundle.a.bits) && (bundle.a.bits.source === bundle.d.bits.source)
assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra)
when (same_cycle_resp) {
assume((bundle.d.bits.opcode === responseMap(bundle.a.bits.opcode)) ||
(bundle.d.bits.opcode === responseMapSecondOption(bundle.a.bits.opcode)), "'D' channel contains improper opcode response" + extra)
assume((bundle.a.bits.size === bundle.d.bits.size), "'D' channel contains improper response size" + extra)
} .otherwise {
assume((bundle.d.bits.opcode === responseMap(a_opcode_lookup)) ||
(bundle.d.bits.opcode === responseMapSecondOption(a_opcode_lookup)), "'D' channel contains improper opcode response" + extra)
assume((bundle.d.bits.size === a_size_lookup), "'D' channel contains improper response size" + extra)
}
}
when(bundle.d.valid && d_first && a_first && bundle.a.valid && (bundle.a.bits.source === bundle.d.bits.source) && !d_release_ack) {
assume((!bundle.d.ready) || bundle.a.ready, "ready check")
}
if (edge.manager.minLatency > 0) {
assume(a_set_wo_ready =/= d_clr_wo_ready || !a_set_wo_ready.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra)
}
inflight := (inflight | a_set) & ~d_clr
inflight_opcodes := (inflight_opcodes | a_opcodes_set) & ~d_opcodes_clr
inflight_sizes := (inflight_sizes | a_sizes_set) & ~d_sizes_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U }
}
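  // Worked example (illustrative, assuming sizeBits = 4): a_size_bus_size = 5 and
  // log_a_size_bus_size = 3, so each source ID owns an 8-bit lane of inflight_sizes.
  // An A request of size 6 from source 2 stores ((6 << 1) | 1) = 0x0d into bits
  // [23:16]; the D-side lookup shifts right by (2 << 3) = 16, masks with
  // size_to_numfullbits(8) = 0xff, and drops the low "set" bit with >> 1 to recover
  // the size 6. The opcode lanes of legalizeADSource and legalizeCDSource work the
  // same way, with 4 bits per source.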
def legalizeCDSource(bundle: TLBundle, edge: TLEdge): Unit = {
    val c_size_bus_size = edge.bundle.sizeBits + 1 // add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset)
    val c_opcode_bus_size = 3 + 1 // opcode size is 3, but add one so that 0 is not mapped to anything
val log_c_opcode_bus_size = log2Ceil(c_opcode_bus_size)
val log_c_size_bus_size = log2Ceil(c_size_bus_size)
def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits
val inflight = RegInit(0.U((2 max edge.client.endSourceId).W))
val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
inflight.suggestName("inflight")
inflight_opcodes.suggestName("inflight_opcodes")
inflight_sizes.suggestName("inflight_sizes")
val c_first = edge.first(bundle.c.bits, bundle.c.fire)
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
c_first.suggestName("c_first")
d_first.suggestName("d_first")
val c_set = WireInit(0.U(edge.client.endSourceId.W))
val c_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
val c_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val c_sizes_set = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
c_set.suggestName("c_set")
c_set_wo_ready.suggestName("c_set_wo_ready")
c_opcodes_set.suggestName("c_opcodes_set")
c_sizes_set.suggestName("c_sizes_set")
val c_opcode_lookup = WireInit(0.U((1 << log_c_opcode_bus_size).W))
val c_size_lookup = WireInit(0.U((1 << log_c_size_bus_size).W))
c_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_c_opcode_bus_size.U) & size_to_numfullbits(1.U << log_c_opcode_bus_size.U)) >> 1.U
c_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_c_size_bus_size.U) & size_to_numfullbits(1.U << log_c_size_bus_size.U)) >> 1.U
c_opcode_lookup.suggestName("c_opcode_lookup")
c_size_lookup.suggestName("c_size_lookup")
val c_opcodes_set_interm = WireInit(0.U(c_opcode_bus_size.W))
val c_sizes_set_interm = WireInit(0.U(c_size_bus_size.W))
c_opcodes_set_interm.suggestName("c_opcodes_set_interm")
c_sizes_set_interm.suggestName("c_sizes_set_interm")
when (bundle.c.valid && c_first && edge.isRequest(bundle.c.bits)) {
c_set_wo_ready := UIntToOH(bundle.c.bits.source)
}
when (bundle.c.fire && c_first && edge.isRequest(bundle.c.bits)) {
c_set := UIntToOH(bundle.c.bits.source)
c_opcodes_set_interm := (bundle.c.bits.opcode << 1.U) | 1.U
c_sizes_set_interm := (bundle.c.bits.size << 1.U) | 1.U
c_opcodes_set := (c_opcodes_set_interm) << (bundle.c.bits.source << log_c_opcode_bus_size.U)
c_sizes_set := (c_sizes_set_interm) << (bundle.c.bits.source << log_c_size_bus_size.U)
monAssert(!inflight(bundle.c.bits.source), "'C' channel re-used a source ID" + extra)
}
val c_probe_ack = bundle.c.bits.opcode === TLMessages.ProbeAck || bundle.c.bits.opcode === TLMessages.ProbeAckData
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
d_clr.suggestName("d_clr")
d_clr_wo_ready.suggestName("d_clr_wo_ready")
d_opcodes_clr.suggestName("d_opcodes_clr")
d_sizes_clr.suggestName("d_sizes_clr")
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
d_clr_wo_ready := UIntToOH(bundle.d.bits.source)
}
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
d_opcodes_clr := size_to_numfullbits(1.U << log_c_opcode_bus_size.U) << (bundle.d.bits.source << log_c_opcode_bus_size.U)
d_sizes_clr := size_to_numfullbits(1.U << log_c_size_bus_size.U) << (bundle.d.bits.source << log_c_size_bus_size.U)
}
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
val same_cycle_resp = bundle.c.valid && c_first && edge.isRequest(bundle.c.bits) && (bundle.c.bits.source === bundle.d.bits.source)
assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra)
when (same_cycle_resp) {
assume((bundle.d.bits.size === bundle.c.bits.size), "'D' channel contains improper response size" + extra)
} .otherwise {
assume((bundle.d.bits.size === c_size_lookup), "'D' channel contains improper response size" + extra)
}
}
when(bundle.d.valid && d_first && c_first && bundle.c.valid && (bundle.c.bits.source === bundle.d.bits.source) && d_release_ack && !c_probe_ack) {
assume((!bundle.d.ready) || bundle.c.ready, "ready check")
}
if (edge.manager.minLatency > 0) {
when (c_set_wo_ready.orR) {
assume(c_set_wo_ready =/= d_clr_wo_ready, s"'C' and 'D' concurrent, despite minlatency > 0" + extra)
}
}
inflight := (inflight | c_set) & ~d_clr
inflight_opcodes := (inflight_opcodes | c_opcodes_set) & ~d_opcodes_clr
inflight_sizes := (inflight_sizes | c_sizes_set) & ~d_sizes_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.c.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeDESink(bundle: TLBundle, edge: TLEdge): Unit = {
val inflight = RegInit(0.U(edge.manager.endSinkId.W))
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
val e_first = true.B
val d_set = WireInit(0.U(edge.manager.endSinkId.W))
when (bundle.d.fire && d_first && edge.isRequest(bundle.d.bits)) {
d_set := UIntToOH(bundle.d.bits.sink)
assume(!inflight(bundle.d.bits.sink), "'D' channel re-used a sink ID" + extra)
}
val e_clr = WireInit(0.U(edge.manager.endSinkId.W))
when (bundle.e.fire && e_first && edge.isResponse(bundle.e.bits)) {
e_clr := UIntToOH(bundle.e.bits.sink)
monAssert((d_set | inflight)(bundle.e.bits.sink), "'E' channel acknowledged for nothing inflight" + extra)
}
// edge.client.minLatency applies to BC, not DE
inflight := (inflight | d_set) & ~e_clr
}
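  // Note (descriptive): legalizeDESink only pairs Grant[Data] on D with GrantAck on E.
  // A first Grant beat marks its sink ID busy in `inflight`; the matching GrantAck
  // frees it. E is always a single beat, so e_first is simply true.B.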
def legalizeUnique(bundle: TLBundle, edge: TLEdge): Unit = {
val sourceBits = log2Ceil(edge.client.endSourceId)
val tooBig = 14 // >16kB worth of flight information gets to be too much
if (sourceBits > tooBig) {
println(s"WARNING: TLMonitor instantiated on a bus with source bits (${sourceBits}) > ${tooBig}; A=>D transaction flight will not be checked")
} else {
if (args.edge.params(TestplanTestType).simulation) {
if (args.edge.params(TLMonitorStrictMode)) {
legalizeADSource(bundle, edge)
legalizeCDSource(bundle, edge)
} else {
legalizeADSourceOld(bundle, edge)
}
}
if (args.edge.params(TestplanTestType).formal) {
legalizeADSourceFormal(bundle, edge)
}
}
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
// legalizeBCSourceAddress(bundle, edge) // too much state needed to synthesize...
val sinkBits = log2Ceil(edge.manager.endSinkId)
if (sinkBits > tooBig) {
println(s"WARNING: TLMonitor instantiated on a bus with sink bits (${sinkBits}) > ${tooBig}; D=>E transaction flight will not be checked")
} else {
legalizeDESink(bundle, edge)
}
}
}
def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit = {
legalizeFormat (bundle, edge)
legalizeMultibeat (bundle, edge)
legalizeUnique (bundle, edge)
}
}
File Misc.scala:
// See LICENSE.Berkeley for license details.
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util._
import chisel3.util.random.LFSR
import org.chipsalliance.cde.config.Parameters
import scala.math._
class ParameterizedBundle(implicit p: Parameters) extends Bundle
trait Clocked extends Bundle {
val clock = Clock()
val reset = Bool()
}
object DecoupledHelper {
def apply(rvs: Bool*) = new DecoupledHelper(rvs)
}
class DecoupledHelper(val rvs: Seq[Bool]) {
def fire(exclude: Bool, includes: Bool*) = {
require(rvs.contains(exclude), "Excluded Bool not present in DecoupledHelper! Note that DecoupledHelper uses referential equality for exclusion! If you don't want to exclude anything, use fire()!")
(rvs.filter(_ ne exclude) ++ includes).reduce(_ && _)
}
def fire() = {
rvs.reduce(_ && _)
}
}
object MuxT {
def apply[T <: Data, U <: Data](cond: Bool, con: (T, U), alt: (T, U)): (T, U) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2))
def apply[T <: Data, U <: Data, W <: Data](cond: Bool, con: (T, U, W), alt: (T, U, W)): (T, U, W) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3))
def apply[T <: Data, U <: Data, W <: Data, X <: Data](cond: Bool, con: (T, U, W, X), alt: (T, U, W, X)): (T, U, W, X) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3), Mux(cond, con._4, alt._4))
}
/** Creates a cascade of n MuxTs to search for a key value. */
object MuxTLookup {
def apply[S <: UInt, T <: Data, U <: Data](key: S, default: (T, U), mapping: Seq[(S, (T, U))]): (T, U) = {
var res = default
for ((k, v) <- mapping.reverse)
res = MuxT(k === key, v, res)
res
}
def apply[S <: UInt, T <: Data, U <: Data, W <: Data](key: S, default: (T, U, W), mapping: Seq[(S, (T, U, W))]): (T, U, W) = {
var res = default
for ((k, v) <- mapping.reverse)
res = MuxT(k === key, v, res)
res
}
}
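// Example (illustrative): select a (valid, data) pair keyed by a small opcode,
// falling back to the default when no key matches:
//   val (hit, value) = MuxTLookup(op, (false.B, 0.U(8.W)),
//     Seq(1.U -> (true.B, 0x0a.U(8.W)),
//         2.U -> (true.B, 0x14.U(8.W))))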
object ValidMux {
def apply[T <: Data](v1: ValidIO[T], v2: ValidIO[T]*): ValidIO[T] = {
apply(v1 +: v2.toSeq)
}
def apply[T <: Data](valids: Seq[ValidIO[T]]): ValidIO[T] = {
val out = Wire(Valid(valids.head.bits.cloneType))
out.valid := valids.map(_.valid).reduce(_ || _)
out.bits := MuxCase(valids.head.bits,
valids.map(v => (v.valid -> v.bits)))
out
}
}
object Str
{
def apply(s: String): UInt = {
var i = BigInt(0)
require(s.forall(validChar _))
for (c <- s)
i = (i << 8) | c
i.U((s.length*8).W)
}
def apply(x: Char): UInt = {
require(validChar(x))
x.U(8.W)
}
def apply(x: UInt): UInt = apply(x, 10)
def apply(x: UInt, radix: Int): UInt = {
val rad = radix.U
val w = x.getWidth
require(w > 0)
var q = x
var s = digit(q % rad)
for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
q = q / rad
s = Cat(Mux((radix == 10).B && q === 0.U, Str(' '), digit(q % rad)), s)
}
s
}
def apply(x: SInt): UInt = apply(x, 10)
def apply(x: SInt, radix: Int): UInt = {
val neg = x < 0.S
val abs = x.abs.asUInt
if (radix != 10) {
Cat(Mux(neg, Str('-'), Str(' ')), Str(abs, radix))
} else {
val rad = radix.U
val w = abs.getWidth
require(w > 0)
var q = abs
var s = digit(q % rad)
var needSign = neg
for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
q = q / rad
val placeSpace = q === 0.U
val space = Mux(needSign, Str('-'), Str(' '))
needSign = needSign && !placeSpace
s = Cat(Mux(placeSpace, space, digit(q % rad)), s)
}
Cat(Mux(needSign, Str('-'), Str(' ')), s)
}
}
private def digit(d: UInt): UInt = Mux(d < 10.U, Str('0')+d, Str(('a'-10).toChar)+d)(7,0)
private def validChar(x: Char) = x == (x & 0xFF)
}
object Split
{
def apply(x: UInt, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n0), x.extract(n0-1,0))
}
def apply(x: UInt, n1: Int, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0))
}
def apply(x: UInt, n2: Int, n1: Int, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n2), x.extract(n2-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0))
}
}
object Random
{
def apply(mod: Int, random: UInt): UInt = {
if (isPow2(mod)) random.extract(log2Ceil(mod)-1,0)
else PriorityEncoder(partition(apply(1 << log2Up(mod*8), random), mod))
}
def apply(mod: Int): UInt = apply(mod, randomizer)
def oneHot(mod: Int, random: UInt): UInt = {
if (isPow2(mod)) UIntToOH(random(log2Up(mod)-1,0))
else PriorityEncoderOH(partition(apply(1 << log2Up(mod*8), random), mod)).asUInt
}
def oneHot(mod: Int): UInt = oneHot(mod, randomizer)
private def randomizer = LFSR(16)
private def partition(value: UInt, slices: Int) =
Seq.tabulate(slices)(i => value < (((i + 1) << value.getWidth) / slices).U)
}
object Majority {
def apply(in: Set[Bool]): Bool = {
val n = (in.size >> 1) + 1
val clauses = in.subsets(n).map(_.reduce(_ && _))
clauses.reduce(_ || _)
}
def apply(in: Seq[Bool]): Bool = apply(in.toSet)
def apply(in: UInt): Bool = apply(in.asBools.toSet)
}
object PopCountAtLeast {
private def two(x: UInt): (Bool, Bool) = x.getWidth match {
case 1 => (x.asBool, false.B)
case n =>
val half = x.getWidth / 2
val (leftOne, leftTwo) = two(x(half - 1, 0))
val (rightOne, rightTwo) = two(x(x.getWidth - 1, half))
(leftOne || rightOne, leftTwo || rightTwo || (leftOne && rightOne))
}
def apply(x: UInt, n: Int): Bool = n match {
case 0 => true.B
case 1 => x.orR
case 2 => two(x)._2
case 3 => PopCount(x) >= n.U
}
  }
}
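// Note (descriptive): PopCountAtLeast(x, 2) avoids a full population count. `two`
// recursively computes ("at least one", "at least two") flags for each half of x and
// merges them, which is cheaper than the adder tree a PopCount comparison would need.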
// This gets used everywhere, so make the smallest circuit possible ...
// Given an address and size, create a mask of beatBytes size
// eg: (0x3, 0, 4) => 0001, (0x3, 1, 4) => 0011, (0x3, 2, 4) => 1111
// groupBy applies an interleaved OR reduction; groupBy=2 takes 0010 => 01
object MaskGen {
def apply(addr_lo: UInt, lgSize: UInt, beatBytes: Int, groupBy: Int = 1): UInt = {
require (groupBy >= 1 && beatBytes >= groupBy)
require (isPow2(beatBytes) && isPow2(groupBy))
val lgBytes = log2Ceil(beatBytes)
val sizeOH = UIntToOH(lgSize | 0.U(log2Up(beatBytes).W), log2Up(beatBytes)) | (groupBy*2 - 1).U
def helper(i: Int): Seq[(Bool, Bool)] = {
if (i == 0) {
Seq((lgSize >= lgBytes.asUInt, true.B))
} else {
val sub = helper(i-1)
val size = sizeOH(lgBytes - i)
val bit = addr_lo(lgBytes - i)
val nbit = !bit
Seq.tabulate (1 << i) { j =>
val (sub_acc, sub_eq) = sub(j/2)
val eq = sub_eq && (if (j % 2 == 1) bit else nbit)
val acc = sub_acc || (size && eq)
(acc, eq)
}
}
}
if (groupBy == beatBytes) 1.U else
Cat(helper(lgBytes-log2Ceil(groupBy)).map(_._1).reverse)
}
}
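// Worked example (illustrative): MaskGen(4.U, 1.U, 8) describes a 2-byte access at
// offset 4 within an 8-byte beat and yields the byte mask 0x30 (bytes 4 and 5 set);
// with groupBy = 4 the same access reduces to a 2-bit mask selecting only the upper
// 4-byte group.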
File PlusArg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.experimental._
import chisel3.util.HasBlackBoxResource
@deprecated("This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05")
case class PlusArgInfo(default: BigInt, docstring: String)
/** Case class for PlusArg information
*
* @tparam A scala type of the PlusArg value
* @param default optional default value
* @param docstring text to include in the help
* @param doctype description of the Verilog type of the PlusArg value (e.g. STRING, INT)
*/
private case class PlusArgContainer[A](default: Option[A], docstring: String, doctype: String)
/** Typeclass for converting a type to a doctype string
* @tparam A some type
*/
trait Doctypeable[A] {
/** Return the doctype string for some option */
def toDoctype(a: Option[A]): String
}
/** Object containing implementations of the Doctypeable typeclass */
object Doctypes {
/** Converts an Int => "INT" */
implicit val intToDoctype = new Doctypeable[Int] { def toDoctype(a: Option[Int]) = "INT" }
/** Converts a BigInt => "INT" */
implicit val bigIntToDoctype = new Doctypeable[BigInt] { def toDoctype(a: Option[BigInt]) = "INT" }
/** Converts a String => "STRING" */
implicit val stringToDoctype = new Doctypeable[String] { def toDoctype(a: Option[String]) = "STRING" }
}
class plusarg_reader(val format: String, val default: BigInt, val docstring: String, val width: Int) extends BlackBox(Map(
"FORMAT" -> StringParam(format),
"DEFAULT" -> IntParam(default),
"WIDTH" -> IntParam(width)
)) with HasBlackBoxResource {
val io = IO(new Bundle {
val out = Output(UInt(width.W))
})
addResource("/vsrc/plusarg_reader.v")
}
/* This wrapper class has no outputs, making it clear it is a simulation-only construct */
class PlusArgTimeout(val format: String, val default: BigInt, val docstring: String, val width: Int) extends Module {
val io = IO(new Bundle {
val count = Input(UInt(width.W))
})
val max = Module(new plusarg_reader(format, default, docstring, width)).io.out
when (max > 0.U) {
assert (io.count < max, s"Timeout exceeded: $docstring")
}
}
import Doctypes._
object PlusArg
{
/** PlusArg("foo") will return 42.U if the simulation is run with +foo=42
* Do not use this as an initial register value. The value is set in an
  * initial block and thus accessing it from another initial is racy.
* Add a docstring to document the arg, which can be dumped in an elaboration
* pass.
*/
def apply(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32): UInt = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new plusarg_reader(name + "=%d", default, docstring, width)).io.out
}
/** PlusArg.timeout(name, default, docstring)(count) will use chisel.assert
* to kill the simulation when count exceeds the specified integer argument.
* Default 0 will never assert.
*/
def timeout(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32)(count: UInt): Unit = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new PlusArgTimeout(name + "=%d", default, docstring, width)).io.count := count
}
}
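// Example (illustrative): the TileLink monitor (legalizeADSource above) reads its
// watchdog budget with
//   val limit = PlusArg("tilelink_timeout",
//     docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
// so a simulation can be run with +tilelink_timeout=100000 to arm the timeout.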
object PlusArgArtefacts {
private var artefacts: Map[String, PlusArgContainer[_]] = Map.empty
/* Add a new PlusArg */
@deprecated(
"Use `Some(BigInt)` to specify a `default` value. This will be removed in Rocket Chip 2020.08",
"Rocket Chip 2020.05"
)
def append(name: String, default: BigInt, docstring: String): Unit = append(name, Some(default), docstring)
/** Add a new PlusArg
*
* @tparam A scala type of the PlusArg value
* @param name name for the PlusArg
* @param default optional default value
* @param docstring text to include in the help
*/
def append[A : Doctypeable](name: String, default: Option[A], docstring: String): Unit =
artefacts = artefacts ++
Map(name -> PlusArgContainer(default, docstring, implicitly[Doctypeable[A]].toDoctype(default)))
/* From plus args, generate help text */
private def serializeHelp_cHeader(tab: String = ""): String = artefacts
.map{ case(arg, info) =>
s"""|$tab+$arg=${info.doctype}\\n\\
|$tab${" "*20}${info.docstring}\\n\\
|""".stripMargin ++ info.default.map{ case default =>
s"$tab${" "*22}(default=${default})\\n\\\n"}.getOrElse("")
}.toSeq.mkString("\\n\\\n") ++ "\""
/* From plus args, generate a char array of their names */
private def serializeArray_cHeader(tab: String = ""): String = {
val prettyTab = tab + " " * 44 // Length of 'static const ...'
s"${tab}static const char * verilog_plusargs [] = {\\\n" ++
artefacts
.map{ case(arg, _) => s"""$prettyTab"$arg",\\\n""" }
.mkString("")++
s"${prettyTab}0};"
}
/* Generate C code to be included in emulator.cc that helps with
* argument parsing based on available Verilog PlusArgs */
def serialize_cHeader(): String =
s"""|#define PLUSARG_USAGE_OPTIONS \"EMULATOR VERILOG PLUSARGS\\n\\
|${serializeHelp_cHeader(" "*7)}
|${serializeArray_cHeader()}
|""".stripMargin
}
File package.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip
import chisel3._
import chisel3.util._
import scala.math.min
import scala.collection.{immutable, mutable}
package object util {
implicit class UnzippableOption[S, T](val x: Option[(S, T)]) {
def unzip = (x.map(_._1), x.map(_._2))
}
implicit class UIntIsOneOf(private val x: UInt) extends AnyVal {
def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR
def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq)
}
implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal {
/** Like Vec.apply(idx), but tolerates indices of mismatched width */
def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0))
}
implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal {
def apply(idx: UInt): T = {
if (x.size <= 1) {
x.head
} else if (!isPow2(x.size)) {
// For non-power-of-2 seqs, reflect elements to simplify decoder
(x ++ x.takeRight(x.size & -x.size)).toSeq(idx)
} else {
// Ignore MSBs of idx
val truncIdx =
if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx
else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0)
x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) }
}
}
def extract(idx: UInt): T = VecInit(x).extract(idx)
def asUInt: UInt = Cat(x.map(_.asUInt).reverse)
def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n)
def rotate(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n)
def rotateRight(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
}
// allow bitwise ops on Seq[Bool] just like UInt
implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal {
def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b }
def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b }
def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b }
def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x
def >> (n: Int): Seq[Bool] = x drop n
def unary_~ : Seq[Bool] = x.map(!_)
def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_)
def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_)
def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_)
private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B)
}
implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal {
def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable))
def getElements: Seq[Element] = x match {
case e: Element => Seq(e)
case a: Aggregate => a.getElements.flatMap(_.getElements)
}
}
/** Any Data subtype that has a Bool member named valid. */
type DataCanBeValid = Data { val valid: Bool }
implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal {
def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable)
}
implicit class StringToAugmentedString(private val x: String) extends AnyVal {
    /** converts from camel case to underscores, also removing all spaces */
def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") {
case (acc, c) if c.isUpper => acc + "_" + c.toLower
case (acc, c) if c == ' ' => acc
case (acc, c) => acc + c
}
/** converts spaces or underscores to hyphens, also lowering case */
def kebab: String = x.toLowerCase map {
case ' ' => '-'
case '_' => '-'
case c => c
}
def named(name: Option[String]): String = {
x + name.map("_named_" + _ ).getOrElse("_with_no_name")
}
def named(name: String): String = named(Some(name))
}
implicit def uintToBitPat(x: UInt): BitPat = BitPat(x)
implicit def wcToUInt(c: WideCounter): UInt = c.value
implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal {
def sextTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x)
}
def padTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(0.U((n - x.getWidth).W), x)
}
// shifts left by n if n >= 0, or right by -n if n < 0
def << (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << n(w-1, 0)
Mux(n(w), shifted >> (1 << w), shifted)
}
// shifts right by n if n >= 0, or left by -n if n < 0
def >> (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << (1 << w) >> n(w-1, 0)
Mux(n(w), shifted, shifted >> (1 << w))
}
// Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts
def extract(hi: Int, lo: Int): UInt = {
require(hi >= lo-1)
if (hi == lo-1) 0.U
else x(hi, lo)
}
// Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts
def extractOption(hi: Int, lo: Int): Option[UInt] = {
require(hi >= lo-1)
if (hi == lo-1) None
else Some(x(hi, lo))
}
// like x & ~y, but first truncate or zero-extend y to x's width
def andNot(y: UInt): UInt = x & ~(y | (x & 0.U))
def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n)
def rotateRight(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r))
}
}
def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n))
def rotateLeft(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r))
}
}
// compute (this + y) % n, given (this < n) and (y < n)
def addWrap(y: UInt, n: Int): UInt = {
val z = x +& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0)
}
// compute (this - y) % n, given (this < n) and (y < n)
def subWrap(y: UInt, n: Int): UInt = {
val z = x -& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0)
}
def grouped(width: Int): Seq[UInt] =
(0 until x.getWidth by width).map(base => x(base + width - 1, base))
def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds
def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x)
// Like >=, but prevents x-prop for ('x >= 0)
def >== (y: UInt): Bool = x >= y || y === 0.U
}
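  // Worked examples (illustrative): for x = 5.U and n = 7, x.addWrap(4.U, 7) yields
  // 2.U (5 + 4 = 9, reduced by 7) and x.subWrap(6.U, 7) yields 6.U (5 - 6 wraps and
  // 7 is added back); both rely on the stated precondition that x < n and y < n.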
implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal {
def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y)
def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y)
}
implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal {
def toInt: Int = if (x) 1 else 0
// this one's snagged from scalaz
def option[T](z: => T): Option[T] = if (x) Some(z) else None
}
implicit class IntToAugmentedInt(private val x: Int) extends AnyVal {
// exact log2
def log2: Int = {
require(isPow2(x))
log2Ceil(x)
}
}
def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x)
def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x))
def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0)
def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1)
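  // Worked example (illustrative): UIntToOH1(3.U, 8) = b00000111, a thermometer mask
  // with three ones; OH1ToOH of that value is b00001000 and OH1ToUInt recovers 3.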
def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None
// Fill 1s from low bits to high bits
def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth)
def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0))
helper(1, x)(width-1, 0)
}
  // Fill 1s from high bits to low bits
def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth)
def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x >> s))
helper(1, x)(width-1, 0)
}
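  // Worked example (illustrative): leftOR(b00100) = b11100 (each set bit smears
  // toward the MSB), while rightOR(b00100) = b00111 (smears toward the LSB).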
def OptimizationBarrier[T <: Data](in: T): T = {
val barrier = Module(new Module {
val io = IO(new Bundle {
val x = Input(chiselTypeOf(in))
val y = Output(chiselTypeOf(in))
})
io.y := io.x
override def desiredName = s"OptimizationBarrier_${in.typeName}"
})
barrier.io.x := in
barrier.io.y
}
/** Similar to Seq.groupBy except this returns a Seq instead of a Map
* Useful for deterministic code generation
*/
def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = {
val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]]
for (x <- xs) {
val key = f(x)
val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A])
l += x
}
map.view.map({ case (k, vs) => k -> vs.toList }).toList
}
def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match {
case 1 => List.fill(n)(in.head)
case x if x == n => in
case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in")
}
  // HeterogeneousBag moved to standalone diplomacy
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts)
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag
}
File Bundles.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import freechips.rocketchip.util._
import scala.collection.immutable.ListMap
import chisel3.util.Decoupled
import chisel3.util.DecoupledIO
import chisel3.reflect.DataMirror
abstract class TLBundleBase(val params: TLBundleParameters) extends Bundle
// common combos in lazy policy:
// Put + Acquire
// Release + AccessAck
object TLMessages
{
// A B C D E
def PutFullData = 0.U // . . => AccessAck
def PutPartialData = 1.U // . . => AccessAck
def ArithmeticData = 2.U // . . => AccessAckData
def LogicalData = 3.U // . . => AccessAckData
def Get = 4.U // . . => AccessAckData
def Hint = 5.U // . . => HintAck
def AcquireBlock = 6.U // . => Grant[Data]
def AcquirePerm = 7.U // . => Grant[Data]
def Probe = 6.U // . => ProbeAck[Data]
def AccessAck = 0.U // . .
def AccessAckData = 1.U // . .
def HintAck = 2.U // . .
def ProbeAck = 4.U // .
def ProbeAckData = 5.U // .
def Release = 6.U // . => ReleaseAck
def ReleaseData = 7.U // . => ReleaseAck
def Grant = 4.U // . => GrantAck
def GrantData = 5.U // . => GrantAck
def ReleaseAck = 6.U // .
def GrantAck = 0.U // .
def isA(x: UInt) = x <= AcquirePerm
def isB(x: UInt) = x <= Probe
def isC(x: UInt) = x <= ReleaseData
def isD(x: UInt) = x <= ReleaseAck
def adResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, Grant, Grant)
def bcResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, ProbeAck, ProbeAck)
def a = Seq( ("PutFullData",TLPermissions.PermMsgReserved),
("PutPartialData",TLPermissions.PermMsgReserved),
("ArithmeticData",TLAtomics.ArithMsg),
("LogicalData",TLAtomics.LogicMsg),
("Get",TLPermissions.PermMsgReserved),
("Hint",TLHints.HintsMsg),
("AcquireBlock",TLPermissions.PermMsgGrow),
("AcquirePerm",TLPermissions.PermMsgGrow))
def b = Seq( ("PutFullData",TLPermissions.PermMsgReserved),
("PutPartialData",TLPermissions.PermMsgReserved),
("ArithmeticData",TLAtomics.ArithMsg),
("LogicalData",TLAtomics.LogicMsg),
("Get",TLPermissions.PermMsgReserved),
("Hint",TLHints.HintsMsg),
("Probe",TLPermissions.PermMsgCap))
def c = Seq( ("AccessAck",TLPermissions.PermMsgReserved),
("AccessAckData",TLPermissions.PermMsgReserved),
("HintAck",TLPermissions.PermMsgReserved),
("Invalid Opcode",TLPermissions.PermMsgReserved),
("ProbeAck",TLPermissions.PermMsgReport),
("ProbeAckData",TLPermissions.PermMsgReport),
("Release",TLPermissions.PermMsgReport),
("ReleaseData",TLPermissions.PermMsgReport))
def d = Seq( ("AccessAck",TLPermissions.PermMsgReserved),
("AccessAckData",TLPermissions.PermMsgReserved),
("HintAck",TLPermissions.PermMsgReserved),
("Invalid Opcode",TLPermissions.PermMsgReserved),
("Grant",TLPermissions.PermMsgCap),
("GrantData",TLPermissions.PermMsgCap),
("ReleaseAck",TLPermissions.PermMsgReserved))
}
/**
* The three primary TileLink permissions are:
* (T)runk: the agent is (or is on inwards path to) the global point of serialization.
 * (B)ranch: the agent is on an outwards path to the Trunk and holds a read-only copy.
 * (N)one: the agent holds no copy of (or permissions on) the block.
* These permissions are permuted by transfer operations in various ways.
* Operations can cap permissions, request for them to be grown or shrunk,
* or for a report on their current status.
*/
object TLPermissions
{
val aWidth = 2
val bdWidth = 2
val cWidth = 3
  // Cap types (Grant = new permissions, Probe = permissions <= target)
def toT = 0.U(bdWidth.W)
def toB = 1.U(bdWidth.W)
def toN = 2.U(bdWidth.W)
def isCap(x: UInt) = x <= toN
// Grow types (Acquire = permissions >= target)
def NtoB = 0.U(aWidth.W)
def NtoT = 1.U(aWidth.W)
def BtoT = 2.U(aWidth.W)
def isGrow(x: UInt) = x <= BtoT
// Shrink types (ProbeAck, Release)
def TtoB = 0.U(cWidth.W)
def TtoN = 1.U(cWidth.W)
def BtoN = 2.U(cWidth.W)
def isShrink(x: UInt) = x <= BtoN
// Report types (ProbeAck, Release)
def TtoT = 3.U(cWidth.W)
def BtoB = 4.U(cWidth.W)
def NtoN = 5.U(cWidth.W)
def isReport(x: UInt) = x <= NtoN
def PermMsgGrow:Seq[String] = Seq("Grow NtoB", "Grow NtoT", "Grow BtoT")
def PermMsgCap:Seq[String] = Seq("Cap toT", "Cap toB", "Cap toN")
  def PermMsgReport:Seq[String] = Seq("Shrink TtoB", "Shrink TtoN", "Shrink BtoN", "Report TtoT", "Report BtoB", "Report NtoN")
def PermMsgReserved:Seq[String] = Seq("Reserved")
}
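// Example (illustrative): a Probe that caps permissions toB is answered on C with
// ProbeAck(param = TtoB) by a client that held Trunk, or ProbeAck(param = BtoB) by
// one that already held only Branch; a client evicting a clean Branch sends
// Release(param = BtoN).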
object TLAtomics
{
val width = 3
// Arithmetic types
def MIN = 0.U(width.W)
def MAX = 1.U(width.W)
def MINU = 2.U(width.W)
def MAXU = 3.U(width.W)
def ADD = 4.U(width.W)
def isArithmetic(x: UInt) = x <= ADD
// Logical types
def XOR = 0.U(width.W)
def OR = 1.U(width.W)
def AND = 2.U(width.W)
def SWAP = 3.U(width.W)
def isLogical(x: UInt) = x <= SWAP
def ArithMsg:Seq[String] = Seq("MIN", "MAX", "MINU", "MAXU", "ADD")
def LogicMsg:Seq[String] = Seq("XOR", "OR", "AND", "SWAP")
}
object TLHints
{
val width = 1
def PREFETCH_READ = 0.U(width.W)
def PREFETCH_WRITE = 1.U(width.W)
def isHints(x: UInt) = x <= PREFETCH_WRITE
def HintsMsg:Seq[String] = Seq("PrefetchRead", "PrefetchWrite")
}
sealed trait TLChannel extends TLBundleBase {
val channelName: String
}
sealed trait TLDataChannel extends TLChannel
sealed trait TLAddrChannel extends TLDataChannel
final class TLBundleA(params: TLBundleParameters)
extends TLBundleBase(params) with TLAddrChannel
{
override def typeName = s"TLBundleA_${params.shortName}"
val channelName = "'A' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(List(TLAtomics.width, TLPermissions.aWidth, TLHints.width).max.W) // amo_opcode || grow perms || hint
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // from
val address = UInt(params.addressBits.W) // to
val user = BundleMap(params.requestFields)
val echo = BundleMap(params.echoFields)
// variable fields during multibeat:
val mask = UInt((params.dataBits/8).W)
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleB(params: TLBundleParameters)
extends TLBundleBase(params) with TLAddrChannel
{
override def typeName = s"TLBundleB_${params.shortName}"
val channelName = "'B' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(TLPermissions.bdWidth.W) // cap perms
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // to
val address = UInt(params.addressBits.W) // from
// variable fields during multibeat:
val mask = UInt((params.dataBits/8).W)
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleC(params: TLBundleParameters)
extends TLBundleBase(params) with TLAddrChannel
{
override def typeName = s"TLBundleC_${params.shortName}"
val channelName = "'C' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(TLPermissions.cWidth.W) // shrink or report perms
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // from
val address = UInt(params.addressBits.W) // to
val user = BundleMap(params.requestFields)
val echo = BundleMap(params.echoFields)
// variable fields during multibeat:
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleD(params: TLBundleParameters)
extends TLBundleBase(params) with TLDataChannel
{
override def typeName = s"TLBundleD_${params.shortName}"
val channelName = "'D' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(TLPermissions.bdWidth.W) // cap perms
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // to
val sink = UInt(params.sinkBits.W) // from
val denied = Bool() // implies corrupt iff *Data
val user = BundleMap(params.responseFields)
val echo = BundleMap(params.echoFields)
// variable fields during multibeat:
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleE(params: TLBundleParameters)
extends TLBundleBase(params) with TLChannel
{
override def typeName = s"TLBundleE_${params.shortName}"
val channelName = "'E' channel"
val sink = UInt(params.sinkBits.W) // to
}
class TLBundle(val params: TLBundleParameters) extends Record
{
// Emulate a Bundle with elements abcde or ad depending on params.hasBCE
private val optA = Some (Decoupled(new TLBundleA(params)))
private val optB = params.hasBCE.option(Flipped(Decoupled(new TLBundleB(params))))
private val optC = params.hasBCE.option(Decoupled(new TLBundleC(params)))
private val optD = Some (Flipped(Decoupled(new TLBundleD(params))))
private val optE = params.hasBCE.option(Decoupled(new TLBundleE(params)))
def a: DecoupledIO[TLBundleA] = optA.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleA(params)))))
def b: DecoupledIO[TLBundleB] = optB.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleB(params)))))
def c: DecoupledIO[TLBundleC] = optC.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleC(params)))))
def d: DecoupledIO[TLBundleD] = optD.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleD(params)))))
def e: DecoupledIO[TLBundleE] = optE.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleE(params)))))
val elements =
if (params.hasBCE) ListMap("e" -> e, "d" -> d, "c" -> c, "b" -> b, "a" -> a)
else ListMap("d" -> d, "a" -> a)
def tieoff(): Unit = {
DataMirror.specifiedDirectionOf(a.ready) match {
case SpecifiedDirection.Input =>
a.ready := false.B
c.ready := false.B
e.ready := false.B
b.valid := false.B
d.valid := false.B
case SpecifiedDirection.Output =>
a.valid := false.B
c.valid := false.B
e.valid := false.B
b.ready := false.B
d.ready := false.B
case _ =>
}
}
}
object TLBundle
{
def apply(params: TLBundleParameters) = new TLBundle(params)
}
class TLAsyncBundleBase(val params: TLAsyncBundleParameters) extends Bundle
class TLAsyncBundle(params: TLAsyncBundleParameters) extends TLAsyncBundleBase(params)
{
val a = new AsyncBundle(new TLBundleA(params.base), params.async)
val b = Flipped(new AsyncBundle(new TLBundleB(params.base), params.async))
val c = new AsyncBundle(new TLBundleC(params.base), params.async)
val d = Flipped(new AsyncBundle(new TLBundleD(params.base), params.async))
val e = new AsyncBundle(new TLBundleE(params.base), params.async)
}
class TLRationalBundle(params: TLBundleParameters) extends TLBundleBase(params)
{
val a = RationalIO(new TLBundleA(params))
val b = Flipped(RationalIO(new TLBundleB(params)))
val c = RationalIO(new TLBundleC(params))
val d = Flipped(RationalIO(new TLBundleD(params)))
val e = RationalIO(new TLBundleE(params))
}
class TLCreditedBundle(params: TLBundleParameters) extends TLBundleBase(params)
{
val a = CreditedIO(new TLBundleA(params))
val b = Flipped(CreditedIO(new TLBundleB(params)))
val c = CreditedIO(new TLBundleC(params))
val d = Flipped(CreditedIO(new TLBundleD(params)))
val e = CreditedIO(new TLBundleE(params))
}
File Parameters.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.diplomacy
import chisel3._
import chisel3.util.{DecoupledIO, Queue, ReadyValidIO, isPow2, log2Ceil, log2Floor}
import freechips.rocketchip.util.ShiftQueue
/** Options for describing the attributes of memory regions */
object RegionType {
// Define the 'more relaxed than' ordering
val cases = Seq(CACHED, TRACKED, UNCACHED, IDEMPOTENT, VOLATILE, PUT_EFFECTS, GET_EFFECTS)
sealed trait T extends Ordered[T] {
def compare(that: T): Int = cases.indexOf(that) compare cases.indexOf(this)
}
case object CACHED extends T // an intermediate agent may have cached a copy of the region for you
case object TRACKED extends T // the region may have been cached by another master, but coherence is being provided
case object UNCACHED extends T // the region has not been cached yet, but should be cached when possible
case object IDEMPOTENT extends T // gets return most recently put content, but content should not be cached
case object VOLATILE extends T // content may change without a put, but puts and gets have no side effects
case object PUT_EFFECTS extends T // puts produce side effects and so must not be combined/delayed
case object GET_EFFECTS extends T // gets produce side effects and so must not be issued speculatively
}
// A non-empty half-open range; [start, end)
case class IdRange(start: Int, end: Int) extends Ordered[IdRange]
{
require (start >= 0, s"Ids cannot be negative, but got: $start.")
require (start <= end, "Id ranges cannot be negative.")
def compare(x: IdRange) = {
val primary = (this.start - x.start).signum
val secondary = (x.end - this.end).signum
if (primary != 0) primary else secondary
}
def overlaps(x: IdRange) = start < x.end && x.start < end
def contains(x: IdRange) = start <= x.start && x.end <= end
def contains(x: Int) = start <= x && x < end
def contains(x: UInt) =
if (size == 0) {
false.B
} else if (size == 1) { // simple comparison
x === start.U
} else {
// find index of largest different bit
val largestDeltaBit = log2Floor(start ^ (end-1))
val smallestCommonBit = largestDeltaBit + 1 // may not exist in x
val uncommonMask = (1 << smallestCommonBit) - 1
val uncommonBits = (x | 0.U(smallestCommonBit.W))(largestDeltaBit, 0)
// the prefix must match exactly (note: may shift ALL bits away)
(x >> smallestCommonBit) === (start >> smallestCommonBit).U &&
// firrtl constant prop range analysis can eliminate these two:
(start & uncommonMask).U <= uncommonBits &&
uncommonBits <= ((end-1) & uncommonMask).U
}
def shift(x: Int) = IdRange(start+x, end+x)
def size = end - start
def isEmpty = end == start
def range = start until end
}
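// Worked example (illustrative): for IdRange(4, 8), start ^ (end-1) = 4 ^ 7 = 3, so
// largestDeltaBit = 1 and smallestCommonBit = 2; contains(x) then reduces to checking
// (x >> 2) === 1.U, with the two low-order bound checks trivially true for [4, 7].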
object IdRange
{
def overlaps(s: Seq[IdRange]) = if (s.isEmpty) None else {
val ranges = s.sorted
(ranges.tail zip ranges.init) find { case (a, b) => a overlaps b }
}
}
// A potentially empty inclusive range of 2-powers [min, max] (in bytes)
case class TransferSizes(min: Int, max: Int)
{
def this(x: Int) = this(x, x)
require (min <= max, s"Min transfer $min > max transfer $max")
require (min >= 0 && max >= 0, s"TransferSizes must be positive, got: ($min, $max)")
require (max == 0 || isPow2(max), s"TransferSizes must be a power of 2, got: $max")
require (min == 0 || isPow2(min), s"TransferSizes must be a power of 2, got: $min")
require (max == 0 || min != 0, s"TransferSize 0 is forbidden unless (0,0), got: ($min, $max)")
def none = min == 0
def contains(x: Int) = isPow2(x) && min <= x && x <= max
def containsLg(x: Int) = contains(1 << x)
def containsLg(x: UInt) =
if (none) false.B
else if (min == max) { log2Ceil(min).U === x }
else { log2Ceil(min).U <= x && x <= log2Ceil(max).U }
def contains(x: TransferSizes) = x.none || (min <= x.min && x.max <= max)
def intersect(x: TransferSizes) =
if (x.max < min || max < x.min) TransferSizes.none
else TransferSizes(scala.math.max(min, x.min), scala.math.min(max, x.max))
// Not a union, because the result may contain sizes contained by neither term
// NOT TO BE CONFUSED WITH COVERPOINTS
def mincover(x: TransferSizes) = {
if (none) {
x
} else if (x.none) {
this
} else {
TransferSizes(scala.math.min(min, x.min), scala.math.max(max, x.max))
}
}
override def toString() = "TransferSizes[%d, %d]".format(min, max)
}
object TransferSizes {
def apply(x: Int) = new TransferSizes(x)
val none = new TransferSizes(0)
def mincover(seq: Seq[TransferSizes]) = seq.foldLeft(none)(_ mincover _)
def intersect(seq: Seq[TransferSizes]) = seq.reduce(_ intersect _)
implicit def asBool(x: TransferSizes) = !x.none
}
// AddressSets specify the address space managed by the manager
// Base is the base address, and mask gives the bits consumed by the manager
// e.g: base=0x200, mask=0xff describes a device managing 0x200-0x2ff
// e.g: base=0x1000, mask=0xf0f describes a device managing 0x1000-0x100f, 0x1100-0x110f, ...
case class AddressSet(base: BigInt, mask: BigInt) extends Ordered[AddressSet]
{
// Forbid misaligned base address (and empty sets)
require ((base & mask) == 0, s"Mis-aligned AddressSets are forbidden, got: ${this.toString}")
require (base >= 0, s"AddressSet negative base is ambiguous: $base") // TL2 address widths are not fixed => negative is ambiguous
// We do allow negative mask (=> ignore all high bits)
def contains(x: BigInt) = ((x ^ base) & ~mask) == 0
def contains(x: UInt) = ((x ^ base.U).zext & (~mask).S) === 0.S
// turn x into an address contained in this set
def legalize(x: UInt): UInt = base.U | (mask.U & x)
// overlap iff bitwise: both care (~mask0 & ~mask1) => both equal (base0=base1)
def overlaps(x: AddressSet) = (~(mask | x.mask) & (base ^ x.base)) == 0
// contains iff bitwise: x.mask => mask && contains(x.base)
def contains(x: AddressSet) = ((x.mask | (base ^ x.base)) & ~mask) == 0
// The number of bytes to which the manager must be aligned
def alignment = ((mask + 1) & ~mask)
// Is this a contiguous memory range
def contiguous = alignment == mask+1
def finite = mask >= 0
def max = { require (finite, "Max cannot be calculated on infinite mask"); base | mask }
// Widen the match function to ignore all bits in imask
def widen(imask: BigInt) = AddressSet(base & ~imask, mask | imask)
// Return an AddressSet that only contains the addresses both sets contain
def intersect(x: AddressSet): Option[AddressSet] = {
if (!overlaps(x)) {
None
} else {
val r_mask = mask & x.mask
val r_base = base | x.base
Some(AddressSet(r_base, r_mask))
}
}
def subtract(x: AddressSet): Seq[AddressSet] = {
intersect(x) match {
case None => Seq(this)
case Some(remove) => AddressSet.enumerateBits(mask & ~remove.mask).map { bit =>
val nmask = (mask & (bit-1)) | remove.mask
val nbase = (remove.base ^ bit) & ~nmask
AddressSet(nbase, nmask)
}
}
}
// AddressSets have one natural Ordering (the containment order, if contiguous)
def compare(x: AddressSet) = {
val primary = (this.base - x.base).signum // smallest address first
val secondary = (x.mask - this.mask).signum // largest mask first
if (primary != 0) primary else secondary
}
// We always want to see things in hex
override def toString() = {
if (mask >= 0) {
"AddressSet(0x%x, 0x%x)".format(base, mask)
} else {
"AddressSet(0x%x, ~0x%x)".format(base, ~mask)
}
}
def toRanges = {
require (finite, "Ranges cannot be calculated on infinite mask")
val size = alignment
val fragments = mask & ~(size-1)
val bits = bitIndexes(fragments)
(BigInt(0) until (BigInt(1) << bits.size)).map { i =>
val off = bitIndexes(i).foldLeft(base) { case (a, b) => a.setBit(bits(b)) }
AddressRange(off, size)
}
}
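  // Illustrative sketch (not part of the original source): a non-contiguous set
  // expands into one AddressRange per fragment, e.g.
  //   AddressSet(0x1000, 0xf0f).toRanges
  //     == 16 ranges of 0x10 bytes each, at 0x1000, 0x1100, ..., 0x1f00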
}
object AddressSet
{
val everything = AddressSet(0, -1)
def misaligned(base: BigInt, size: BigInt, tail: Seq[AddressSet] = Seq()): Seq[AddressSet] = {
if (size == 0) tail.reverse else {
val maxBaseAlignment = base & (-base) // 0 for infinite (LSB)
val maxSizeAlignment = BigInt(1) << log2Floor(size) // MSB of size
val step =
if (maxBaseAlignment == 0 || maxBaseAlignment > maxSizeAlignment)
maxSizeAlignment else maxBaseAlignment
misaligned(base+step, size-step, AddressSet(base, step-1) +: tail)
}
}
def unify(seq: Seq[AddressSet], bit: BigInt): Seq[AddressSet] = {
// Pair terms up by ignoring 'bit'
seq.distinct.groupBy(x => x.copy(base = x.base & ~bit)).map { case (key, seq) =>
if (seq.size == 1) {
seq.head // singleton -> unaffected
} else {
key.copy(mask = key.mask | bit) // pair - widen mask by bit
}
}.toList
}
def unify(seq: Seq[AddressSet]): Seq[AddressSet] = {
val bits = seq.map(_.base).foldLeft(BigInt(0))(_ | _)
AddressSet.enumerateBits(bits).foldLeft(seq) { case (acc, bit) => unify(acc, bit) }.sorted
}
def enumerateMask(mask: BigInt): Seq[BigInt] = {
def helper(id: BigInt, tail: Seq[BigInt]): Seq[BigInt] =
if (id == mask) (id +: tail).reverse else helper(((~mask | id) + 1) & mask, id +: tail)
helper(0, Nil)
}
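  // Illustrative sketch (not part of the original source): enumerateMask visits
  // every value covered by a (possibly sparse) mask, in increasing order, e.g.
  //   enumerateMask(0x11) == Seq(0x00, 0x01, 0x10, 0x11)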
def enumerateBits(mask: BigInt): Seq[BigInt] = {
def helper(x: BigInt): Seq[BigInt] = {
if (x == 0) {
Nil
} else {
val bit = x & (-x)
bit +: helper(x & ~bit)
}
}
helper(mask)
}
}
case class BufferParams(depth: Int, flow: Boolean, pipe: Boolean)
{
require (depth >= 0, "Buffer depth must be >= 0")
def isDefined = depth > 0
def latency = if (isDefined && !flow) 1 else 0
def apply[T <: Data](x: DecoupledIO[T]) =
if (isDefined) Queue(x, depth, flow=flow, pipe=pipe)
else x
def irrevocable[T <: Data](x: ReadyValidIO[T]) =
if (isDefined) Queue.irrevocable(x, depth, flow=flow, pipe=pipe)
else x
def sq[T <: Data](x: DecoupledIO[T]) =
if (!isDefined) x else {
val sq = Module(new ShiftQueue(x.bits, depth, flow=flow, pipe=pipe))
sq.io.enq <> x
sq.io.deq
}
override def toString() = "BufferParams:%d%s%s".format(depth, if (flow) "F" else "", if (pipe) "P" else "")
}
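// Illustrative sketch (not part of the original source): applying a BufferParams
// to a Decoupled channel inserts a Queue of the requested depth, e.g.
//   val buffered = BufferParams.default(enq)   // 2-entry Queue, adds 1 cycle of latency
//   val bypassed = BufferParams.flow(enq)      // 1 entry with combinational bypass, latency 0
// where `enq` stands for any DecoupledIO handle in scope.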
object BufferParams
{
implicit def apply(depth: Int): BufferParams = BufferParams(depth, false, false)
val default = BufferParams(2)
val none = BufferParams(0)
val flow = BufferParams(1, true, false)
val pipe = BufferParams(1, false, true)
}
case class TriStateValue(value: Boolean, set: Boolean)
{
def update(orig: Boolean) = if (set) value else orig
}
object TriStateValue
{
implicit def apply(value: Boolean): TriStateValue = TriStateValue(value, true)
def unset = TriStateValue(false, false)
}
trait DirectedBuffers[T] {
def copyIn(x: BufferParams): T
def copyOut(x: BufferParams): T
def copyInOut(x: BufferParams): T
}
trait IdMapEntry {
def name: String
def from: IdRange
def to: IdRange
def isCache: Boolean
def requestFifo: Boolean
def maxTransactionsInFlight: Option[Int]
def pretty(fmt: String) =
    if (from ne to) { // distinct from/to references need a 7-slot format string; a shared reference implies an arity of 5
fmt.format(to.start, to.end, from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "")
} else {
fmt.format(from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "")
}
}
abstract class IdMap[T <: IdMapEntry] {
protected val fmt: String
val mapping: Seq[T]
def pretty: String = mapping.map(_.pretty(fmt)).mkString(",\n")
}
File Edges.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.util._
class TLEdge(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdgeParameters(client, manager, params, sourceInfo)
{
def isAligned(address: UInt, lgSize: UInt): Bool = {
if (maxLgSize == 0) true.B else {
val mask = UIntToOH1(lgSize, maxLgSize)
(address & mask) === 0.U
}
}
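  // Illustrative sketch (not part of the original source): assuming maxLgSize >= 3,
  //   isAligned(0x14.U, 2.U) === true.B    // 0x14 is 4-byte aligned
  //   isAligned(0x14.U, 3.U) === false.B   // but not 8-byte aligned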
def mask(address: UInt, lgSize: UInt): UInt =
MaskGen(address, lgSize, manager.beatBytes)
def staticHasData(bundle: TLChannel): Option[Boolean] = {
bundle match {
case _:TLBundleA => {
// Do there exist A messages with Data?
val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial
// Do there exist A messages without Data?
val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint
// Statically optimize the case where hasData is a constant
if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None
}
case _:TLBundleB => {
// Do there exist B messages with Data?
val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial
// Do there exist B messages without Data?
val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint
// Statically optimize the case where hasData is a constant
if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None
}
case _:TLBundleC => {
        // Do there exist C messages with Data?
val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe
// Do there exist C messages without Data?
val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe
if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None
}
case _:TLBundleD => {
        // Do there exist D messages with Data?
val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB
// Do there exist D messages without Data?
val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT
if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None
}
case _:TLBundleE => Some(false)
}
}
def isRequest(x: TLChannel): Bool = {
x match {
case a: TLBundleA => true.B
case b: TLBundleB => true.B
case c: TLBundleC => c.opcode(2) && c.opcode(1)
// opcode === TLMessages.Release ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(2) && !d.opcode(1)
// opcode === TLMessages.Grant ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
}
def isResponse(x: TLChannel): Bool = {
x match {
case a: TLBundleA => false.B
case b: TLBundleB => false.B
case c: TLBundleC => !c.opcode(2) || !c.opcode(1)
// opcode =/= TLMessages.Release &&
// opcode =/= TLMessages.ReleaseData
case d: TLBundleD => true.B // Grant isResponse + isRequest
case e: TLBundleE => true.B
}
}
def hasData(x: TLChannel): Bool = {
val opdata = x match {
case a: TLBundleA => !a.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case b: TLBundleB => !b.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case c: TLBundleC => c.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.ProbeAckData ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
staticHasData(x).map(_.B).getOrElse(opdata)
}
def opcode(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.opcode
case b: TLBundleB => b.opcode
case c: TLBundleC => c.opcode
case d: TLBundleD => d.opcode
}
}
def param(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.param
case b: TLBundleB => b.param
case c: TLBundleC => c.param
case d: TLBundleD => d.param
}
}
def size(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.size
case b: TLBundleB => b.size
case c: TLBundleC => c.size
case d: TLBundleD => d.size
}
}
def data(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.data
case b: TLBundleB => b.data
case c: TLBundleC => c.data
case d: TLBundleD => d.data
}
}
def corrupt(x: TLDataChannel): Bool = {
x match {
case a: TLBundleA => a.corrupt
case b: TLBundleB => b.corrupt
case c: TLBundleC => c.corrupt
case d: TLBundleD => d.corrupt
}
}
def mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.mask
case b: TLBundleB => b.mask
case c: TLBundleC => mask(c.address, c.size)
}
}
def full_mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => mask(a.address, a.size)
case b: TLBundleB => mask(b.address, b.size)
case c: TLBundleC => mask(c.address, c.size)
}
}
def address(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.address
case b: TLBundleB => b.address
case c: TLBundleC => c.address
}
}
def source(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.source
case b: TLBundleB => b.source
case c: TLBundleC => c.source
case d: TLBundleD => d.source
}
}
def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes)
def addr_lo(x: UInt): UInt =
if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0)
def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x))
def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x))
def numBeats(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 1.U
case bundle: TLDataChannel => {
val hasData = this.hasData(bundle)
val size = this.size(bundle)
val cutoff = log2Ceil(manager.beatBytes)
val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U
val decode = UIntToOH(size, maxLgSize+1) >> cutoff
Mux(hasData, decode | small.asUInt, 1.U)
}
}
}
def numBeats1(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 0.U
case bundle: TLDataChannel => {
if (maxLgSize == 0) {
0.U
} else {
val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes)
Mux(hasData(bundle), decode, 0.U)
}
}
}
}
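  // Illustrative sketch (not part of the original source): with 8-byte beats, a
  // 32-byte PutFullData occupies numBeats = 4 (numBeats1 = 3) A-channel beats,
  // while a 32-byte Get carries no payload on A and takes numBeats = 1 (numBeats1 = 0).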
def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val beats1 = numBeats1(bits)
val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W))
val counter1 = counter - 1.U
val first = counter === 0.U
val last = counter === 1.U || beats1 === 0.U
val done = last && fire
val count = (beats1 & ~counter1)
when (fire) {
counter := Mux(first, beats1, counter1)
}
(first, last, done, count)
}
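  // Illustrative sketch (not part of the original source): the usual pattern is
  // to track burst position per channel, e.g.
  //   val (d_first, d_last, d_done) = edge.firstlast(bundle.d)
  //   when (bundle.d.fire && d_first) { /* capture per-burst state */ }
  // where `edge` and `bundle` are placeholders; each accessor call elaborates its
  // own beat counter, so call it once per channel and reuse the result.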
def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1
def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire)
def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid)
def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2
def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire)
def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid)
def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3
def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire)
def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid)
def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3)
}
def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire)
def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid)
def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4)
}
def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire)
def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid)
def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes))
}
def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire)
def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid)
// Does the request need T permissions to be executed?
def needT(a: TLBundleA): Bool = {
val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLPermissions.NtoB -> false.B,
TLPermissions.NtoT -> true.B,
TLPermissions.BtoT -> true.B))
MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array(
TLMessages.PutFullData -> true.B,
TLMessages.PutPartialData -> true.B,
TLMessages.ArithmeticData -> true.B,
TLMessages.LogicalData -> true.B,
TLMessages.Get -> false.B,
TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLHints.PREFETCH_READ -> false.B,
TLHints.PREFETCH_WRITE -> true.B)),
TLMessages.AcquireBlock -> acq_needT,
TLMessages.AcquirePerm -> acq_needT))
}
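  // Illustrative sketch (not part of the original source): needT separates
  // read-only requests from write-intent ones, e.g. an AcquireBlock with NtoB does
  // not need T permissions, while NtoT/BtoT Acquires and all Put/Atomic opcodes do.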
// This is a very expensive circuit; use only if you really mean it!
def inFlight(x: TLBundle): (UInt, UInt) = {
val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W))
val bce = manager.anySupportAcquireB && client.anySupportProbe
val (a_first, a_last, _) = firstlast(x.a)
val (b_first, b_last, _) = firstlast(x.b)
val (c_first, c_last, _) = firstlast(x.c)
val (d_first, d_last, _) = firstlast(x.d)
val (e_first, e_last, _) = firstlast(x.e)
val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits))
val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits))
val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits))
val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits))
val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits))
val a_inc = x.a.fire && a_first && a_request
val b_inc = x.b.fire && b_first && b_request
val c_inc = x.c.fire && c_first && c_request
val d_inc = x.d.fire && d_first && d_request
val e_inc = x.e.fire && e_first && e_request
val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil))
val a_dec = x.a.fire && a_last && a_response
val b_dec = x.b.fire && b_last && b_response
val c_dec = x.c.fire && c_last && c_response
val d_dec = x.d.fire && d_last && d_response
val e_dec = x.e.fire && e_last && e_response
val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil))
val next_flight = flight + PopCount(inc) - PopCount(dec)
flight := next_flight
(flight, next_flight)
}
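  // Illustrative sketch (not part of the original source): inFlight is typically
  // used to detect quiescence before flushing or powering down, e.g.
  //   val (in_flight, _) = edge.inFlight(tlBundle)
  //   val quiescent = in_flight === 0.U
  // where `edge` and `tlBundle` are placeholders for an edge and its TLBundle.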
def prettySourceMapping(context: String): String = {
s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n"
}
}
class TLEdgeOut(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
// Transfers
def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquireBlock
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquirePerm
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.Release
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ReleaseData
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) =
Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B)
def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAck
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions, data)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAckData
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B)
def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink)
def GrantAck(toSink: UInt): TLBundleE = {
val e = Wire(new TLBundleE(bundle))
e.sink := toSink
e
}
// Accesses
def Get(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsGetFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Get
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
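  // Illustrative sketch (not part of the original source): the constructors return
  // a (legal, bits) pair; drive the channel only when the transfer is legal, e.g.
  //   val (legal, bits) = edge.Get(fromSource = 0.U, toAddress = addr, lgSize = 3.U)
  //   out.a.valid := wantGet && legal
  //   out.a.bits  := bits
  // where `edge`, `out`, `addr` and `wantGet` are placeholders local to the caller.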
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutFullFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutFullData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, mask, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutPartialFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutPartialData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = {
require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsArithmeticFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.ArithmeticData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsLogicalFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.LogicalData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = {
require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsHintFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Hint
a.param := param
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data)
def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAckData
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size)
def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.HintAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
}
class TLEdgeIn(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = {
val todo = x.filter(!_.isEmpty)
val heads = todo.map(_.head)
val tails = todo.map(_.tail)
if (todo.isEmpty) Nil else { heads +: myTranspose(tails) }
}
// Transfers
def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = {
require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}")
val legal = client.supportsProbe(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Probe
b.param := capPermissions
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.Grant
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.GrantData
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B)
def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.ReleaseAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
// Accesses
def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = {
require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsGet(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Get
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsPutFull(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutFullData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, mask, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsPutPartial(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutPartialData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsArithmetic(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.ArithmeticData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsLogical(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.LogicalData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = {
require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsHint(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Hint
b.param := param
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size)
def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied)
def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data)
def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAckData
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B)
def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied)
def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B)
def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.HintAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
}
module TLMonitor_23( // @[Monitor.scala:36:7]
input clock, // @[Monitor.scala:36:7]
input reset, // @[Monitor.scala:36:7]
input io_in_a_ready, // @[Monitor.scala:20:14]
input io_in_a_valid, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_opcode, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_param, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_size, // @[Monitor.scala:20:14]
input [6:0] io_in_a_bits_source, // @[Monitor.scala:20:14]
input [25:0] io_in_a_bits_address, // @[Monitor.scala:20:14]
input [7:0] io_in_a_bits_mask, // @[Monitor.scala:20:14]
input [63:0] io_in_a_bits_data, // @[Monitor.scala:20:14]
input io_in_a_bits_corrupt, // @[Monitor.scala:20:14]
input io_in_d_ready, // @[Monitor.scala:20:14]
input io_in_d_valid, // @[Monitor.scala:20:14]
input [2:0] io_in_d_bits_opcode, // @[Monitor.scala:20:14]
input [1:0] io_in_d_bits_param, // @[Monitor.scala:20:14]
input [2:0] io_in_d_bits_size, // @[Monitor.scala:20:14]
input [6:0] io_in_d_bits_source, // @[Monitor.scala:20:14]
input io_in_d_bits_sink, // @[Monitor.scala:20:14]
input io_in_d_bits_denied, // @[Monitor.scala:20:14]
input [63:0] io_in_d_bits_data, // @[Monitor.scala:20:14]
input io_in_d_bits_corrupt // @[Monitor.scala:20:14]
);
wire [31:0] _plusarg_reader_1_out; // @[PlusArg.scala:80:11]
wire [31:0] _plusarg_reader_out; // @[PlusArg.scala:80:11]
wire io_in_a_ready_0 = io_in_a_ready; // @[Monitor.scala:36:7]
wire io_in_a_valid_0 = io_in_a_valid; // @[Monitor.scala:36:7]
wire [2:0] io_in_a_bits_opcode_0 = io_in_a_bits_opcode; // @[Monitor.scala:36:7]
wire [2:0] io_in_a_bits_param_0 = io_in_a_bits_param; // @[Monitor.scala:36:7]
wire [2:0] io_in_a_bits_size_0 = io_in_a_bits_size; // @[Monitor.scala:36:7]
wire [6:0] io_in_a_bits_source_0 = io_in_a_bits_source; // @[Monitor.scala:36:7]
wire [25:0] io_in_a_bits_address_0 = io_in_a_bits_address; // @[Monitor.scala:36:7]
wire [7:0] io_in_a_bits_mask_0 = io_in_a_bits_mask; // @[Monitor.scala:36:7]
wire [63:0] io_in_a_bits_data_0 = io_in_a_bits_data; // @[Monitor.scala:36:7]
wire io_in_a_bits_corrupt_0 = io_in_a_bits_corrupt; // @[Monitor.scala:36:7]
wire io_in_d_ready_0 = io_in_d_ready; // @[Monitor.scala:36:7]
wire io_in_d_valid_0 = io_in_d_valid; // @[Monitor.scala:36:7]
wire [2:0] io_in_d_bits_opcode_0 = io_in_d_bits_opcode; // @[Monitor.scala:36:7]
wire [1:0] io_in_d_bits_param_0 = io_in_d_bits_param; // @[Monitor.scala:36:7]
wire [2:0] io_in_d_bits_size_0 = io_in_d_bits_size; // @[Monitor.scala:36:7]
wire [6:0] io_in_d_bits_source_0 = io_in_d_bits_source; // @[Monitor.scala:36:7]
wire io_in_d_bits_sink_0 = io_in_d_bits_sink; // @[Monitor.scala:36:7]
wire io_in_d_bits_denied_0 = io_in_d_bits_denied; // @[Monitor.scala:36:7]
wire [63:0] io_in_d_bits_data_0 = io_in_d_bits_data; // @[Monitor.scala:36:7]
wire io_in_d_bits_corrupt_0 = io_in_d_bits_corrupt; // @[Monitor.scala:36:7]
wire sink_ok = 1'h0; // @[Monitor.scala:309:31]
wire _c_first_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_T = 1'h0; // @[Decoupled.scala:51:35]
wire c_first_beats1_opdata = 1'h0; // @[Edges.scala:102:36]
wire _c_first_last_T = 1'h0; // @[Edges.scala:232:25]
wire c_first_done = 1'h0; // @[Edges.scala:233:22]
wire _c_set_wo_ready_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_wo_ready_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_wo_ready_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_wo_ready_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_wo_ready_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_wo_ready_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_interm_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_interm_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_interm_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_interm_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_interm_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_interm_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_interm_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_interm_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_interm_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_interm_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_interm_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_interm_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_T = 1'h0; // @[Monitor.scala:772:47]
wire _c_probe_ack_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_T_1 = 1'h0; // @[Monitor.scala:772:95]
wire c_probe_ack = 1'h0; // @[Monitor.scala:772:71]
wire _same_cycle_resp_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_T_3 = 1'h0; // @[Monitor.scala:795:44]
wire _same_cycle_resp_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_T_4 = 1'h0; // @[Edges.scala:68:36]
wire _same_cycle_resp_T_5 = 1'h0; // @[Edges.scala:68:51]
wire _same_cycle_resp_T_6 = 1'h0; // @[Edges.scala:68:40]
wire _same_cycle_resp_T_7 = 1'h0; // @[Monitor.scala:795:55]
wire _same_cycle_resp_WIRE_4_ready = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_4_valid = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_4_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_5_ready = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_5_valid = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_5_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire same_cycle_resp_1 = 1'h0; // @[Monitor.scala:795:88]
wire [2:0] responseMap_0 = 3'h0; // @[Monitor.scala:643:42]
wire [2:0] responseMap_1 = 3'h0; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_0 = 3'h0; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_1 = 3'h0; // @[Monitor.scala:644:42]
wire [2:0] _c_first_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_2_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_3_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] c_first_beats1_decode = 3'h0; // @[Edges.scala:220:59]
wire [2:0] c_first_beats1 = 3'h0; // @[Edges.scala:221:14]
wire [2:0] _c_first_count_T = 3'h0; // @[Edges.scala:234:27]
wire [2:0] c_first_count = 3'h0; // @[Edges.scala:234:25]
wire [2:0] _c_first_counter_T = 3'h0; // @[Edges.scala:236:21]
wire [2:0] _c_set_wo_ready_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_wo_ready_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_wo_ready_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_wo_ready_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_wo_ready_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_wo_ready_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_interm_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_interm_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_interm_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_interm_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_interm_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_interm_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_interm_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_interm_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_interm_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_2_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_3_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_2_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_3_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_4_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_4_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_4_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_5_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_5_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_5_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire _source_ok_T_3 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_5 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_9 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_11 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_15 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_17 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_21 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_23 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_27 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_35 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_55 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_57 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_61 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_63 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_67 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_69 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_73 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_75 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_79 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_87 = 1'h1; // @[Parameters.scala:56:32]
wire c_first = 1'h1; // @[Edges.scala:231:25]
wire _c_first_last_T_1 = 1'h1; // @[Edges.scala:232:43]
wire c_first_last = 1'h1; // @[Edges.scala:232:33]
wire [2:0] c_first_counter1 = 3'h7; // @[Edges.scala:230:28]
wire [3:0] _c_first_counter1_T = 4'hF; // @[Edges.scala:230:28]
wire [63:0] _c_first_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_first_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_first_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_first_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_set_wo_ready_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_set_wo_ready_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_opcodes_set_interm_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_opcodes_set_interm_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_sizes_set_interm_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_sizes_set_interm_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_opcodes_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_opcodes_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_sizes_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_sizes_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_probe_ack_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_probe_ack_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_probe_ack_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_probe_ack_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _same_cycle_resp_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _same_cycle_resp_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _same_cycle_resp_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _same_cycle_resp_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _same_cycle_resp_WIRE_4_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _same_cycle_resp_WIRE_5_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [25:0] _c_first_WIRE_bits_address = 26'h0; // @[Bundles.scala:265:74]
wire [25:0] _c_first_WIRE_1_bits_address = 26'h0; // @[Bundles.scala:265:61]
wire [25:0] _c_first_WIRE_2_bits_address = 26'h0; // @[Bundles.scala:265:74]
wire [25:0] _c_first_WIRE_3_bits_address = 26'h0; // @[Bundles.scala:265:61]
wire [25:0] _c_set_wo_ready_WIRE_bits_address = 26'h0; // @[Bundles.scala:265:74]
wire [25:0] _c_set_wo_ready_WIRE_1_bits_address = 26'h0; // @[Bundles.scala:265:61]
wire [25:0] _c_set_WIRE_bits_address = 26'h0; // @[Bundles.scala:265:74]
wire [25:0] _c_set_WIRE_1_bits_address = 26'h0; // @[Bundles.scala:265:61]
wire [25:0] _c_opcodes_set_interm_WIRE_bits_address = 26'h0; // @[Bundles.scala:265:74]
wire [25:0] _c_opcodes_set_interm_WIRE_1_bits_address = 26'h0; // @[Bundles.scala:265:61]
wire [25:0] _c_sizes_set_interm_WIRE_bits_address = 26'h0; // @[Bundles.scala:265:74]
wire [25:0] _c_sizes_set_interm_WIRE_1_bits_address = 26'h0; // @[Bundles.scala:265:61]
wire [25:0] _c_opcodes_set_WIRE_bits_address = 26'h0; // @[Bundles.scala:265:74]
wire [25:0] _c_opcodes_set_WIRE_1_bits_address = 26'h0; // @[Bundles.scala:265:61]
wire [25:0] _c_sizes_set_WIRE_bits_address = 26'h0; // @[Bundles.scala:265:74]
wire [25:0] _c_sizes_set_WIRE_1_bits_address = 26'h0; // @[Bundles.scala:265:61]
wire [25:0] _c_probe_ack_WIRE_bits_address = 26'h0; // @[Bundles.scala:265:74]
wire [25:0] _c_probe_ack_WIRE_1_bits_address = 26'h0; // @[Bundles.scala:265:61]
wire [25:0] _c_probe_ack_WIRE_2_bits_address = 26'h0; // @[Bundles.scala:265:74]
wire [25:0] _c_probe_ack_WIRE_3_bits_address = 26'h0; // @[Bundles.scala:265:61]
wire [25:0] _same_cycle_resp_WIRE_bits_address = 26'h0; // @[Bundles.scala:265:74]
wire [25:0] _same_cycle_resp_WIRE_1_bits_address = 26'h0; // @[Bundles.scala:265:61]
wire [25:0] _same_cycle_resp_WIRE_2_bits_address = 26'h0; // @[Bundles.scala:265:74]
wire [25:0] _same_cycle_resp_WIRE_3_bits_address = 26'h0; // @[Bundles.scala:265:61]
wire [25:0] _same_cycle_resp_WIRE_4_bits_address = 26'h0; // @[Bundles.scala:265:74]
wire [25:0] _same_cycle_resp_WIRE_5_bits_address = 26'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_first_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_first_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_first_WIRE_2_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_first_WIRE_3_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_set_wo_ready_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_set_wo_ready_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_set_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_set_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_opcodes_set_interm_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_opcodes_set_interm_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_sizes_set_interm_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_sizes_set_interm_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_opcodes_set_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_opcodes_set_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_sizes_set_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_sizes_set_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_probe_ack_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_probe_ack_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_probe_ack_WIRE_2_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_probe_ack_WIRE_3_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _same_cycle_resp_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _same_cycle_resp_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _same_cycle_resp_WIRE_2_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _same_cycle_resp_WIRE_3_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _same_cycle_resp_WIRE_4_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _same_cycle_resp_WIRE_5_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [15:0] _a_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _a_size_lookup_T_5 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _d_opcodes_clr_T_3 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _d_sizes_clr_T_3 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _c_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:724:57]
wire [15:0] _c_size_lookup_T_5 = 16'hF; // @[Monitor.scala:724:57]
wire [15:0] _d_opcodes_clr_T_9 = 16'hF; // @[Monitor.scala:724:57]
wire [15:0] _d_sizes_clr_T_9 = 16'hF; // @[Monitor.scala:724:57]
wire [16:0] _a_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _a_size_lookup_T_4 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _d_opcodes_clr_T_2 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _d_sizes_clr_T_2 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _c_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:724:57]
wire [16:0] _c_size_lookup_T_4 = 17'hF; // @[Monitor.scala:724:57]
wire [16:0] _d_opcodes_clr_T_8 = 17'hF; // @[Monitor.scala:724:57]
wire [16:0] _d_sizes_clr_T_8 = 17'hF; // @[Monitor.scala:724:57]
wire [15:0] _a_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _a_size_lookup_T_3 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _d_opcodes_clr_T_1 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _d_sizes_clr_T_1 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _c_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:724:51]
wire [15:0] _c_size_lookup_T_3 = 16'h10; // @[Monitor.scala:724:51]
wire [15:0] _d_opcodes_clr_T_7 = 16'h10; // @[Monitor.scala:724:51]
wire [15:0] _d_sizes_clr_T_7 = 16'h10; // @[Monitor.scala:724:51]
wire [1026:0] _c_opcodes_set_T_1 = 1027'h0; // @[Monitor.scala:767:54]
wire [1026:0] _c_sizes_set_T_1 = 1027'h0; // @[Monitor.scala:768:52]
wire [9:0] _c_opcodes_set_T = 10'h0; // @[Monitor.scala:767:79]
wire [9:0] _c_sizes_set_T = 10'h0; // @[Monitor.scala:768:77]
wire [3:0] _c_opcodes_set_interm_T_1 = 4'h1; // @[Monitor.scala:765:61]
wire [3:0] _c_sizes_set_interm_T_1 = 4'h1; // @[Monitor.scala:766:59]
wire [3:0] c_opcodes_set_interm = 4'h0; // @[Monitor.scala:754:40]
wire [3:0] c_sizes_set_interm = 4'h0; // @[Monitor.scala:755:40]
wire [3:0] _c_opcodes_set_interm_T = 4'h0; // @[Monitor.scala:765:53]
wire [3:0] _c_sizes_set_interm_T = 4'h0; // @[Monitor.scala:766:51]
wire [127:0] _c_set_wo_ready_T = 128'h1; // @[OneHot.scala:58:35]
wire [127:0] _c_set_T = 128'h1; // @[OneHot.scala:58:35]
wire [259:0] c_opcodes_set = 260'h0; // @[Monitor.scala:740:34]
wire [259:0] c_sizes_set = 260'h0; // @[Monitor.scala:741:34]
wire [64:0] c_set = 65'h0; // @[Monitor.scala:738:34]
wire [64:0] c_set_wo_ready = 65'h0; // @[Monitor.scala:739:34]
wire [5:0] _c_first_beats1_decode_T_2 = 6'h0; // @[package.scala:243:46]
wire [5:0] _c_first_beats1_decode_T_1 = 6'h3F; // @[package.scala:243:76]
wire [12:0] _c_first_beats1_decode_T = 13'h3F; // @[package.scala:243:71]
wire [2:0] responseMap_6 = 3'h4; // @[Monitor.scala:643:42]
wire [2:0] responseMap_7 = 3'h4; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_7 = 3'h4; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_6 = 3'h5; // @[Monitor.scala:644:42]
wire [2:0] responseMap_5 = 3'h2; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_5 = 3'h2; // @[Monitor.scala:644:42]
wire [2:0] responseMap_2 = 3'h1; // @[Monitor.scala:643:42]
wire [2:0] responseMap_3 = 3'h1; // @[Monitor.scala:643:42]
wire [2:0] responseMap_4 = 3'h1; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_2 = 3'h1; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_3 = 3'h1; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_4 = 3'h1; // @[Monitor.scala:644:42]
wire [3:0] _a_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:637:123]
wire [3:0] _a_size_lookup_T_2 = 4'h4; // @[Monitor.scala:641:117]
wire [3:0] _d_opcodes_clr_T = 4'h4; // @[Monitor.scala:680:48]
wire [3:0] _d_sizes_clr_T = 4'h4; // @[Monitor.scala:681:48]
wire [3:0] _c_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:749:123]
wire [3:0] _c_size_lookup_T_2 = 4'h4; // @[Monitor.scala:750:119]
wire [3:0] _d_opcodes_clr_T_6 = 4'h4; // @[Monitor.scala:790:48]
wire [3:0] _d_sizes_clr_T_6 = 4'h4; // @[Monitor.scala:791:48]
wire [2:0] _mask_sizeOH_T = io_in_a_bits_size_0; // @[Misc.scala:202:34]
wire [6:0] _source_ok_uncommonBits_T = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _source_ok_uncommonBits_T_1 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _source_ok_uncommonBits_T_2 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _source_ok_uncommonBits_T_3 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _source_ok_uncommonBits_T_4 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _source_ok_uncommonBits_T_5 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_1 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_2 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_3 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_4 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_5 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_6 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_7 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_8 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_9 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_10 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_11 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_12 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_13 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_14 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_15 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_16 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_17 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_18 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_19 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_20 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_21 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_22 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_23 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_24 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_25 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_26 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_27 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_28 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_29 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_30 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_31 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_32 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_33 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_34 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_35 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_36 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_37 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_38 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_39 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_40 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_41 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_42 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_43 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_44 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_45 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_46 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_47 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_48 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_49 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_50 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_51 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_52 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_53 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_54 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_55 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_56 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_57 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_58 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_59 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_60 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_61 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_62 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_63 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_64 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_65 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _source_ok_uncommonBits_T_6 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _source_ok_uncommonBits_T_7 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _source_ok_uncommonBits_T_8 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _source_ok_uncommonBits_T_9 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _source_ok_uncommonBits_T_10 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _source_ok_uncommonBits_T_11 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire _source_ok_T = io_in_a_bits_source_0 == 7'h10; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_0 = _source_ok_T; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits = _source_ok_uncommonBits_T[1:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] _source_ok_T_1 = io_in_a_bits_source_0[6:2]; // @[Monitor.scala:36:7]
wire [4:0] _source_ok_T_7 = io_in_a_bits_source_0[6:2]; // @[Monitor.scala:36:7]
wire [4:0] _source_ok_T_13 = io_in_a_bits_source_0[6:2]; // @[Monitor.scala:36:7]
wire [4:0] _source_ok_T_19 = io_in_a_bits_source_0[6:2]; // @[Monitor.scala:36:7]
wire [4:0] _source_ok_T_25 = io_in_a_bits_source_0[6:2]; // @[Monitor.scala:36:7]
wire [4:0] _source_ok_T_33 = io_in_a_bits_source_0[6:2]; // @[Monitor.scala:36:7]
wire _source_ok_T_2 = _source_ok_T_1 == 5'h0; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_4 = _source_ok_T_2; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_6 = _source_ok_T_4; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1 = _source_ok_T_6; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_1 = _source_ok_uncommonBits_T_1[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_8 = _source_ok_T_7 == 5'h1; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_10 = _source_ok_T_8; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_12 = _source_ok_T_10; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_2 = _source_ok_T_12; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_2 = _source_ok_uncommonBits_T_2[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_14 = _source_ok_T_13 == 5'h2; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_16 = _source_ok_T_14; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_18 = _source_ok_T_16; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_3 = _source_ok_T_18; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_3 = _source_ok_uncommonBits_T_3[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_20 = _source_ok_T_19 == 5'h3; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_22 = _source_ok_T_20; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_24 = _source_ok_T_22; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_4 = _source_ok_T_24; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_4 = _source_ok_uncommonBits_T_4[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_26 = _source_ok_T_25 == 5'hA; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_28 = _source_ok_T_26; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_29 = source_ok_uncommonBits_4 != 2'h3; // @[Parameters.scala:52:56, :57:20]
wire _source_ok_T_30 = _source_ok_T_28 & _source_ok_T_29; // @[Parameters.scala:54:67, :56:48, :57:20]
wire _source_ok_WIRE_5 = _source_ok_T_30; // @[Parameters.scala:1138:31]
wire _source_ok_T_31 = io_in_a_bits_source_0 == 7'h2B; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_6 = _source_ok_T_31; // @[Parameters.scala:1138:31]
wire _source_ok_T_32 = io_in_a_bits_source_0 == 7'h2C; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_7 = _source_ok_T_32; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_5 = _source_ok_uncommonBits_T_5[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_34 = _source_ok_T_33 == 5'h8; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_36 = _source_ok_T_34; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_37 = source_ok_uncommonBits_5 != 2'h3; // @[Parameters.scala:52:56, :57:20]
wire _source_ok_T_38 = _source_ok_T_36 & _source_ok_T_37; // @[Parameters.scala:54:67, :56:48, :57:20]
wire _source_ok_WIRE_8 = _source_ok_T_38; // @[Parameters.scala:1138:31]
wire _source_ok_T_39 = io_in_a_bits_source_0 == 7'h23; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_9 = _source_ok_T_39; // @[Parameters.scala:1138:31]
wire _source_ok_T_40 = io_in_a_bits_source_0 == 7'h24; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_10 = _source_ok_T_40; // @[Parameters.scala:1138:31]
wire _source_ok_T_41 = io_in_a_bits_source_0 == 7'h40; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_11 = _source_ok_T_41; // @[Parameters.scala:1138:31]
wire _source_ok_T_42 = _source_ok_WIRE_0 | _source_ok_WIRE_1; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_43 = _source_ok_T_42 | _source_ok_WIRE_2; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_44 = _source_ok_T_43 | _source_ok_WIRE_3; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_45 = _source_ok_T_44 | _source_ok_WIRE_4; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_46 = _source_ok_T_45 | _source_ok_WIRE_5; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_47 = _source_ok_T_46 | _source_ok_WIRE_6; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_48 = _source_ok_T_47 | _source_ok_WIRE_7; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_49 = _source_ok_T_48 | _source_ok_WIRE_8; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_50 = _source_ok_T_49 | _source_ok_WIRE_9; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_51 = _source_ok_T_50 | _source_ok_WIRE_10; // @[Parameters.scala:1138:31, :1139:46]
wire source_ok = _source_ok_T_51 | _source_ok_WIRE_11; // @[Parameters.scala:1138:31, :1139:46]
wire [12:0] _GEN = 13'h3F << io_in_a_bits_size_0; // @[package.scala:243:71]
wire [12:0] _is_aligned_mask_T; // @[package.scala:243:71]
assign _is_aligned_mask_T = _GEN; // @[package.scala:243:71]
wire [12:0] _a_first_beats1_decode_T; // @[package.scala:243:71]
assign _a_first_beats1_decode_T = _GEN; // @[package.scala:243:71]
wire [12:0] _a_first_beats1_decode_T_3; // @[package.scala:243:71]
assign _a_first_beats1_decode_T_3 = _GEN; // @[package.scala:243:71]
wire [5:0] _is_aligned_mask_T_1 = _is_aligned_mask_T[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] is_aligned_mask = ~_is_aligned_mask_T_1; // @[package.scala:243:{46,76}]
wire [25:0] _is_aligned_T = {20'h0, io_in_a_bits_address_0[5:0] & is_aligned_mask}; // @[package.scala:243:46]
wire is_aligned = _is_aligned_T == 26'h0; // @[Edges.scala:21:{16,24}]
wire [1:0] mask_sizeOH_shiftAmount = _mask_sizeOH_T[1:0]; // @[OneHot.scala:64:49]
wire [3:0] _mask_sizeOH_T_1 = 4'h1 << mask_sizeOH_shiftAmount; // @[OneHot.scala:64:49, :65:12]
wire [2:0] _mask_sizeOH_T_2 = _mask_sizeOH_T_1[2:0]; // @[OneHot.scala:65:{12,27}]
wire [2:0] mask_sizeOH = {_mask_sizeOH_T_2[2:1], 1'h1}; // @[OneHot.scala:65:27]
wire mask_sub_sub_sub_0_1 = io_in_a_bits_size_0 > 3'h2; // @[Misc.scala:206:21]
wire mask_sub_sub_size = mask_sizeOH[2]; // @[Misc.scala:202:81, :209:26]
wire mask_sub_sub_bit = io_in_a_bits_address_0[2]; // @[Misc.scala:210:26]
wire mask_sub_sub_1_2 = mask_sub_sub_bit; // @[Misc.scala:210:26, :214:27]
wire mask_sub_sub_nbit = ~mask_sub_sub_bit; // @[Misc.scala:210:26, :211:20]
wire mask_sub_sub_0_2 = mask_sub_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_sub_sub_acc_T = mask_sub_sub_size & mask_sub_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_sub_0_1 = mask_sub_sub_sub_0_1 | _mask_sub_sub_acc_T; // @[Misc.scala:206:21, :215:{29,38}]
wire _mask_sub_sub_acc_T_1 = mask_sub_sub_size & mask_sub_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_sub_1_1 = mask_sub_sub_sub_0_1 | _mask_sub_sub_acc_T_1; // @[Misc.scala:206:21, :215:{29,38}]
wire mask_sub_size = mask_sizeOH[1]; // @[Misc.scala:202:81, :209:26]
wire mask_sub_bit = io_in_a_bits_address_0[1]; // @[Misc.scala:210:26]
wire mask_sub_nbit = ~mask_sub_bit; // @[Misc.scala:210:26, :211:20]
wire mask_sub_0_2 = mask_sub_sub_0_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_sub_acc_T = mask_sub_size & mask_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_0_1 = mask_sub_sub_0_1 | _mask_sub_acc_T; // @[Misc.scala:215:{29,38}]
wire mask_sub_1_2 = mask_sub_sub_0_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_sub_acc_T_1 = mask_sub_size & mask_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_1_1 = mask_sub_sub_0_1 | _mask_sub_acc_T_1; // @[Misc.scala:215:{29,38}]
wire mask_sub_2_2 = mask_sub_sub_1_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_sub_acc_T_2 = mask_sub_size & mask_sub_2_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_2_1 = mask_sub_sub_1_1 | _mask_sub_acc_T_2; // @[Misc.scala:215:{29,38}]
wire mask_sub_3_2 = mask_sub_sub_1_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_sub_acc_T_3 = mask_sub_size & mask_sub_3_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_3_1 = mask_sub_sub_1_1 | _mask_sub_acc_T_3; // @[Misc.scala:215:{29,38}]
wire mask_size = mask_sizeOH[0]; // @[Misc.scala:202:81, :209:26]
wire mask_bit = io_in_a_bits_address_0[0]; // @[Misc.scala:210:26]
wire mask_nbit = ~mask_bit; // @[Misc.scala:210:26, :211:20]
wire mask_eq = mask_sub_0_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T = mask_size & mask_eq; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc = mask_sub_0_1 | _mask_acc_T; // @[Misc.scala:215:{29,38}]
wire mask_eq_1 = mask_sub_0_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_1 = mask_size & mask_eq_1; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_1 = mask_sub_0_1 | _mask_acc_T_1; // @[Misc.scala:215:{29,38}]
wire mask_eq_2 = mask_sub_1_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T_2 = mask_size & mask_eq_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_2 = mask_sub_1_1 | _mask_acc_T_2; // @[Misc.scala:215:{29,38}]
wire mask_eq_3 = mask_sub_1_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_3 = mask_size & mask_eq_3; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_3 = mask_sub_1_1 | _mask_acc_T_3; // @[Misc.scala:215:{29,38}]
wire mask_eq_4 = mask_sub_2_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T_4 = mask_size & mask_eq_4; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_4 = mask_sub_2_1 | _mask_acc_T_4; // @[Misc.scala:215:{29,38}]
wire mask_eq_5 = mask_sub_2_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_5 = mask_size & mask_eq_5; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_5 = mask_sub_2_1 | _mask_acc_T_5; // @[Misc.scala:215:{29,38}]
wire mask_eq_6 = mask_sub_3_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T_6 = mask_size & mask_eq_6; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_6 = mask_sub_3_1 | _mask_acc_T_6; // @[Misc.scala:215:{29,38}]
wire mask_eq_7 = mask_sub_3_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_7 = mask_size & mask_eq_7; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_7 = mask_sub_3_1 | _mask_acc_T_7; // @[Misc.scala:215:{29,38}]
wire [1:0] mask_lo_lo = {mask_acc_1, mask_acc}; // @[Misc.scala:215:29, :222:10]
wire [1:0] mask_lo_hi = {mask_acc_3, mask_acc_2}; // @[Misc.scala:215:29, :222:10]
wire [3:0] mask_lo = {mask_lo_hi, mask_lo_lo}; // @[Misc.scala:222:10]
wire [1:0] mask_hi_lo = {mask_acc_5, mask_acc_4}; // @[Misc.scala:215:29, :222:10]
wire [1:0] mask_hi_hi = {mask_acc_7, mask_acc_6}; // @[Misc.scala:215:29, :222:10]
wire [3:0] mask_hi = {mask_hi_hi, mask_hi_lo}; // @[Misc.scala:222:10]
wire [7:0] mask = {mask_hi, mask_lo}; // @[Misc.scala:222:10]
wire [1:0] uncommonBits = _uncommonBits_T[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_1 = _uncommonBits_T_1[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_2 = _uncommonBits_T_2[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_3 = _uncommonBits_T_3[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_4 = _uncommonBits_T_4[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_5 = _uncommonBits_T_5[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_6 = _uncommonBits_T_6[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_7 = _uncommonBits_T_7[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_8 = _uncommonBits_T_8[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_9 = _uncommonBits_T_9[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_10 = _uncommonBits_T_10[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_11 = _uncommonBits_T_11[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_12 = _uncommonBits_T_12[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_13 = _uncommonBits_T_13[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_14 = _uncommonBits_T_14[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_15 = _uncommonBits_T_15[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_16 = _uncommonBits_T_16[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_17 = _uncommonBits_T_17[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_18 = _uncommonBits_T_18[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_19 = _uncommonBits_T_19[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_20 = _uncommonBits_T_20[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_21 = _uncommonBits_T_21[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_22 = _uncommonBits_T_22[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_23 = _uncommonBits_T_23[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_24 = _uncommonBits_T_24[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_25 = _uncommonBits_T_25[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_26 = _uncommonBits_T_26[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_27 = _uncommonBits_T_27[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_28 = _uncommonBits_T_28[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_29 = _uncommonBits_T_29[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_30 = _uncommonBits_T_30[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_31 = _uncommonBits_T_31[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_32 = _uncommonBits_T_32[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_33 = _uncommonBits_T_33[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_34 = _uncommonBits_T_34[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_35 = _uncommonBits_T_35[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_36 = _uncommonBits_T_36[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_37 = _uncommonBits_T_37[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_38 = _uncommonBits_T_38[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_39 = _uncommonBits_T_39[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_40 = _uncommonBits_T_40[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_41 = _uncommonBits_T_41[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_42 = _uncommonBits_T_42[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_43 = _uncommonBits_T_43[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_44 = _uncommonBits_T_44[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_45 = _uncommonBits_T_45[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_46 = _uncommonBits_T_46[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_47 = _uncommonBits_T_47[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_48 = _uncommonBits_T_48[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_49 = _uncommonBits_T_49[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_50 = _uncommonBits_T_50[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_51 = _uncommonBits_T_51[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_52 = _uncommonBits_T_52[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_53 = _uncommonBits_T_53[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_54 = _uncommonBits_T_54[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_55 = _uncommonBits_T_55[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_56 = _uncommonBits_T_56[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_57 = _uncommonBits_T_57[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_58 = _uncommonBits_T_58[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_59 = _uncommonBits_T_59[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_60 = _uncommonBits_T_60[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_61 = _uncommonBits_T_61[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_62 = _uncommonBits_T_62[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_63 = _uncommonBits_T_63[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_64 = _uncommonBits_T_64[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_65 = _uncommonBits_T_65[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_52 = io_in_d_bits_source_0 == 7'h10; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_1_0 = _source_ok_T_52; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_6 = _source_ok_uncommonBits_T_6[1:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] _source_ok_T_53 = io_in_d_bits_source_0[6:2]; // @[Monitor.scala:36:7]
wire [4:0] _source_ok_T_59 = io_in_d_bits_source_0[6:2]; // @[Monitor.scala:36:7]
wire [4:0] _source_ok_T_65 = io_in_d_bits_source_0[6:2]; // @[Monitor.scala:36:7]
wire [4:0] _source_ok_T_71 = io_in_d_bits_source_0[6:2]; // @[Monitor.scala:36:7]
wire [4:0] _source_ok_T_77 = io_in_d_bits_source_0[6:2]; // @[Monitor.scala:36:7]
wire [4:0] _source_ok_T_85 = io_in_d_bits_source_0[6:2]; // @[Monitor.scala:36:7]
wire _source_ok_T_54 = _source_ok_T_53 == 5'h0; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_56 = _source_ok_T_54; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_58 = _source_ok_T_56; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1_1 = _source_ok_T_58; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_7 = _source_ok_uncommonBits_T_7[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_60 = _source_ok_T_59 == 5'h1; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_62 = _source_ok_T_60; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_64 = _source_ok_T_62; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1_2 = _source_ok_T_64; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_8 = _source_ok_uncommonBits_T_8[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_66 = _source_ok_T_65 == 5'h2; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_68 = _source_ok_T_66; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_70 = _source_ok_T_68; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1_3 = _source_ok_T_70; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_9 = _source_ok_uncommonBits_T_9[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_72 = _source_ok_T_71 == 5'h3; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_74 = _source_ok_T_72; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_76 = _source_ok_T_74; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1_4 = _source_ok_T_76; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_10 = _source_ok_uncommonBits_T_10[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_78 = _source_ok_T_77 == 5'hA; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_80 = _source_ok_T_78; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_81 = source_ok_uncommonBits_10 != 2'h3; // @[Parameters.scala:52:56, :57:20]
wire _source_ok_T_82 = _source_ok_T_80 & _source_ok_T_81; // @[Parameters.scala:54:67, :56:48, :57:20]
wire _source_ok_WIRE_1_5 = _source_ok_T_82; // @[Parameters.scala:1138:31]
wire _source_ok_T_83 = io_in_d_bits_source_0 == 7'h2B; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_1_6 = _source_ok_T_83; // @[Parameters.scala:1138:31]
wire _source_ok_T_84 = io_in_d_bits_source_0 == 7'h2C; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_1_7 = _source_ok_T_84; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_11 = _source_ok_uncommonBits_T_11[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_86 = _source_ok_T_85 == 5'h8; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_88 = _source_ok_T_86; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_89 = source_ok_uncommonBits_11 != 2'h3; // @[Parameters.scala:52:56, :57:20]
wire _source_ok_T_90 = _source_ok_T_88 & _source_ok_T_89; // @[Parameters.scala:54:67, :56:48, :57:20]
wire _source_ok_WIRE_1_8 = _source_ok_T_90; // @[Parameters.scala:1138:31]
wire _source_ok_T_91 = io_in_d_bits_source_0 == 7'h23; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_1_9 = _source_ok_T_91; // @[Parameters.scala:1138:31]
wire _source_ok_T_92 = io_in_d_bits_source_0 == 7'h24; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_1_10 = _source_ok_T_92; // @[Parameters.scala:1138:31]
wire _source_ok_T_93 = io_in_d_bits_source_0 == 7'h40; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_1_11 = _source_ok_T_93; // @[Parameters.scala:1138:31]
wire _source_ok_T_94 = _source_ok_WIRE_1_0 | _source_ok_WIRE_1_1; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_95 = _source_ok_T_94 | _source_ok_WIRE_1_2; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_96 = _source_ok_T_95 | _source_ok_WIRE_1_3; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_97 = _source_ok_T_96 | _source_ok_WIRE_1_4; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_98 = _source_ok_T_97 | _source_ok_WIRE_1_5; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_99 = _source_ok_T_98 | _source_ok_WIRE_1_6; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_100 = _source_ok_T_99 | _source_ok_WIRE_1_7; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_101 = _source_ok_T_100 | _source_ok_WIRE_1_8; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_102 = _source_ok_T_101 | _source_ok_WIRE_1_9; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_103 = _source_ok_T_102 | _source_ok_WIRE_1_10; // @[Parameters.scala:1138:31, :1139:46]
wire source_ok_1 = _source_ok_T_103 | _source_ok_WIRE_1_11; // @[Parameters.scala:1138:31, :1139:46]
wire _T_1306 = io_in_a_ready_0 & io_in_a_valid_0; // @[Decoupled.scala:51:35]
wire _a_first_T; // @[Decoupled.scala:51:35]
assign _a_first_T = _T_1306; // @[Decoupled.scala:51:35]
wire _a_first_T_1; // @[Decoupled.scala:51:35]
assign _a_first_T_1 = _T_1306; // @[Decoupled.scala:51:35]
wire [5:0] _a_first_beats1_decode_T_1 = _a_first_beats1_decode_T[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] _a_first_beats1_decode_T_2 = ~_a_first_beats1_decode_T_1; // @[package.scala:243:{46,76}]
wire [2:0] a_first_beats1_decode = _a_first_beats1_decode_T_2[5:3]; // @[package.scala:243:46]
wire _a_first_beats1_opdata_T = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7]
wire _a_first_beats1_opdata_T_1 = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7]
wire a_first_beats1_opdata = ~_a_first_beats1_opdata_T; // @[Edges.scala:92:{28,37}]
wire [2:0] a_first_beats1 = a_first_beats1_opdata ? a_first_beats1_decode : 3'h0; // @[Edges.scala:92:28, :220:59, :221:14]
reg [2:0] a_first_counter; // @[Edges.scala:229:27]
wire [3:0] _a_first_counter1_T = {1'h0, a_first_counter} - 4'h1; // @[Edges.scala:229:27, :230:28]
wire [2:0] a_first_counter1 = _a_first_counter1_T[2:0]; // @[Edges.scala:230:28]
wire a_first = a_first_counter == 3'h0; // @[Edges.scala:229:27, :231:25]
wire _a_first_last_T = a_first_counter == 3'h1; // @[Edges.scala:229:27, :232:25]
wire _a_first_last_T_1 = a_first_beats1 == 3'h0; // @[Edges.scala:221:14, :232:43]
wire a_first_last = _a_first_last_T | _a_first_last_T_1; // @[Edges.scala:232:{25,33,43}]
wire a_first_done = a_first_last & _a_first_T; // @[Decoupled.scala:51:35]
wire [2:0] _a_first_count_T = ~a_first_counter1; // @[Edges.scala:230:28, :234:27]
wire [2:0] a_first_count = a_first_beats1 & _a_first_count_T; // @[Edges.scala:221:14, :234:{25,27}]
wire [2:0] _a_first_counter_T = a_first ? a_first_beats1 : a_first_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
reg [2:0] opcode; // @[Monitor.scala:387:22]
reg [2:0] param; // @[Monitor.scala:388:22]
reg [2:0] size; // @[Monitor.scala:389:22]
reg [6:0] source; // @[Monitor.scala:390:22]
reg [25:0] address; // @[Monitor.scala:391:22]
wire _T_1379 = io_in_d_ready_0 & io_in_d_valid_0; // @[Decoupled.scala:51:35]
wire _d_first_T; // @[Decoupled.scala:51:35]
assign _d_first_T = _T_1379; // @[Decoupled.scala:51:35]
wire _d_first_T_1; // @[Decoupled.scala:51:35]
assign _d_first_T_1 = _T_1379; // @[Decoupled.scala:51:35]
wire _d_first_T_2; // @[Decoupled.scala:51:35]
assign _d_first_T_2 = _T_1379; // @[Decoupled.scala:51:35]
wire [12:0] _GEN_0 = 13'h3F << io_in_d_bits_size_0; // @[package.scala:243:71]
wire [12:0] _d_first_beats1_decode_T; // @[package.scala:243:71]
assign _d_first_beats1_decode_T = _GEN_0; // @[package.scala:243:71]
wire [12:0] _d_first_beats1_decode_T_3; // @[package.scala:243:71]
assign _d_first_beats1_decode_T_3 = _GEN_0; // @[package.scala:243:71]
wire [12:0] _d_first_beats1_decode_T_6; // @[package.scala:243:71]
assign _d_first_beats1_decode_T_6 = _GEN_0; // @[package.scala:243:71]
wire [5:0] _d_first_beats1_decode_T_1 = _d_first_beats1_decode_T[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] _d_first_beats1_decode_T_2 = ~_d_first_beats1_decode_T_1; // @[package.scala:243:{46,76}]
wire [2:0] d_first_beats1_decode = _d_first_beats1_decode_T_2[5:3]; // @[package.scala:243:46]
wire d_first_beats1_opdata = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7]
wire d_first_beats1_opdata_1 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7]
wire d_first_beats1_opdata_2 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7]
wire [2:0] d_first_beats1 = d_first_beats1_opdata ? d_first_beats1_decode : 3'h0; // @[Edges.scala:106:36, :220:59, :221:14]
reg [2:0] d_first_counter; // @[Edges.scala:229:27]
wire [3:0] _d_first_counter1_T = {1'h0, d_first_counter} - 4'h1; // @[Edges.scala:229:27, :230:28]
wire [2:0] d_first_counter1 = _d_first_counter1_T[2:0]; // @[Edges.scala:230:28]
wire d_first = d_first_counter == 3'h0; // @[Edges.scala:229:27, :231:25]
wire _d_first_last_T = d_first_counter == 3'h1; // @[Edges.scala:229:27, :232:25]
wire _d_first_last_T_1 = d_first_beats1 == 3'h0; // @[Edges.scala:221:14, :232:43]
wire d_first_last = _d_first_last_T | _d_first_last_T_1; // @[Edges.scala:232:{25,33,43}]
wire d_first_done = d_first_last & _d_first_T; // @[Decoupled.scala:51:35]
wire [2:0] _d_first_count_T = ~d_first_counter1; // @[Edges.scala:230:28, :234:27]
wire [2:0] d_first_count = d_first_beats1 & _d_first_count_T; // @[Edges.scala:221:14, :234:{25,27}]
wire [2:0] _d_first_counter_T = d_first ? d_first_beats1 : d_first_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
reg [2:0] opcode_1; // @[Monitor.scala:538:22]
reg [1:0] param_1; // @[Monitor.scala:539:22]
reg [2:0] size_1; // @[Monitor.scala:540:22]
reg [6:0] source_1; // @[Monitor.scala:541:22]
reg sink; // @[Monitor.scala:542:22]
reg denied; // @[Monitor.scala:543:22]
reg [64:0] inflight; // @[Monitor.scala:614:27]
reg [259:0] inflight_opcodes; // @[Monitor.scala:616:35]
reg [259:0] inflight_sizes; // @[Monitor.scala:618:33]
wire [5:0] _a_first_beats1_decode_T_4 = _a_first_beats1_decode_T_3[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] _a_first_beats1_decode_T_5 = ~_a_first_beats1_decode_T_4; // @[package.scala:243:{46,76}]
wire [2:0] a_first_beats1_decode_1 = _a_first_beats1_decode_T_5[5:3]; // @[package.scala:243:46]
wire a_first_beats1_opdata_1 = ~_a_first_beats1_opdata_T_1; // @[Edges.scala:92:{28,37}]
wire [2:0] a_first_beats1_1 = a_first_beats1_opdata_1 ? a_first_beats1_decode_1 : 3'h0; // @[Edges.scala:92:28, :220:59, :221:14]
reg [2:0] a_first_counter_1; // @[Edges.scala:229:27]
wire [3:0] _a_first_counter1_T_1 = {1'h0, a_first_counter_1} - 4'h1; // @[Edges.scala:229:27, :230:28]
wire [2:0] a_first_counter1_1 = _a_first_counter1_T_1[2:0]; // @[Edges.scala:230:28]
wire a_first_1 = a_first_counter_1 == 3'h0; // @[Edges.scala:229:27, :231:25]
wire _a_first_last_T_2 = a_first_counter_1 == 3'h1; // @[Edges.scala:229:27, :232:25]
wire _a_first_last_T_3 = a_first_beats1_1 == 3'h0; // @[Edges.scala:221:14, :232:43]
wire a_first_last_1 = _a_first_last_T_2 | _a_first_last_T_3; // @[Edges.scala:232:{25,33,43}]
wire a_first_done_1 = a_first_last_1 & _a_first_T_1; // @[Decoupled.scala:51:35]
wire [2:0] _a_first_count_T_1 = ~a_first_counter1_1; // @[Edges.scala:230:28, :234:27]
wire [2:0] a_first_count_1 = a_first_beats1_1 & _a_first_count_T_1; // @[Edges.scala:221:14, :234:{25,27}]
wire [2:0] _a_first_counter_T_1 = a_first_1 ? a_first_beats1_1 : a_first_counter1_1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
wire [5:0] _d_first_beats1_decode_T_4 = _d_first_beats1_decode_T_3[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] _d_first_beats1_decode_T_5 = ~_d_first_beats1_decode_T_4; // @[package.scala:243:{46,76}]
wire [2:0] d_first_beats1_decode_1 = _d_first_beats1_decode_T_5[5:3]; // @[package.scala:243:46]
wire [2:0] d_first_beats1_1 = d_first_beats1_opdata_1 ? d_first_beats1_decode_1 : 3'h0; // @[Edges.scala:106:36, :220:59, :221:14]
reg [2:0] d_first_counter_1; // @[Edges.scala:229:27]
wire [3:0] _d_first_counter1_T_1 = {1'h0, d_first_counter_1} - 4'h1; // @[Edges.scala:229:27, :230:28]
wire [2:0] d_first_counter1_1 = _d_first_counter1_T_1[2:0]; // @[Edges.scala:230:28]
wire d_first_1 = d_first_counter_1 == 3'h0; // @[Edges.scala:229:27, :231:25]
wire _d_first_last_T_2 = d_first_counter_1 == 3'h1; // @[Edges.scala:229:27, :232:25]
wire _d_first_last_T_3 = d_first_beats1_1 == 3'h0; // @[Edges.scala:221:14, :232:43]
wire d_first_last_1 = _d_first_last_T_2 | _d_first_last_T_3; // @[Edges.scala:232:{25,33,43}]
wire d_first_done_1 = d_first_last_1 & _d_first_T_1; // @[Decoupled.scala:51:35]
wire [2:0] _d_first_count_T_1 = ~d_first_counter1_1; // @[Edges.scala:230:28, :234:27]
wire [2:0] d_first_count_1 = d_first_beats1_1 & _d_first_count_T_1; // @[Edges.scala:221:14, :234:{25,27}]
wire [2:0] _d_first_counter_T_1 = d_first_1 ? d_first_beats1_1 : d_first_counter1_1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
wire [64:0] a_set; // @[Monitor.scala:626:34]
wire [64:0] a_set_wo_ready; // @[Monitor.scala:627:34]
wire [259:0] a_opcodes_set; // @[Monitor.scala:630:33]
wire [259:0] a_sizes_set; // @[Monitor.scala:632:31]
wire [2:0] a_opcode_lookup; // @[Monitor.scala:635:35]
wire [9:0] _GEN_1 = {1'h0, io_in_d_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :637:69]
wire [9:0] _a_opcode_lookup_T; // @[Monitor.scala:637:69]
assign _a_opcode_lookup_T = _GEN_1; // @[Monitor.scala:637:69]
wire [9:0] _a_size_lookup_T; // @[Monitor.scala:641:65]
assign _a_size_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :641:65]
wire [9:0] _d_opcodes_clr_T_4; // @[Monitor.scala:680:101]
assign _d_opcodes_clr_T_4 = _GEN_1; // @[Monitor.scala:637:69, :680:101]
wire [9:0] _d_sizes_clr_T_4; // @[Monitor.scala:681:99]
assign _d_sizes_clr_T_4 = _GEN_1; // @[Monitor.scala:637:69, :681:99]
wire [9:0] _c_opcode_lookup_T; // @[Monitor.scala:749:69]
assign _c_opcode_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :749:69]
wire [9:0] _c_size_lookup_T; // @[Monitor.scala:750:67]
assign _c_size_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :750:67]
wire [9:0] _d_opcodes_clr_T_10; // @[Monitor.scala:790:101]
assign _d_opcodes_clr_T_10 = _GEN_1; // @[Monitor.scala:637:69, :790:101]
wire [9:0] _d_sizes_clr_T_10; // @[Monitor.scala:791:99]
assign _d_sizes_clr_T_10 = _GEN_1; // @[Monitor.scala:637:69, :791:99]
wire [259:0] _a_opcode_lookup_T_1 = inflight_opcodes >> _a_opcode_lookup_T; // @[Monitor.scala:616:35, :637:{44,69}]
wire [259:0] _a_opcode_lookup_T_6 = {256'h0, _a_opcode_lookup_T_1[3:0]}; // @[Monitor.scala:637:{44,97}]
wire [259:0] _a_opcode_lookup_T_7 = {1'h0, _a_opcode_lookup_T_6[259:1]}; // @[Monitor.scala:637:{97,152}]
assign a_opcode_lookup = _a_opcode_lookup_T_7[2:0]; // @[Monitor.scala:635:35, :637:{21,152}]
wire [3:0] a_size_lookup; // @[Monitor.scala:639:33]
wire [259:0] _a_size_lookup_T_1 = inflight_sizes >> _a_size_lookup_T; // @[Monitor.scala:618:33, :641:{40,65}]
wire [259:0] _a_size_lookup_T_6 = {256'h0, _a_size_lookup_T_1[3:0]}; // @[Monitor.scala:641:{40,91}]
wire [259:0] _a_size_lookup_T_7 = {1'h0, _a_size_lookup_T_6[259:1]}; // @[Monitor.scala:641:{91,144}]
assign a_size_lookup = _a_size_lookup_T_7[3:0]; // @[Monitor.scala:639:33, :641:{19,144}]
wire [3:0] a_opcodes_set_interm; // @[Monitor.scala:646:40]
wire [3:0] a_sizes_set_interm; // @[Monitor.scala:648:38]
wire _same_cycle_resp_T = io_in_a_valid_0 & a_first_1; // @[Monitor.scala:36:7, :651:26, :684:44]
wire [127:0] _GEN_2 = 128'h1 << io_in_a_bits_source_0; // @[OneHot.scala:58:35]
wire [127:0] _a_set_wo_ready_T; // @[OneHot.scala:58:35]
assign _a_set_wo_ready_T = _GEN_2; // @[OneHot.scala:58:35]
wire [127:0] _a_set_T; // @[OneHot.scala:58:35]
assign _a_set_T = _GEN_2; // @[OneHot.scala:58:35]
assign a_set_wo_ready = _same_cycle_resp_T ? _a_set_wo_ready_T[64:0] : 65'h0; // @[OneHot.scala:58:35]
wire _T_1232 = _T_1306 & a_first_1; // @[Decoupled.scala:51:35]
assign a_set = _T_1232 ? _a_set_T[64:0] : 65'h0; // @[OneHot.scala:58:35]
wire [3:0] _a_opcodes_set_interm_T = {io_in_a_bits_opcode_0, 1'h0}; // @[Monitor.scala:36:7, :657:53]
wire [3:0] _a_opcodes_set_interm_T_1 = {_a_opcodes_set_interm_T[3:1], 1'h1}; // @[Monitor.scala:657:{53,61}]
assign a_opcodes_set_interm = _T_1232 ? _a_opcodes_set_interm_T_1 : 4'h0; // @[Monitor.scala:646:40, :655:{25,70}, :657:{28,61}]
wire [3:0] _a_sizes_set_interm_T = {io_in_a_bits_size_0, 1'h0}; // @[Monitor.scala:36:7, :658:51]
wire [3:0] _a_sizes_set_interm_T_1 = {_a_sizes_set_interm_T[3:1], 1'h1}; // @[Monitor.scala:658:{51,59}]
assign a_sizes_set_interm = _T_1232 ? _a_sizes_set_interm_T_1 : 4'h0; // @[Monitor.scala:648:38, :655:{25,70}, :658:{28,59}]
wire [9:0] _GEN_3 = {1'h0, io_in_a_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :659:79]
wire [9:0] _a_opcodes_set_T; // @[Monitor.scala:659:79]
assign _a_opcodes_set_T = _GEN_3; // @[Monitor.scala:659:79]
wire [9:0] _a_sizes_set_T; // @[Monitor.scala:660:77]
assign _a_sizes_set_T = _GEN_3; // @[Monitor.scala:659:79, :660:77]
wire [1026:0] _a_opcodes_set_T_1 = {1023'h0, a_opcodes_set_interm} << _a_opcodes_set_T; // @[Monitor.scala:646:40, :659:{54,79}]
assign a_opcodes_set = _T_1232 ? _a_opcodes_set_T_1[259:0] : 260'h0; // @[Monitor.scala:630:33, :655:{25,70}, :659:{28,54}]
wire [1026:0] _a_sizes_set_T_1 = {1023'h0, a_sizes_set_interm} << _a_sizes_set_T; // @[Monitor.scala:648:38, :659:54, :660:{52,77}]
assign a_sizes_set = _T_1232 ? _a_sizes_set_T_1[259:0] : 260'h0; // @[Monitor.scala:632:31, :655:{25,70}, :660:{28,52}]
wire [64:0] d_clr; // @[Monitor.scala:664:34]
wire [64:0] d_clr_wo_ready; // @[Monitor.scala:665:34]
wire [259:0] d_opcodes_clr; // @[Monitor.scala:668:33]
wire [259:0] d_sizes_clr; // @[Monitor.scala:670:31]
wire _GEN_4 = io_in_d_bits_opcode_0 == 3'h6; // @[Monitor.scala:36:7, :673:46]
wire d_release_ack; // @[Monitor.scala:673:46]
assign d_release_ack = _GEN_4; // @[Monitor.scala:673:46]
wire d_release_ack_1; // @[Monitor.scala:783:46]
assign d_release_ack_1 = _GEN_4; // @[Monitor.scala:673:46, :783:46]
wire _T_1278 = io_in_d_valid_0 & d_first_1; // @[Monitor.scala:36:7, :674:26]
wire [127:0] _GEN_5 = 128'h1 << io_in_d_bits_source_0; // @[OneHot.scala:58:35]
wire [127:0] _d_clr_wo_ready_T; // @[OneHot.scala:58:35]
assign _d_clr_wo_ready_T = _GEN_5; // @[OneHot.scala:58:35]
wire [127:0] _d_clr_T; // @[OneHot.scala:58:35]
assign _d_clr_T = _GEN_5; // @[OneHot.scala:58:35]
wire [127:0] _d_clr_wo_ready_T_1; // @[OneHot.scala:58:35]
assign _d_clr_wo_ready_T_1 = _GEN_5; // @[OneHot.scala:58:35]
wire [127:0] _d_clr_T_1; // @[OneHot.scala:58:35]
assign _d_clr_T_1 = _GEN_5; // @[OneHot.scala:58:35]
assign d_clr_wo_ready = _T_1278 & ~d_release_ack ? _d_clr_wo_ready_T[64:0] : 65'h0; // @[OneHot.scala:58:35]
wire _T_1247 = _T_1379 & d_first_1 & ~d_release_ack; // @[Decoupled.scala:51:35]
assign d_clr = _T_1247 ? _d_clr_T[64:0] : 65'h0; // @[OneHot.scala:58:35]
wire [1038:0] _d_opcodes_clr_T_5 = 1039'hF << _d_opcodes_clr_T_4; // @[Monitor.scala:680:{76,101}]
assign d_opcodes_clr = _T_1247 ? _d_opcodes_clr_T_5[259:0] : 260'h0; // @[Monitor.scala:668:33, :678:{25,70,89}, :680:{21,76}]
wire [1038:0] _d_sizes_clr_T_5 = 1039'hF << _d_sizes_clr_T_4; // @[Monitor.scala:681:{74,99}]
assign d_sizes_clr = _T_1247 ? _d_sizes_clr_T_5[259:0] : 260'h0; // @[Monitor.scala:670:31, :678:{25,70,89}, :681:{21,74}]
wire _same_cycle_resp_T_1 = _same_cycle_resp_T; // @[Monitor.scala:684:{44,55}]
wire _same_cycle_resp_T_2 = io_in_a_bits_source_0 == io_in_d_bits_source_0; // @[Monitor.scala:36:7, :684:113]
wire same_cycle_resp = _same_cycle_resp_T_1 & _same_cycle_resp_T_2; // @[Monitor.scala:684:{55,88,113}]
wire [64:0] _inflight_T = inflight | a_set; // @[Monitor.scala:614:27, :626:34, :705:27]
wire [64:0] _inflight_T_1 = ~d_clr; // @[Monitor.scala:664:34, :705:38]
wire [64:0] _inflight_T_2 = _inflight_T & _inflight_T_1; // @[Monitor.scala:705:{27,36,38}]
wire [259:0] _inflight_opcodes_T = inflight_opcodes | a_opcodes_set; // @[Monitor.scala:616:35, :630:33, :706:43]
wire [259:0] _inflight_opcodes_T_1 = ~d_opcodes_clr; // @[Monitor.scala:668:33, :706:62]
wire [259:0] _inflight_opcodes_T_2 = _inflight_opcodes_T & _inflight_opcodes_T_1; // @[Monitor.scala:706:{43,60,62}]
wire [259:0] _inflight_sizes_T = inflight_sizes | a_sizes_set; // @[Monitor.scala:618:33, :632:31, :707:39]
wire [259:0] _inflight_sizes_T_1 = ~d_sizes_clr; // @[Monitor.scala:670:31, :707:56]
wire [259:0] _inflight_sizes_T_2 = _inflight_sizes_T & _inflight_sizes_T_1; // @[Monitor.scala:707:{39,54,56}]
reg [31:0] watchdog; // @[Monitor.scala:709:27]
wire [32:0] _watchdog_T = {1'h0, watchdog} + 33'h1; // @[Monitor.scala:709:27, :714:26]
wire [31:0] _watchdog_T_1 = _watchdog_T[31:0]; // @[Monitor.scala:714:26]
reg [64:0] inflight_1; // @[Monitor.scala:726:35]
wire [64:0] _inflight_T_3 = inflight_1; // @[Monitor.scala:726:35, :814:35]
reg [259:0] inflight_opcodes_1; // @[Monitor.scala:727:35]
wire [259:0] _inflight_opcodes_T_3 = inflight_opcodes_1; // @[Monitor.scala:727:35, :815:43]
reg [259:0] inflight_sizes_1; // @[Monitor.scala:728:35]
wire [259:0] _inflight_sizes_T_3 = inflight_sizes_1; // @[Monitor.scala:728:35, :816:41]
wire [5:0] _d_first_beats1_decode_T_7 = _d_first_beats1_decode_T_6[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] _d_first_beats1_decode_T_8 = ~_d_first_beats1_decode_T_7; // @[package.scala:243:{46,76}]
wire [2:0] d_first_beats1_decode_2 = _d_first_beats1_decode_T_8[5:3]; // @[package.scala:243:46]
wire [2:0] d_first_beats1_2 = d_first_beats1_opdata_2 ? d_first_beats1_decode_2 : 3'h0; // @[Edges.scala:106:36, :220:59, :221:14]
reg [2:0] d_first_counter_2; // @[Edges.scala:229:27]
wire [3:0] _d_first_counter1_T_2 = {1'h0, d_first_counter_2} - 4'h1; // @[Edges.scala:229:27, :230:28]
wire [2:0] d_first_counter1_2 = _d_first_counter1_T_2[2:0]; // @[Edges.scala:230:28]
wire d_first_2 = d_first_counter_2 == 3'h0; // @[Edges.scala:229:27, :231:25]
wire _d_first_last_T_4 = d_first_counter_2 == 3'h1; // @[Edges.scala:229:27, :232:25]
wire _d_first_last_T_5 = d_first_beats1_2 == 3'h0; // @[Edges.scala:221:14, :232:43]
wire d_first_last_2 = _d_first_last_T_4 | _d_first_last_T_5; // @[Edges.scala:232:{25,33,43}]
wire d_first_done_2 = d_first_last_2 & _d_first_T_2; // @[Decoupled.scala:51:35]
wire [2:0] _d_first_count_T_2 = ~d_first_counter1_2; // @[Edges.scala:230:28, :234:27]
wire [2:0] d_first_count_2 = d_first_beats1_2 & _d_first_count_T_2; // @[Edges.scala:221:14, :234:{25,27}]
wire [2:0] _d_first_counter_T_2 = d_first_2 ? d_first_beats1_2 : d_first_counter1_2; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
wire [3:0] c_opcode_lookup; // @[Monitor.scala:747:35]
wire [3:0] c_size_lookup; // @[Monitor.scala:748:35]
wire [259:0] _c_opcode_lookup_T_1 = inflight_opcodes_1 >> _c_opcode_lookup_T; // @[Monitor.scala:727:35, :749:{44,69}]
wire [259:0] _c_opcode_lookup_T_6 = {256'h0, _c_opcode_lookup_T_1[3:0]}; // @[Monitor.scala:749:{44,97}]
wire [259:0] _c_opcode_lookup_T_7 = {1'h0, _c_opcode_lookup_T_6[259:1]}; // @[Monitor.scala:749:{97,152}]
assign c_opcode_lookup = _c_opcode_lookup_T_7[3:0]; // @[Monitor.scala:747:35, :749:{21,152}]
wire [259:0] _c_size_lookup_T_1 = inflight_sizes_1 >> _c_size_lookup_T; // @[Monitor.scala:728:35, :750:{42,67}]
wire [259:0] _c_size_lookup_T_6 = {256'h0, _c_size_lookup_T_1[3:0]}; // @[Monitor.scala:750:{42,93}]
wire [259:0] _c_size_lookup_T_7 = {1'h0, _c_size_lookup_T_6[259:1]}; // @[Monitor.scala:750:{93,146}]
assign c_size_lookup = _c_size_lookup_T_7[3:0]; // @[Monitor.scala:748:35, :750:{21,146}]
wire [64:0] d_clr_1; // @[Monitor.scala:774:34]
wire [64:0] d_clr_wo_ready_1; // @[Monitor.scala:775:34]
wire [259:0] d_opcodes_clr_1; // @[Monitor.scala:776:34]
wire [259:0] d_sizes_clr_1; // @[Monitor.scala:777:34]
wire _T_1350 = io_in_d_valid_0 & d_first_2; // @[Monitor.scala:36:7, :784:26]
assign d_clr_wo_ready_1 = _T_1350 & d_release_ack_1 ? _d_clr_wo_ready_T_1[64:0] : 65'h0; // @[OneHot.scala:58:35]
wire _T_1332 = _T_1379 & d_first_2 & d_release_ack_1; // @[Decoupled.scala:51:35]
assign d_clr_1 = _T_1332 ? _d_clr_T_1[64:0] : 65'h0; // @[OneHot.scala:58:35]
wire [1038:0] _d_opcodes_clr_T_11 = 1039'hF << _d_opcodes_clr_T_10; // @[Monitor.scala:790:{76,101}]
assign d_opcodes_clr_1 = _T_1332 ? _d_opcodes_clr_T_11[259:0] : 260'h0; // @[Monitor.scala:776:34, :788:{25,70,88}, :790:{21,76}]
wire [1038:0] _d_sizes_clr_T_11 = 1039'hF << _d_sizes_clr_T_10; // @[Monitor.scala:791:{74,99}]
assign d_sizes_clr_1 = _T_1332 ? _d_sizes_clr_T_11[259:0] : 260'h0; // @[Monitor.scala:777:34, :788:{25,70,88}, :791:{21,74}]
wire _same_cycle_resp_T_8 = io_in_d_bits_source_0 == 7'h0; // @[Monitor.scala:36:7, :795:113]
wire [64:0] _inflight_T_4 = ~d_clr_1; // @[Monitor.scala:774:34, :814:46]
wire [64:0] _inflight_T_5 = _inflight_T_3 & _inflight_T_4; // @[Monitor.scala:814:{35,44,46}]
wire [259:0] _inflight_opcodes_T_4 = ~d_opcodes_clr_1; // @[Monitor.scala:776:34, :815:62]
wire [259:0] _inflight_opcodes_T_5 = _inflight_opcodes_T_3 & _inflight_opcodes_T_4; // @[Monitor.scala:815:{43,60,62}]
wire [259:0] _inflight_sizes_T_4 = ~d_sizes_clr_1; // @[Monitor.scala:777:34, :816:58]
wire [259:0] _inflight_sizes_T_5 = _inflight_sizes_T_3 & _inflight_sizes_T_4; // @[Monitor.scala:816:{41,56,58}]
reg [31:0] watchdog_1; // @[Monitor.scala:818:27] |
Generate the Verilog code corresponding to the following Chisel files.
File ShiftReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
// Similar to the Chisel ShiftRegister but allows the user to suggest a
// name to the registers that get instantiated, and
// to provide a reset value.
object ShiftRegInit {
def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T =
(0 until n).foldRight(in) {
case (i, next) => {
val r = RegNext(next, init)
name.foreach { na => r.suggestName(s"${na}_${i}") }
r
}
}
}
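// Usage sketch (editor addition, not part of the upstream file): pipeline a Bool
// through two named, reset-initialized stages using ShiftRegInit as defined above.
// The wrapper object, method, and register-name choices below are illustrative only.
object ShiftRegInitUsageSketch {
  def twoStage(in: Bool): Bool =
    ShiftRegInit(in, n = 2, init = false.B, name = Some("valid_pipe"))
}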
/** These wrap behavioral
* shift registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
* The different types vary in their reset behavior:
* AsyncResetShiftReg -- Asynchronously reset register array
* A W(width) x D(depth) sized array is constructed from D instantiations of a
 * W-wide register vector. Functionally identical to AsyncResetSynchronizerShiftReg,
* but only used for timing applications
*/
abstract class AbstractPipelineReg(w: Int = 1) extends Module {
val io = IO(new Bundle {
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
}
)
}
object AbstractPipelineReg {
def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = {
val chain = Module(gen)
name.foreach{ chain.suggestName(_) }
chain.io.d := in.asUInt
chain.io.q.asTypeOf(in)
}
}
class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) {
require(depth > 0, "Depth must be greater than 0.")
override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}"
val chain = List.tabulate(depth) { i =>
Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}")
}
chain.last.io.d := io.d
chain.last.io.en := true.B
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink.io.d := source.io.q
sink.io.en := true.B
}
io.q := chain.head.io.q
}
object AsyncResetShiftReg {
def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name)
def apply [T <: Data](in: T, depth: Int, name: Option[String]): T =
apply(in, depth, 0, name)
def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T =
apply(in, depth, init.litValue.toInt, name)
def apply [T <: Data](in: T, depth: Int, init: T): T =
apply (in, depth, init.litValue.toInt, None)
}
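// Usage sketch (editor addition, not part of the upstream file): carry a multi-bit
// value through a 3-deep, asynchronously reset pipeline built from the AsyncResetShiftReg
// wrapper above. The "cdc_pipe" name and method signature are illustrative only.
object AsyncResetShiftRegUsageSketch {
  def threeDeep(in: UInt): UInt =
    AsyncResetShiftReg(in, depth = 3, init = 0, name = Some("cdc_pipe"))
}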
File SynchronizerReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util.{RegEnable, Cat}
/** These wrap behavioral
* shift and next registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
*
* These are built up of *ResetSynchronizerPrimitiveShiftReg,
* intended to be replaced by the integrator's metastable flops chains or replaced
* at this level if they have a multi-bit wide synchronizer primitive.
* The different types vary in their reset behavior:
* NonSyncResetSynchronizerShiftReg -- Register array which does not have a reset pin
* AsyncResetSynchronizerShiftReg -- Asynchronously reset register array, constructed from W instantiations of D deep
* 1-bit-wide shift registers.
* SyncResetSynchronizerShiftReg -- Synchronously reset register array, constructed similarly to AsyncResetSynchronizerShiftReg
*
* [Inferred]ResetSynchronizerShiftReg -- TBD reset type by chisel3 reset inference.
*
* ClockCrossingReg -- Not made up of SynchronizerPrimitiveShiftReg. This is for single-deep flops which cross
* Clock Domains.
*/
object SynchronizerResetType extends Enumeration {
val NonSync, Inferred, Sync, Async = Value
}
// Note: this should not be used directly.
// Use the companion object to generate this with the correct reset type mixin.
private class SynchronizerPrimitiveShiftReg(
sync: Int,
init: Boolean,
resetType: SynchronizerResetType.Value)
extends AbstractPipelineReg(1) {
val initInt = if (init) 1 else 0
val initPostfix = resetType match {
case SynchronizerResetType.NonSync => ""
case _ => s"_i${initInt}"
}
override def desiredName = s"${resetType.toString}ResetSynchronizerPrimitiveShiftReg_d${sync}${initPostfix}"
val chain = List.tabulate(sync) { i =>
val reg = if (resetType == SynchronizerResetType.NonSync) Reg(Bool()) else RegInit(init.B)
reg.suggestName(s"sync_$i")
}
chain.last := io.d.asBool
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink := source
}
io.q := chain.head.asUInt
}
private object SynchronizerPrimitiveShiftReg {
def apply (in: Bool, sync: Int, init: Boolean, resetType: SynchronizerResetType.Value): Bool = {
val gen: () => SynchronizerPrimitiveShiftReg = resetType match {
case SynchronizerResetType.NonSync =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType)
case SynchronizerResetType.Async =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireAsyncReset
case SynchronizerResetType.Sync =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireSyncReset
case SynchronizerResetType.Inferred =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType)
}
AbstractPipelineReg(gen(), in)
}
}
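// Illustrative note (not part of the original source): the companion object
// above is what picks the reset flavour for the primitive chain. For example,
// the wrappers below request SynchronizerResetType.Async, which mixes in
// RequireAsyncReset so the sync_* flops are lowered as asynchronously-reset
// registers, while NonSync builds the chain from plain Reg(Bool()) with no
// reset connection at all. A hypothetical call site (the object is private,
// so only the wrappers in this file can do this) would look like:
//   val q = SynchronizerPrimitiveShiftReg(io.d(0), sync = 3, init = false,
//                                         resetType = SynchronizerResetType.Async)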
// Note: This module may end up with a non-AsyncReset type reset.
// But the Primitives within will always have AsyncReset type.
class AsyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int)
extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"AsyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
withReset(reset.asAsyncReset){
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Async)
}
}
io.q := Cat(output.reverse)
}
object AsyncResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
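// Hedged usage sketch (added for illustration; the module and port names are
// assumptions, not part of rocket-chip): a typical call site pushes a single
// asynchronous Bool through three async-reset flops before local logic samples it.
class AsyncInputSynchronizerExample extends Module {
  val io = IO(new Bundle {
    val irq_async = Input(Bool())   // generated in another clock domain
    val irq_sync  = Output(Bool())  // safe to consume in this clock domain
  })
  io.irq_sync := AsyncResetSynchronizerShiftReg(io.irq_async, sync = 3, init = 0, name = Some("irq_sync"))
}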
// Note: This module may end up with a non-Bool type reset.
// But the Primitives within will always have Bool reset type.
@deprecated("SyncResetSynchronizerShiftReg is unecessary with Chisel3 inferred resets. Use ResetSynchronizerShiftReg which will use the inferred reset type.", "rocket-chip 1.2")
class SyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"SyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
withReset(reset.asBool){
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Sync)
}
}
io.q := Cat(output.reverse)
}
object SyncResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
if (sync == 0) in else AbstractPipelineReg(new SyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
class ResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"ResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Inferred)
}
io.q := Cat(output.reverse)
}
object ResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
AbstractPipelineReg(new ResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
class SynchronizerShiftReg(w: Int = 1, sync: Int = 3) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"SynchronizerShiftReg_w${w}_d${sync}"
val output = Seq.tabulate(w) { i =>
SynchronizerPrimitiveShiftReg(io.d(i), sync, false, SynchronizerResetType.NonSync)
}
io.q := Cat(output.reverse)
}
object SynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, name: Option[String] = None): T =
if (sync == 0) in else AbstractPipelineReg(new SynchronizerShiftReg(in.getWidth, sync), in, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, None)
def apply [T <: Data](in: T): T =
apply (in, 3, None)
}
class ClockCrossingReg(w: Int = 1, doInit: Boolean) extends Module {
override def desiredName = s"ClockCrossingReg_w${w}"
val io = IO(new Bundle{
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
val en = Input(Bool())
})
val cdc_reg = if (doInit) RegEnable(io.d, 0.U(w.W), io.en) else RegEnable(io.d, io.en)
io.q := cdc_reg
}
object ClockCrossingReg {
def apply [T <: Data](in: T, en: Bool, doInit: Boolean, name: Option[String] = None): T = {
val cdc_reg = Module(new ClockCrossingReg(in.getWidth, doInit))
name.foreach{ cdc_reg.suggestName(_) }
cdc_reg.io.d := in.asUInt
cdc_reg.io.en := en
cdc_reg.io.q.asTypeOf(in)
}
}
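// Hedged usage sketch (illustrative only; names and widths are assumptions):
// ClockCrossingReg captures a multi-bit payload whose enable has already been
// made safe for this clock domain, e.g. by a separately synchronized handshake.
class CaptureWordExample extends Module {
  val io = IO(new Bundle {
    val data_in  = Input(UInt(32.W))  // launched from another clock domain
    val capture  = Input(Bool())      // assumed already synchronized here
    val data_out = Output(UInt(32.W))
  })
  io.data_out := ClockCrossingReg(io.data_in, en = io.capture, doInit = false, name = Some("cdc_capture"))
}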
| module AsyncResetSynchronizerPrimitiveShiftReg_d3_i0_171( // @[SynchronizerReg.scala:68:19]
input clock, // @[SynchronizerReg.scala:68:19]
input reset, // @[SynchronizerReg.scala:68:19]
input io_d, // @[ShiftReg.scala:36:14]
output io_q // @[ShiftReg.scala:36:14]
);
wire io_d_0 = io_d; // @[SynchronizerReg.scala:68:19]
wire _sync_2_T = io_d_0; // @[SynchronizerReg.scala:54:22, :68:19]
wire io_q_0; // @[SynchronizerReg.scala:68:19]
reg sync_0; // @[SynchronizerReg.scala:51:87]
assign io_q_0 = sync_0; // @[SynchronizerReg.scala:51:87, :68:19]
reg sync_1; // @[SynchronizerReg.scala:51:87]
reg sync_2; // @[SynchronizerReg.scala:51:87]
always @(posedge clock or posedge reset) begin // @[SynchronizerReg.scala:68:19]
if (reset) begin // @[SynchronizerReg.scala:68:19]
sync_0 <= 1'h0; // @[SynchronizerReg.scala:51:87]
sync_1 <= 1'h0; // @[SynchronizerReg.scala:51:87]
sync_2 <= 1'h0; // @[SynchronizerReg.scala:51:87]
end
else begin // @[SynchronizerReg.scala:68:19]
sync_0 <= sync_1; // @[SynchronizerReg.scala:51:87]
sync_1 <= sync_2; // @[SynchronizerReg.scala:51:87]
sync_2 <= _sync_2_T; // @[SynchronizerReg.scala:51:87, :54:22]
end
  end // @[SynchronizerReg.scala:68:19]
  assign io_q = io_q_0; // @[SynchronizerReg.scala:68:19]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File Monitor.scala:
package constellation.channel
import chisel3._
import chisel3.util._
import freechips.rocketchip.diplomacy._
import org.chipsalliance.cde.config.{Parameters}
import freechips.rocketchip.util._
import constellation.noc.{HasNoCParams}
class NoCMonitor(val cParam: ChannelParams)(implicit val p: Parameters) extends Module with HasNoCParams {
val io = IO(new Bundle {
val in = Input(new Channel(cParam))
})
val in_flight = RegInit(VecInit(Seq.fill(cParam.nVirtualChannels) { false.B }))
for (i <- 0 until cParam.srcSpeedup) {
val flit = io.in.flit(i)
when (flit.valid) {
when (flit.bits.head) {
in_flight(flit.bits.virt_channel_id) := true.B
assert (!in_flight(flit.bits.virt_channel_id), "Flit head/tail sequencing is broken")
}
when (flit.bits.tail) {
in_flight(flit.bits.virt_channel_id) := false.B
}
}
val possibleFlows = cParam.possibleFlows
when (flit.valid && flit.bits.head) {
cParam match {
case n: ChannelParams => n.virtualChannelParams.zipWithIndex.foreach { case (v,i) =>
assert(flit.bits.virt_channel_id =/= i.U || v.possibleFlows.toSeq.map(_.isFlow(flit.bits.flow)).orR)
}
case _ => assert(cParam.possibleFlows.toSeq.map(_.isFlow(flit.bits.flow)).orR)
}
}
}
}
File Types.scala:
package constellation.routing
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.{Parameters}
import constellation.noc.{HasNoCParams}
import constellation.channel.{Flit}
/** A representation for 1 specific virtual channel in wormhole routing
*
* @param src the source node
* @param vc ID for the virtual channel
* @param dst the destination node
* @param n_vc the number of virtual channels
*/
// BEGIN: ChannelRoutingInfo
case class ChannelRoutingInfo(
src: Int,
dst: Int,
vc: Int,
n_vc: Int
) {
// END: ChannelRoutingInfo
require (src >= -1 && dst >= -1 && vc >= 0, s"Illegal $this")
require (!(src == -1 && dst == -1), s"Illegal $this")
require (vc < n_vc, s"Illegal $this")
val isIngress = src == -1
val isEgress = dst == -1
}
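// Illustrative example (not part of the original file): ChannelRoutingInfo is
// elaboration-time metadata, so its invariants can be exercised with plain Scala.
object ChannelRoutingInfoExample {
  // A terminal ingress channel feeding node 3, using VC 0 out of 4.
  val ingressCh = ChannelRoutingInfo(src = -1, dst = 3, vc = 0, n_vc = 4)
  require(ingressCh.isIngress && !ingressCh.isEgress)
}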
/** Represents the properties of a packet that are relevant for routing
* ingressId and egressId uniquely identify a flow, but vnet and dst are used here
 * to simplify the implementation of routing relations
*
* @param ingressId packet's source ingress point
* @param egressId packet's destination egress point
* @param vNet virtual subnetwork identifier
* @param dst packet's destination node ID
*/
// BEGIN: FlowRoutingInfo
case class FlowRoutingInfo(
ingressId: Int,
egressId: Int,
vNetId: Int,
ingressNode: Int,
ingressNodeId: Int,
egressNode: Int,
egressNodeId: Int,
fifo: Boolean
) {
// END: FlowRoutingInfo
def isFlow(f: FlowRoutingBundle): Bool = {
(f.ingress_node === ingressNode.U &&
f.egress_node === egressNode.U &&
f.ingress_node_id === ingressNodeId.U &&
f.egress_node_id === egressNodeId.U)
}
def asLiteral(b: FlowRoutingBundle): BigInt = {
Seq(
(vNetId , b.vnet_id),
(ingressNode , b.ingress_node),
(ingressNodeId , b.ingress_node_id),
(egressNode , b.egress_node),
(egressNodeId , b.egress_node_id)
).foldLeft(0)((l, t) => {
(l << t._2.getWidth) | t._1
})
}
}
class FlowRoutingBundle(implicit val p: Parameters) extends Bundle with HasNoCParams {
// Instead of tracking ingress/egress ID, track the physical destination id and the offset at the destination
// This simplifies the routing tables
val vnet_id = UInt(log2Ceil(nVirtualNetworks).W)
val ingress_node = UInt(log2Ceil(nNodes).W)
val ingress_node_id = UInt(log2Ceil(maxIngressesAtNode).W)
val egress_node = UInt(log2Ceil(nNodes).W)
val egress_node_id = UInt(log2Ceil(maxEgressesAtNode).W)
}
| module NoCMonitor_42( // @[Monitor.scala:11:7]
input clock, // @[Monitor.scala:11:7]
input reset, // @[Monitor.scala:11:7]
input io_in_flit_0_valid, // @[Monitor.scala:12:14]
input io_in_flit_0_bits_head, // @[Monitor.scala:12:14]
input io_in_flit_0_bits_tail, // @[Monitor.scala:12:14]
input [3:0] io_in_flit_0_bits_flow_ingress_node, // @[Monitor.scala:12:14]
input [2:0] io_in_flit_0_bits_flow_ingress_node_id, // @[Monitor.scala:12:14]
input [3:0] io_in_flit_0_bits_flow_egress_node, // @[Monitor.scala:12:14]
input [1:0] io_in_flit_0_bits_flow_egress_node_id, // @[Monitor.scala:12:14]
input [1:0] io_in_flit_0_bits_virt_channel_id // @[Monitor.scala:12:14]
);
reg in_flight_0; // @[Monitor.scala:16:26]
reg in_flight_1; // @[Monitor.scala:16:26]
reg in_flight_2; // @[Monitor.scala:16:26]
wire _GEN = io_in_flit_0_bits_virt_channel_id == 2'h1; // @[Monitor.scala:21:46]
wire _GEN_0 = io_in_flit_0_bits_virt_channel_id == 2'h2; // @[Monitor.scala:21:46] |
Generate the Verilog code corresponding to the following Chisel files.
File dcache.scala:
//******************************************************************************
// Ported from Rocket-Chip
// See LICENSE.Berkeley and LICENSE.SiFive in Rocket-Chip for license details.
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
package boom.v4.lsu
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.diplomacy._
import freechips.rocketchip.tilelink._
import freechips.rocketchip.tile._
import freechips.rocketchip.util._
import freechips.rocketchip.rocket._
import boom.v4.common._
import boom.v4.exu.BrUpdateInfo
import boom.v4.util._
class BoomWritebackUnit(implicit edge: TLEdgeOut, p: Parameters) extends L1HellaCacheModule()(p) {
val io = IO(new Bundle {
val req = Flipped(Decoupled(new WritebackReq(edge.bundle)))
val meta_read = Decoupled(new L1MetaReadReq)
val resp = Output(Bool())
val idx = Output(Valid(UInt()))
val data_req = Decoupled(new L1DataReadReq)
val data_resp = Input(UInt(encRowBits.W))
val mem_grant = Input(Bool())
val release = Decoupled(new TLBundleC(edge.bundle))
val lsu_release = Decoupled(new TLBundleC(edge.bundle))
})
val req = Reg(new WritebackReq(edge.bundle))
val s_invalid :: s_fill_buffer :: s_lsu_release :: s_active :: s_grant :: Nil = Enum(5)
val state = RegInit(s_invalid)
val r1_data_req_fired = RegInit(false.B)
val r2_data_req_fired = RegInit(false.B)
val r1_data_req_cnt = Reg(UInt(log2Up(refillCycles+1).W))
val r2_data_req_cnt = Reg(UInt(log2Up(refillCycles+1).W))
val data_req_cnt = RegInit(0.U(log2Up(refillCycles+1).W))
val (_, last_beat, all_beats_done, beat_count) = edge.count(io.release)
val wb_buffer = Reg(Vec(refillCycles, UInt(encRowBits.W)))
val acked = RegInit(false.B)
io.idx.valid := state =/= s_invalid
io.idx.bits := req.idx
io.release.valid := false.B
io.release.bits := DontCare
io.req.ready := false.B
io.meta_read.valid := false.B
io.meta_read.bits := DontCare
io.data_req.valid := false.B
io.data_req.bits := DontCare
io.resp := false.B
io.lsu_release.valid := false.B
io.lsu_release.bits := DontCare
val r_address = Cat(req.tag, req.idx) << blockOffBits
val id = cfg.nMSHRs
val probeResponse = edge.ProbeAck(
fromSource = req.source,
toAddress = r_address,
lgSize = lgCacheBlockBytes.U,
reportPermissions = req.param,
data = wb_buffer(data_req_cnt))
val voluntaryRelease = edge.Release(
fromSource = id.U,
toAddress = r_address,
lgSize = lgCacheBlockBytes.U,
shrinkPermissions = req.param,
data = wb_buffer(data_req_cnt))._2
when (state === s_invalid) {
io.req.ready := true.B
when (io.req.fire) {
state := s_fill_buffer
data_req_cnt := 0.U
req := io.req.bits
acked := false.B
}
} .elsewhen (state === s_fill_buffer) {
io.meta_read.valid := data_req_cnt < refillCycles.U
io.meta_read.bits.idx := req.idx
io.meta_read.bits.tag := req.tag
io.data_req.valid := data_req_cnt < refillCycles.U
io.data_req.bits.way_en := req.way_en
io.data_req.bits.addr := (if(refillCycles > 1)
Cat(req.idx, data_req_cnt(log2Up(refillCycles)-1,0))
else req.idx) << rowOffBits
r1_data_req_fired := false.B
r1_data_req_cnt := 0.U
r2_data_req_fired := r1_data_req_fired
r2_data_req_cnt := r1_data_req_cnt
when (io.data_req.fire && io.meta_read.fire) {
r1_data_req_fired := true.B
r1_data_req_cnt := data_req_cnt
data_req_cnt := data_req_cnt + 1.U
}
when (r2_data_req_fired) {
wb_buffer(r2_data_req_cnt) := io.data_resp
when (r2_data_req_cnt === (refillCycles-1).U) {
io.resp := true.B
state := s_lsu_release
data_req_cnt := 0.U
}
}
} .elsewhen (state === s_lsu_release) {
io.lsu_release.valid := true.B
io.lsu_release.bits := probeResponse
when (io.lsu_release.fire) {
state := s_active
}
} .elsewhen (state === s_active) {
io.release.valid := data_req_cnt < refillCycles.U
io.release.bits := Mux(req.voluntary, voluntaryRelease, probeResponse)
when (io.mem_grant) {
acked := true.B
}
when (io.release.fire) {
data_req_cnt := data_req_cnt + 1.U
}
when ((data_req_cnt === (refillCycles-1).U) && io.release.fire) {
state := Mux(req.voluntary, s_grant, s_invalid)
}
} .elsewhen (state === s_grant) {
when (io.mem_grant) {
acked := true.B
}
when (acked) {
state := s_invalid
}
}
}
class BoomProbeUnit(implicit edge: TLEdgeOut, p: Parameters) extends L1HellaCacheModule()(p) {
val io = IO(new Bundle {
val req = Flipped(Decoupled(new TLBundleB(edge.bundle)))
val rep = Decoupled(new TLBundleC(edge.bundle))
val meta_read = Decoupled(new L1MetaReadReq)
val meta_write = Decoupled(new L1MetaWriteReq)
val wb_req = Decoupled(new WritebackReq(edge.bundle))
val way_en = Input(UInt(nWays.W))
    val wb_rdy = Input(Bool()) // Is the writeback unit currently busy? If so, we need to retry the meta read when it's done
val mshr_rdy = Input(Bool()) // Is MSHR ready for this request to proceed?
val mshr_wb_rdy = Output(Bool()) // Should we block MSHR writebacks while we finish our own?
val block_state = Input(new ClientMetadata())
val lsu_release = Decoupled(new TLBundleC(edge.bundle))
val state = Output(Valid(UInt(coreMaxAddrBits.W)))
})
val (s_invalid :: s_meta_read :: s_meta_resp :: s_mshr_req ::
s_mshr_resp :: s_lsu_release :: s_release :: s_writeback_req :: s_writeback_resp ::
s_meta_write :: s_meta_write_resp :: Nil) = Enum(11)
val state = RegInit(s_invalid)
val req = Reg(new TLBundleB(edge.bundle))
val req_idx = req.address(idxMSB, idxLSB)
val req_tag = req.address >> untagBits
val way_en = Reg(UInt())
val tag_matches = way_en.orR
val old_coh = Reg(new ClientMetadata)
val miss_coh = ClientMetadata.onReset
val reply_coh = Mux(tag_matches, old_coh, miss_coh)
val (is_dirty, report_param, new_coh) = reply_coh.onProbe(req.param)
io.state.valid := state =/= s_invalid
io.state.bits := req.address
io.req.ready := state === s_invalid
io.rep.valid := state === s_release
io.rep.bits := edge.ProbeAck(req, report_param)
assert(!io.rep.valid || !edge.hasData(io.rep.bits),
"ProbeUnit should not send ProbeAcks with data, WritebackUnit should handle it")
io.meta_read.valid := state === s_meta_read
io.meta_read.bits.idx := req_idx
io.meta_read.bits.tag := req_tag
io.meta_read.bits.way_en := ~(0.U(nWays.W))
io.meta_write.valid := state === s_meta_write
io.meta_write.bits.way_en := way_en
io.meta_write.bits.idx := req_idx
io.meta_write.bits.tag := req_tag
io.meta_write.bits.data.tag := req_tag
io.meta_write.bits.data.coh := new_coh
io.wb_req.valid := state === s_writeback_req
io.wb_req.bits.source := req.source
io.wb_req.bits.idx := req_idx
io.wb_req.bits.tag := req_tag
io.wb_req.bits.param := report_param
io.wb_req.bits.way_en := way_en
io.wb_req.bits.voluntary := false.B
io.mshr_wb_rdy := !state.isOneOf(s_release, s_writeback_req, s_writeback_resp, s_meta_write, s_meta_write_resp)
io.lsu_release.valid := state === s_lsu_release
io.lsu_release.bits := edge.ProbeAck(req, report_param)
// state === s_invalid
when (state === s_invalid) {
when (io.req.fire) {
state := s_meta_read
req := io.req.bits
}
} .elsewhen (state === s_meta_read) {
when (io.meta_read.fire) {
state := s_meta_resp
}
} .elsewhen (state === s_meta_resp) {
// we need to wait one cycle for the metadata to be read from the array
state := s_mshr_req
} .elsewhen (state === s_mshr_req) {
old_coh := io.block_state
way_en := io.way_en
// if the read didn't go through, we need to retry
state := Mux(io.mshr_rdy && io.wb_rdy, s_mshr_resp, s_meta_read)
} .elsewhen (state === s_mshr_resp) {
state := Mux(tag_matches && is_dirty, s_writeback_req, s_lsu_release)
} .elsewhen (state === s_lsu_release) {
when (io.lsu_release.fire) {
state := s_release
}
} .elsewhen (state === s_release) {
when (io.rep.ready) {
state := Mux(tag_matches, s_meta_write, s_invalid)
}
} .elsewhen (state === s_writeback_req) {
when (io.wb_req.fire) {
state := s_writeback_resp
}
} .elsewhen (state === s_writeback_resp) {
// wait for the writeback request to finish before updating the metadata
when (io.wb_req.ready) {
state := s_meta_write
}
} .elsewhen (state === s_meta_write) {
when (io.meta_write.fire) {
state := s_meta_write_resp
}
} .elsewhen (state === s_meta_write_resp) {
state := s_invalid
}
}
class BoomL1MetaReadReq(implicit p: Parameters) extends BoomBundle()(p) {
val req = Vec(lsuWidth, new L1MetaReadReq)
}
class BoomL1DataReadReq(implicit p: Parameters) extends BoomBundle()(p) {
val req = Vec(lsuWidth, new L1DataReadReq)
val valid = Vec(lsuWidth, Bool())
}
abstract class AbstractBoomDataArray(implicit p: Parameters) extends BoomModule with HasL1HellaCacheParameters {
val io = IO(new BoomBundle {
val read = Input(Vec(lsuWidth, Valid(new L1DataReadReq)))
val write = Input(Valid(new L1DataWriteReq))
val resp = Output(Vec(lsuWidth, Vec(nWays, Bits(encRowBits.W))))
val s1_nacks = Output(Vec(lsuWidth, Bool()))
})
def pipeMap[T <: Data](f: Int => T) = VecInit((0 until lsuWidth).map(f))
}
class BoomDuplicatedDataArray(implicit p: Parameters) extends AbstractBoomDataArray
{
val waddr = io.write.bits.addr >> rowOffBits
for (j <- 0 until lsuWidth) {
val raddr = io.read(j).bits.addr >> rowOffBits
for (w <- 0 until nWays) {
val array = DescribedSRAM(
name = s"array_${w}_${j}",
desc = "Non-blocking DCache Data Array",
size = nSets * refillCycles,
data = Vec(rowWords, Bits(encDataBits.W))
)
when (io.write.bits.way_en(w) && io.write.valid) {
val data = VecInit((0 until rowWords) map (i => io.write.bits.data(encDataBits*(i+1)-1,encDataBits*i)))
array.write(waddr, data, io.write.bits.wmask.asBools)
}
if (dcacheSinglePorted)
io.resp(j)(w) := RegNext(array.read(raddr, io.read(j).bits.way_en(w) && io.read(j).valid).asUInt)
else
io.resp(j)(w) := RegNext(array.read(raddr, io.read(j).valid).asUInt)
}
io.s1_nacks(j) := false.B
}
}
class BoomBankedDataArray(implicit p: Parameters) extends AbstractBoomDataArray {
val nBanks = boomParams.numDCacheBanks
val bankSize = nSets * refillCycles / nBanks
require (nBanks >= lsuWidth)
require (bankSize > 0)
val bankBits = log2Ceil(nBanks)
val bankOffBits = log2Ceil(rowWords) + log2Ceil(wordBytes)
val bidxBits = log2Ceil(bankSize)
val bidxOffBits = bankOffBits + bankBits
//----------------------------------------------------------------------------------------------------
val s0_rbanks = if (nBanks > 1) VecInit(io.read.map(r => (r.bits.addr >> bankOffBits)(bankBits-1,0))) else VecInit(0.U)
val s0_wbank = if (nBanks > 1) (io.write.bits.addr >> bankOffBits)(bankBits-1,0) else 0.U
val s0_ridxs = VecInit(io.read.map(r => (r.bits.addr >> bidxOffBits)(bidxBits-1,0)))
val s0_widx = (io.write.bits.addr >> bidxOffBits)(bidxBits-1,0)
val s0_read_valids = VecInit(io.read.map(_.valid))
val s0_bank_conflicts = pipeMap(w => {
((s0_rbanks(w) === s0_wbank) && io.write.valid && dcacheSinglePorted.B) ||
(0 until w).foldLeft(false.B)((c,i) => c || io.read(i).valid && s0_rbanks(i) === s0_rbanks(w))
})
val s0_do_bank_read = s0_read_valids zip s0_bank_conflicts map {case (v,c) => v && !c}
val s0_bank_read_gnts = Transpose(VecInit(s0_rbanks zip s0_do_bank_read map {case (b,d) => VecInit((UIntToOH(b) & Fill(nBanks,d)).asBools)}))
val s0_bank_write_gnt = (UIntToOH(s0_wbank) & Fill(nBanks, io.write.valid)).asBools
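  // Bank arbitration above is strictly priority-ordered: a read in pipe w loses
  // its bank when an older (lower-indexed) pipe wants the same bank this cycle,
  // or when a write would collide on a single-ported array; losing reads are
  // reported one cycle later through s1_nacks.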
//----------------------------------------------------------------------------------------------------
val s1_rbanks = RegNext(s0_rbanks)
val s1_ridxs = RegNext(s0_ridxs)
val s1_read_valids = RegNext(s0_read_valids)
val s1_pipe_selection = pipeMap(i => VecInit(PriorityEncoderOH(pipeMap(j =>
if (j < i) s1_read_valids(j) && s1_rbanks(j) === s1_rbanks(i)
else if (j == i) true.B else false.B))))
val s1_ridx_match = pipeMap(i => pipeMap(j => if (j < i) s1_ridxs(j) === s1_ridxs(i)
else if (j == i) true.B else false.B))
val s1_nacks = pipeMap(w => s1_read_valids(w)
&& (!RegNext(s0_do_bank_read(w)) || (s1_pipe_selection(w).asUInt & ~s1_ridx_match(w).asUInt).orR)
)
val s1_bank_selection = pipeMap(w => Mux1H(s1_pipe_selection(w), s1_rbanks))
//----------------------------------------------------------------------------------------------------
val s2_bank_selection = RegNext(s1_bank_selection)
io.s1_nacks := s1_nacks
val data_arrays = Seq.tabulate(nBanks) {
b => DescribedSRAM(
name = s"array_${b}",
desc = "Boom DCache data array",
size = bankSize,
data = Vec(nWays * rowWords, Bits(encDataBits.W))
)
}
val s2_bank_reads = Reg(Vec(nBanks, Vec(nWays, Bits(encRowBits.W))))
for (b <- 0 until nBanks) {
val array = data_arrays(b)
val ridx = Mux1H(s0_bank_read_gnts(b), s0_ridxs)
val way_en = Mux1H(s0_bank_read_gnts(b), io.read.map(_.bits.way_en))
val write_en = s0_bank_write_gnt(b)
val write_mask = Cat(Seq.tabulate(nWays) { w =>
Mux(io.write.bits.way_en(w), io.write.bits.wmask, 0.U(rowWords.W))
}.reverse).asBools
val read_en = WireInit(s0_bank_read_gnts(b).reduce(_||_))
s2_bank_reads(b) := (if (dcacheSinglePorted) {
assert(!(read_en && write_en))
array.read(ridx, !write_en && read_en)
} else {
array.read(ridx, read_en)
}).asTypeOf(Vec(nWays, Bits(encRowBits.W)))
when (write_en) {
val data = Wire(Vec(nWays * rowWords, Bits(encDataBits.W)))
for (w <- 0 until nWays) {
for (i <- 0 until rowWords) {
data(w*rowWords+i) := io.write.bits.data(encDataBits*(i+1)-1,encDataBits*i)
}
}
array.write(s0_widx, data, write_mask)
}
}
for (w <- 0 until nWays) {
for (i <- 0 until lsuWidth) {
io.resp(i)(w) := s2_bank_reads(s2_bank_selection(i))(w)
}
}
}
/**
* Top level class wrapping a non-blocking dcache.
*
* @param hartid hardware thread for the cache
*/
class BoomNonBlockingDCache(staticIdForMetadataUseOnly: Int)(implicit p: Parameters) extends LazyModule
{
private val tileParams = p(TileKey)
protected val cfg = tileParams.dcache.get
protected def cacheClientParameters = cfg.scratch.map(x => Seq()).getOrElse(Seq(TLMasterParameters.v1(
name = s"Core ${staticIdForMetadataUseOnly} DCache",
sourceId = IdRange(0, 1 max (cfg.nMSHRs + 1)),
supportsProbe = TransferSizes(cfg.blockBytes, cfg.blockBytes))))
protected def mmioClientParameters = Seq(TLMasterParameters.v1(
name = s"Core ${staticIdForMetadataUseOnly} DCache MMIO",
sourceId = IdRange(cfg.nMSHRs + 1, cfg.nMSHRs + 1 + cfg.nMMIOs),
requestFifo = true))
val node = TLClientNode(Seq(TLMasterPortParameters.v1(
cacheClientParameters ++ mmioClientParameters,
minLatency = 1)))
lazy val module = new BoomNonBlockingDCacheModule(this)
def flushOnFenceI = cfg.scratch.isEmpty && !node.edges.out(0).manager.managers.forall(m => !m.supportsAcquireT || !m.executable || m.regionType >= RegionType.TRACKED || m.regionType <= RegionType.IDEMPOTENT)
require(!tileParams.core.haveCFlush || cfg.scratch.isEmpty, "CFLUSH_D_L1 instruction requires a D$")
}
class BoomDCacheBundle(implicit p: Parameters, edge: TLEdgeOut) extends BoomBundle()(p) {
val errors = new DCacheErrors
val lsu = Flipped(new LSUDMemIO)
}
class BoomNonBlockingDCacheModule(outer: BoomNonBlockingDCache) extends LazyModuleImp(outer)
with HasL1HellaCacheParameters
with HasBoomCoreParameters
{
implicit val edge = outer.node.edges.out(0)
val (tl_out, _) = outer.node.out(0)
val io = IO(new BoomDCacheBundle)
io.errors := DontCare
private val fifoManagers = edge.manager.managers.filter(TLFIFOFixer.allVolatile)
fifoManagers.foreach { m =>
require (m.fifoId == fifoManagers.head.fifoId,
s"IOMSHRs must be FIFO for all regions with effects, but HellaCache sees ${m.nodePath.map(_.name)}")
}
def widthMap[T <: Data](f: Int => T) = VecInit((0 until lsuWidth).map(f))
val t_replay :: t_probe :: t_wb :: t_mshr_meta_read :: t_lsu :: t_prefetch :: Nil = Enum(6)
val wb = Module(new BoomWritebackUnit)
val prober = Module(new BoomProbeUnit)
val mshrs = Module(new BoomMSHRFile)
mshrs.io.clear_all := io.lsu.force_order
mshrs.io.brupdate := io.lsu.brupdate
mshrs.io.exception := io.lsu.exception
mshrs.io.rob_pnr_idx := io.lsu.rob_pnr_idx
mshrs.io.rob_head_idx := io.lsu.rob_head_idx
// tags
def onReset = L1Metadata(0.U, ClientMetadata.onReset)
val meta = Seq.fill(lsuWidth) { Module(new L1MetadataArray(onReset _)) }
val metaWriteArb = Module(new Arbiter(new L1MetaWriteReq, 2))
// 0 goes to MSHR refills, 1 goes to prober
val metaReadArb = Module(new Arbiter(new BoomL1MetaReadReq, 6))
// 0 goes to MSHR replays, 1 goes to prober, 2 goes to wb, 3 goes to MSHR meta read,
// 4 goes to pipeline, 5 goes to prefetcher
metaReadArb.io.in := DontCare
for (w <- 0 until lsuWidth) {
meta(w).io.write.valid := metaWriteArb.io.out.fire
meta(w).io.write.bits := metaWriteArb.io.out.bits
meta(w).io.read.valid := metaReadArb.io.out.valid
meta(w).io.read.bits := metaReadArb.io.out.bits.req(w)
}
metaReadArb.io.out.ready := meta.map(_.io.read.ready).reduce(_||_)
metaWriteArb.io.out.ready := meta.map(_.io.write.ready).reduce(_||_)
// data
val data = Module(if (boomParams.numDCacheBanks == 1) new BoomDuplicatedDataArray else new BoomBankedDataArray)
val dataWriteArb = Module(new Arbiter(new L1DataWriteReq, 2))
// 0 goes to pipeline, 1 goes to MSHR refills
val dataReadArb = Module(new Arbiter(new BoomL1DataReadReq, 3))
// 0 goes to MSHR replays, 1 goes to wb, 2 goes to pipeline
dataReadArb.io.in := DontCare
for (w <- 0 until lsuWidth) {
data.io.read(w).valid := dataReadArb.io.out.bits.valid(w) && dataReadArb.io.out.valid
data.io.read(w).bits := dataReadArb.io.out.bits.req(w)
}
dataReadArb.io.out.ready := true.B
data.io.write.valid := dataWriteArb.io.out.fire
data.io.write.bits := dataWriteArb.io.out.bits
dataWriteArb.io.out.ready := true.B
val singlePortedDCacheWrite = data.io.write.valid && dcacheSinglePorted.B
// ------------
// New requests
// In a 1-wide LSU, load/store wakeups and MSHR resps contend for same port, so
  // we should block incoming requests when the MSHR is trying to respond
val block_incoming_reqs = (lsuWidth == 1).B && mshrs.io.resp.valid
io.lsu.req.ready := metaReadArb.io.in(4).ready && dataReadArb.io.in(2).ready && !block_incoming_reqs
metaReadArb.io.in(4).valid := io.lsu.req.valid && !block_incoming_reqs
dataReadArb.io.in(2).valid := io.lsu.req.valid && !block_incoming_reqs
for (w <- 0 until lsuWidth) {
// Tag read for new requests
metaReadArb.io.in(4).bits.req(w).idx := io.lsu.req.bits(w).bits.addr >> blockOffBits
metaReadArb.io.in(4).bits.req(w).way_en := DontCare
metaReadArb.io.in(4).bits.req(w).tag := DontCare
// Data read for new requests
dataReadArb.io.in(2).bits.valid(w) := io.lsu.req.bits(w).valid
dataReadArb.io.in(2).bits.req(w).addr := io.lsu.req.bits(w).bits.addr
dataReadArb.io.in(2).bits.req(w).way_en := ~0.U(nWays.W)
}
// ------------
// MSHR Replays
val replay_req = Wire(Vec(lsuWidth, new BoomDCacheReq))
replay_req := DontCare
replay_req(0).uop := mshrs.io.replay.bits.uop
replay_req(0).addr := mshrs.io.replay.bits.addr
replay_req(0).data := mshrs.io.replay.bits.data
replay_req(0).is_hella := mshrs.io.replay.bits.is_hella
// Don't let replays get nacked due to conflict with dcache write
mshrs.io.replay.ready := metaReadArb.io.in(0).ready && dataReadArb.io.in(0).ready && !singlePortedDCacheWrite
// Tag read for MSHR replays
// We don't actually need to read the metadata, for replays we already know our way
metaReadArb.io.in(0).valid := mshrs.io.replay.valid && !singlePortedDCacheWrite
metaReadArb.io.in(0).bits.req(0).idx := mshrs.io.replay.bits.addr >> blockOffBits
metaReadArb.io.in(0).bits.req(0).way_en := DontCare
metaReadArb.io.in(0).bits.req(0).tag := DontCare
// Data read for MSHR replays
dataReadArb.io.in(0).valid := mshrs.io.replay.valid && !singlePortedDCacheWrite
dataReadArb.io.in(0).bits.req(0).addr := mshrs.io.replay.bits.addr
dataReadArb.io.in(0).bits.req(0).way_en := mshrs.io.replay.bits.way_en
dataReadArb.io.in(0).bits.valid := widthMap(w => (w == 0).B)
// -----------
// MSHR Meta read
val mshr_read_req = Wire(Vec(lsuWidth, new BoomDCacheReq))
mshr_read_req := DontCare
mshr_read_req(0).uop := NullMicroOp
mshr_read_req(0).addr := Cat(mshrs.io.meta_read.bits.tag, mshrs.io.meta_read.bits.idx) << blockOffBits
mshr_read_req(0).data := DontCare
mshr_read_req(0).is_hella := false.B
metaReadArb.io.in(3).valid := mshrs.io.meta_read.valid
metaReadArb.io.in(3).bits.req(0) := mshrs.io.meta_read.bits
mshrs.io.meta_read.ready := metaReadArb.io.in(3).ready
// -----------
// Write-backs
val wb_fire = wb.io.meta_read.fire && wb.io.data_req.fire
val wb_req = Wire(Vec(lsuWidth, new BoomDCacheReq))
wb_req := DontCare
wb_req(0).uop := NullMicroOp
wb_req(0).addr := Cat(wb.io.meta_read.bits.tag, wb.io.data_req.bits.addr)
wb_req(0).data := DontCare
wb_req(0).is_hella := false.B
// Couple the two decoupled interfaces of the WBUnit's meta_read and data_read
// Can't launch data read if possibility of conflict w. write
// Tag read for write-back
metaReadArb.io.in(2).valid := wb.io.meta_read.valid && !singlePortedDCacheWrite
metaReadArb.io.in(2).bits.req(0) := wb.io.meta_read.bits
wb.io.meta_read.ready := metaReadArb.io.in(2).ready && dataReadArb.io.in(1).ready && !singlePortedDCacheWrite
// Data read for write-back
dataReadArb.io.in(1).valid := wb.io.data_req.valid && !singlePortedDCacheWrite
dataReadArb.io.in(1).bits.req(0) := wb.io.data_req.bits
dataReadArb.io.in(1).bits.valid := widthMap(w => (w == 0).B)
wb.io.data_req.ready := metaReadArb.io.in(2).ready && dataReadArb.io.in(1).ready && !singlePortedDCacheWrite
assert(!(wb.io.meta_read.fire ^ wb.io.data_req.fire))
// -------
// Prober
val prober_fire = prober.io.meta_read.fire
val prober_req = Wire(Vec(lsuWidth, new BoomDCacheReq))
prober_req := DontCare
prober_req(0).uop := NullMicroOp
prober_req(0).addr := Cat(prober.io.meta_read.bits.tag, prober.io.meta_read.bits.idx) << blockOffBits
prober_req(0).data := DontCare
prober_req(0).is_hella := false.B
// Tag read for prober
metaReadArb.io.in(1).valid := prober.io.meta_read.valid
metaReadArb.io.in(1).bits.req(0) := prober.io.meta_read.bits
prober.io.meta_read.ready := metaReadArb.io.in(1).ready
// Prober does not need to read data array
// -------
// Prefetcher
val prefetch_fire = mshrs.io.prefetch.fire
val prefetch_req = Wire(Vec(lsuWidth, new BoomDCacheReq))
prefetch_req := DontCare
prefetch_req(0) := mshrs.io.prefetch.bits
// Tag read for prefetch
metaReadArb.io.in(5).valid := mshrs.io.prefetch.valid
metaReadArb.io.in(5).bits.req(0).idx := mshrs.io.prefetch.bits.addr >> blockOffBits
metaReadArb.io.in(5).bits.req(0).way_en := DontCare
metaReadArb.io.in(5).bits.req(0).tag := DontCare
mshrs.io.prefetch.ready := metaReadArb.io.in(5).ready
// Prefetch does not need to read data array
val s0_valid = Mux(io.lsu.req.fire, VecInit(io.lsu.req.bits.map(_.valid)),
Mux(mshrs.io.replay.fire || wb_fire || prober_fire || prefetch_fire || mshrs.io.meta_read.fire,
VecInit(1.U(lsuWidth.W).asBools), VecInit(0.U(lsuWidth.W).asBools)))
val s0_req = Mux(io.lsu.req.fire , VecInit(io.lsu.req.bits.map(_.bits)),
Mux(wb_fire , wb_req,
Mux(prober_fire , prober_req,
Mux(prefetch_fire , prefetch_req,
Mux(mshrs.io.meta_read.fire, mshr_read_req
, replay_req)))))
val s0_type = Mux(io.lsu.req.fire , t_lsu,
Mux(wb_fire , t_wb,
Mux(prober_fire , t_probe,
Mux(prefetch_fire , t_prefetch,
Mux(mshrs.io.meta_read.fire, t_mshr_meta_read
, t_replay)))))
// Does this request need to send a response or nack
val s0_send_resp_or_nack = Mux(io.lsu.req.fire, s0_valid,
VecInit(Mux(mshrs.io.replay.fire && isRead(mshrs.io.replay.bits.uop.mem_cmd), 1.U(lsuWidth.W), 0.U(lsuWidth.W)).asBools))
val s1_req = RegNext(s0_req)
for (w <- 0 until lsuWidth)
s1_req(w).uop.br_mask := GetNewBrMask(io.lsu.brupdate, s0_req(w).uop)
val s2_store_failed = Wire(Bool())
val s1_valid = widthMap(w =>
RegNext(s0_valid(w) &&
!IsKilledByBranch(io.lsu.brupdate, false.B, s0_req(w).uop) &&
!(io.lsu.exception && s0_req(w).uop.uses_ldq) &&
!(s2_store_failed && io.lsu.req.fire && s0_req(w).uop.uses_stq),
init=false.B))
for (w <- 0 until lsuWidth)
assert(!(io.lsu.s1_kill(w) && !RegNext(io.lsu.req.fire) && !RegNext(io.lsu.req.bits(w).valid)))
val s1_addr = s1_req.map(_.addr)
val s1_nack = s1_addr.map(a => a(idxMSB,idxLSB) === prober.io.meta_write.bits.idx && !prober.io.req.ready)
val s1_send_resp_or_nack = RegNext(s0_send_resp_or_nack)
val s1_type = RegNext(s0_type)
val s1_mshr_meta_read_way_en = RegNext(mshrs.io.meta_read.bits.way_en)
val s1_replay_way_en = RegNext(mshrs.io.replay.bits.way_en) // For replays, the metadata isn't written yet
val s1_wb_way_en = RegNext(wb.io.data_req.bits.way_en)
// tag check
def wayMap[T <: Data](f: Int => T) = VecInit((0 until nWays).map(f))
val s1_tag_eq_way = widthMap(i => wayMap((w: Int) => meta(i).io.resp(w).tag === (s1_addr(i) >> untagBits)).asUInt)
val s1_tag_match_way = widthMap(i =>
Mux(s1_type === t_replay, s1_replay_way_en,
Mux(s1_type === t_wb, s1_wb_way_en,
Mux(s1_type === t_mshr_meta_read, s1_mshr_meta_read_way_en,
wayMap((w: Int) => s1_tag_eq_way(i)(w) && meta(i).io.resp(w).coh.isValid()).asUInt))))
val s1_wb_idx_matches = widthMap(i => (s1_addr(i)(untagBits-1,blockOffBits) === wb.io.idx.bits) && wb.io.idx.valid)
for (w <- 0 until lsuWidth) {
io.lsu.s1_nack_advisory(w) := data.io.s1_nacks(w)
}
val s2_req = RegNext(s1_req)
val s2_type = RegNext(s1_type)
val s2_valid = widthMap(w =>
RegNext(s1_valid(w) &&
!io.lsu.s1_kill(w) &&
!IsKilledByBranch(io.lsu.brupdate, false.B, s1_req(w).uop) &&
!(io.lsu.exception && s1_req(w).uop.uses_ldq) &&
!(s2_store_failed && (s1_type === t_lsu) && s1_req(w).uop.uses_stq)))
for (w <- 0 until lsuWidth)
s2_req(w).uop.br_mask := GetNewBrMask(io.lsu.brupdate, s1_req(w).uop)
val s2_tag_match_way = RegNext(s1_tag_match_way)
val s2_tag_match = s2_tag_match_way.map(_.orR)
val s2_hit_state = widthMap(i => Mux1H(s2_tag_match_way(i), wayMap((w: Int) => RegNext(meta(i).io.resp(w).coh))))
val s2_has_permission = widthMap(w => s2_hit_state(w).onAccess(s2_req(w).uop.mem_cmd)._1)
val s2_new_hit_state = widthMap(w => s2_hit_state(w).onAccess(s2_req(w).uop.mem_cmd)._3)
val s2_hit = widthMap(w => (s2_tag_match(w) && s2_has_permission(w) && s2_hit_state(w) === s2_new_hit_state(w) && !mshrs.io.block_hit(w)) || s2_type.isOneOf(t_replay, t_wb))
val s2_nack = Wire(Vec(lsuWidth, Bool()))
assert(!(s2_type === t_replay && !s2_hit(0)), "Replays should always hit")
assert(!(s2_type === t_wb && !s2_hit(0)), "Writeback should always see data hit")
val s2_wb_idx_matches = RegNext(s1_wb_idx_matches)
// lr/sc
val debug_sc_fail_addr = RegInit(0.U)
val debug_sc_fail_cnt = RegInit(0.U(8.W))
val lrsc_count = RegInit(0.U(log2Ceil(lrscCycles).W))
val lrsc_valid = lrsc_count > lrscBackoff.U
val lrsc_addr = Reg(UInt())
val s2_lr = s2_req(0).uop.mem_cmd === M_XLR && (!RegNext(s1_nack(0)) || s2_type === t_replay)
val s2_sc = s2_req(0).uop.mem_cmd === M_XSC && (!RegNext(s1_nack(0)) || s2_type === t_replay)
val s2_lrsc_addr_match = widthMap(w => lrsc_valid && lrsc_addr === (s2_req(w).addr >> blockOffBits))
val s2_sc_fail = s2_sc && !s2_lrsc_addr_match(0)
when (lrsc_count > 0.U) { lrsc_count := lrsc_count - 1.U }
when (s2_valid(0) && ((s2_type === t_lsu && s2_hit(0) && !s2_nack(0)) ||
(s2_type === t_replay && s2_req(0).uop.mem_cmd =/= M_FLUSH_ALL))) {
when (s2_lr) {
lrsc_count := (lrscCycles - 1).U
lrsc_addr := s2_req(0).addr >> blockOffBits
}
when (lrsc_count > 0.U) {
lrsc_count := 0.U
}
}
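  // A successful LR arms the reservation counter for (lrscCycles-1) cycles and
  // records the block address; any later access that reaches this point while
  // the counter is nonzero drops the reservation, so an SC only succeeds while
  // lrsc_count is still above lrscBackoff and the reserved block address matches.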
for (w <- 0 until lsuWidth) {
when (s2_valid(w) &&
s2_type === t_lsu &&
!s2_hit(w) &&
!(s2_has_permission(w) && s2_tag_match(w)) &&
s2_lrsc_addr_match(w) &&
!s2_nack(w)) {
lrsc_count := 0.U
}
}
when (s2_valid(0)) {
when (s2_req(0).addr === debug_sc_fail_addr) {
when (s2_sc_fail) {
debug_sc_fail_cnt := debug_sc_fail_cnt + 1.U
} .elsewhen (s2_sc) {
debug_sc_fail_cnt := 0.U
}
} .otherwise {
when (s2_sc_fail) {
debug_sc_fail_addr := s2_req(0).addr
debug_sc_fail_cnt := 1.U
}
}
}
assert(debug_sc_fail_cnt < 100.U, "L1DCache failed too many SCs in a row")
val s2_data = Wire(Vec(lsuWidth, Vec(nWays, UInt(encRowBits.W))))
for (i <- 0 until lsuWidth) {
for (w <- 0 until nWays) {
s2_data(i)(w) := data.io.resp(i)(w)
}
}
val s2_data_muxed = widthMap(w => Mux1H(s2_tag_match_way(w), s2_data(w)))
val s2_word_idx = widthMap(w => if (rowWords == 1) 0.U else s2_req(w).addr(log2Up(rowWords*wordBytes)-1, log2Up(wordBytes)))
// replacement policy
val replacer = cacheParams.replacement
val s1_replaced_way_en = UIntToOH(replacer.way)
val s2_replaced_way_en = UIntToOH(RegNext(replacer.way))
val s2_repl_meta = widthMap(i => Mux1H(s2_replaced_way_en, wayMap((w: Int) => RegNext(meta(i).io.resp(w))).toSeq))
// nack because of incoming probe
val s2_nack_hit = RegNext(VecInit(s1_nack))
// Nack when we hit something currently being evicted
val s2_nack_victim = widthMap(w => s2_valid(w) && s2_hit(w) && mshrs.io.secondary_miss(w))
// MSHRs not ready for request
val s2_nack_miss = widthMap(w => s2_valid(w) && !s2_hit(w) && !mshrs.io.req(w).ready)
// Bank conflict on data arrays
val s2_nack_data = widthMap(w => s2_valid(w) && RegNext(data.io.s1_nacks(w)))
// Can't allocate MSHR for same set currently being written back
val s2_nack_wb = widthMap(w => s2_valid(w) && !s2_hit(w) && s2_wb_idx_matches(w))
s2_nack := widthMap(w => (s2_nack_miss(w) || s2_nack_hit(w) || s2_nack_victim(w) || s2_nack_data(w) || s2_nack_wb(w)) && s2_type =/= t_replay)
assert(!(s2_nack_data.reduce(_||_) && s2_type.isOneOf(t_replay, t_wb)))
val s2_send_resp = widthMap(w => (
RegNext(s1_send_resp_or_nack(w)) &&
(!(s2_nack_hit(w) || s2_nack_victim(w) || s2_nack_data(w)) || s2_type === t_replay) &&
s2_hit(w) && isRead(s2_req(w).uop.mem_cmd)
))
val s2_send_store_ack = widthMap(w => (
RegNext(s1_send_resp_or_nack(w)) && !s2_nack(w) && isWrite(s2_req(w).uop.mem_cmd) &&
(s2_hit(w) || mshrs.io.req(w).fire)))
val s2_send_nack = widthMap(w => (RegNext(s1_send_resp_or_nack(w)) && s2_nack(w)))
for (w <- 0 until lsuWidth)
assert(!(s2_send_resp(w) && s2_send_nack(w)))
// hits always send a response
// If MSHR is not available, LSU has to replay this request later
  // If MSHR is available and this is only a store (not an amo), we don't need to wait for resp later
s2_store_failed := s2_valid(0) && s2_nack(0) && s2_send_nack(0) && s2_req(0).uop.uses_stq
// Miss handling
for (w <- 0 until lsuWidth) {
mshrs.io.req(w).valid := s2_valid(w) &&
!s2_hit(w) &&
!s2_nack_hit(w) &&
!s2_nack_victim(w) &&
!s2_nack_data(w) &&
!s2_nack_wb(w) &&
s2_type.isOneOf(t_lsu, t_prefetch) &&
!(io.lsu.exception && s2_req(w).uop.uses_ldq) &&
(isPrefetch(s2_req(w).uop.mem_cmd) ||
isRead(s2_req(w).uop.mem_cmd) ||
isWrite(s2_req(w).uop.mem_cmd))
assert(!(mshrs.io.req(w).valid && s2_type === t_replay), "Replays should not need to go back into MSHRs")
mshrs.io.req(w).bits := DontCare
mshrs.io.req(w).bits.uop := s2_req(w).uop
mshrs.io.req(w).bits.addr := s2_req(w).addr
mshrs.io.req(w).bits.tag_match := s2_tag_match(w)
mshrs.io.req(w).bits.old_meta := Mux(s2_tag_match(w), L1Metadata(s2_repl_meta(w).tag, s2_hit_state(w)), s2_repl_meta(w))
mshrs.io.req(w).bits.way_en := Mux(s2_tag_match(w), s2_tag_match_way(w), s2_replaced_way_en)
mshrs.io.req(w).bits.data := s2_req(w).data
mshrs.io.req(w).bits.is_hella := s2_req(w).is_hella
mshrs.io.req_is_probe(w) := s2_type === t_probe && s2_valid(w)
}
mshrs.io.meta_resp.valid := !s2_nack_hit(0) || prober.io.mshr_wb_rdy
mshrs.io.meta_resp.bits := Mux1H(s2_tag_match_way(0), RegNext(meta(0).io.resp))
when (mshrs.io.req.map(_.fire).reduce(_||_)) { replacer.miss }
tl_out.a <> mshrs.io.mem_acquire
// probes and releases
prober.io.req.valid := tl_out.b.valid && !lrsc_valid
tl_out.b.ready := prober.io.req.ready && !lrsc_valid
prober.io.req.bits := tl_out.b.bits
prober.io.way_en := s2_tag_match_way(0)
prober.io.block_state := s2_hit_state(0)
metaWriteArb.io.in(1) <> prober.io.meta_write
prober.io.mshr_rdy := mshrs.io.probe_rdy
prober.io.wb_rdy := (prober.io.meta_write.bits.idx =/= wb.io.idx.bits) || !wb.io.idx.valid
mshrs.io.prober_state := prober.io.state
// refills
when (tl_out.d.bits.source === cfg.nMSHRs.U) {
// This should be ReleaseAck
tl_out.d.ready := true.B
mshrs.io.mem_grant.valid := false.B
mshrs.io.mem_grant.bits := DontCare
} .otherwise {
// This should be GrantData
mshrs.io.mem_grant <> tl_out.d
}
dataWriteArb.io.in(1) <> mshrs.io.refill
metaWriteArb.io.in(0) <> mshrs.io.meta_write
tl_out.e <> mshrs.io.mem_finish
// writebacks
val wbArb = Module(new Arbiter(new WritebackReq(edge.bundle), 2))
// 0 goes to prober, 1 goes to MSHR evictions
wbArb.io.in(0) <> prober.io.wb_req
wbArb.io.in(1) <> mshrs.io.wb_req
wb.io.req <> wbArb.io.out
wb.io.data_resp := s2_data_muxed(0)
mshrs.io.wb_resp := wb.io.resp
wb.io.mem_grant := tl_out.d.fire && tl_out.d.bits.source === cfg.nMSHRs.U
val lsu_release_arb = Module(new Arbiter(new TLBundleC(edge.bundle), 2))
io.lsu.release <> lsu_release_arb.io.out
lsu_release_arb.io.in(0) <> wb.io.lsu_release
lsu_release_arb.io.in(1) <> prober.io.lsu_release
TLArbiter.lowest(edge, tl_out.c, wb.io.release, prober.io.rep)
io.lsu.perf.release := edge.done(tl_out.c)
io.lsu.perf.acquire := edge.done(tl_out.a)
// load data gen
val s2_data_word_prebypass = widthMap(w => s2_data_muxed(w) >> Cat(s2_word_idx(w), 0.U(log2Ceil(coreDataBits).W)))
val s2_data_word = Wire(Vec(lsuWidth, UInt()))
val loadgen = (0 until lsuWidth).map { w =>
new LoadGen(s2_req(w).uop.mem_size, s2_req(w).uop.mem_signed, s2_req(w).addr,
s2_data_word(w), s2_sc && (w == 0).B, wordBytes)
}
// Mux between cache responses and uncache responses
for (w <- 0 until lsuWidth) {
io.lsu.resp(w).valid := s2_valid(w) && s2_send_resp(w)
io.lsu.resp(w).bits.uop := s2_req(w).uop
io.lsu.resp(w).bits.data := loadgen(w).data | s2_sc_fail
io.lsu.resp(w).bits.is_hella := s2_req(w).is_hella
io.lsu.nack(w).valid := s2_valid(w) && s2_send_nack(w)
io.lsu.nack(w).bits := s2_req(w)
assert(!(io.lsu.nack(w).valid && s2_type =/= t_lsu))
io.lsu.store_ack(w).valid := s2_valid(w) && s2_send_store_ack(w) && (w == 0).B
io.lsu.store_ack(w).bits := s2_req(w)
}
io.lsu.ll_resp <> mshrs.io.resp
// Store/amo hits
val s3_req = Wire(new BoomDCacheReq)
s3_req := RegNext(s2_req(0))
val s3_valid = RegNext(s2_valid(0) && s2_hit(0) && isWrite(s2_req(0).uop.mem_cmd) &&
!s2_sc_fail && !(s2_send_nack(0) && s2_nack(0)))
val s3_data_word = RegNext(s2_data_word(0))
for (w <- 1 until lsuWidth) {
assert(!(s2_valid(w) && s2_hit(w) && isWrite(s2_req(w).uop.mem_cmd) &&
!s2_sc_fail && !(s2_send_nack(w) && s2_nack(w))),
"Store must go through 0th pipe in L1D")
}
// For bypassing
val s4_req = RegNext(s3_req)
val s4_valid = RegNext(s3_valid)
val s5_req = RegNext(s4_req)
val s5_valid = RegNext(s4_valid)
val s3_bypass = widthMap(w => s3_valid && ((s2_req(w).addr >> wordOffBits) === (s3_req.addr >> wordOffBits)))
val s4_bypass = widthMap(w => s4_valid && ((s2_req(w).addr >> wordOffBits) === (s4_req.addr >> wordOffBits)))
val s5_bypass = widthMap(w => s5_valid && ((s2_req(w).addr >> wordOffBits) === (s5_req.addr >> wordOffBits)))
// Store -> Load bypassing
for (w <- 0 until lsuWidth) {
s2_data_word(w) := Mux(s3_bypass(w), s3_req.data,
Mux(s4_bypass(w), s4_req.data,
Mux(s5_bypass(w), s5_req.data,
s2_data_word_prebypass(w))))
}
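  // A store that hits updates the data array over the following cycles (s3 writes
  // through the write arbiter), so loads in s2 whose word address matches an
  // in-flight store in s3/s4/s5 take the store data directly instead of the
  // stale array read.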
val amoalu = Module(new AMOALU(xLen))
amoalu.io.mask := new StoreGen(s3_req.uop.mem_size, s3_req.addr, 0.U, xLen/8).mask
amoalu.io.cmd := s3_req.uop.mem_cmd
amoalu.io.lhs := s3_data_word
amoalu.io.rhs := RegNext(s2_req(0).data)
s3_req.data := amoalu.io.out
val s3_way = RegNext(s2_tag_match_way(0))
dataWriteArb.io.in(0).valid := s3_valid
dataWriteArb.io.in(0).bits.addr := s3_req.addr
dataWriteArb.io.in(0).bits.wmask := UIntToOH(s3_req.addr.extract(rowOffBits-1,offsetlsb))
dataWriteArb.io.in(0).bits.data := Fill(rowWords, s3_req.data)
dataWriteArb.io.in(0).bits.way_en := s3_way
io.lsu.ordered := mshrs.io.fence_rdy && !s1_valid.reduce(_||_) && !s2_valid.reduce(_||_)
}
File DescribedSRAM.scala:
// See LICENSE.Berkeley for license details.
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3.{Data, SyncReadMem, Vec}
import chisel3.util.log2Ceil
object DescribedSRAM {
def apply[T <: Data](
name: String,
desc: String,
size: BigInt, // depth
data: T
): SyncReadMem[T] = {
val mem = SyncReadMem(size, data)
mem.suggestName(name)
val granWidth = data match {
case v: Vec[_] => v.head.getWidth
case d => d.getWidth
}
val uid = 0
Annotated.srams(
component = mem,
name = name,
address_width = log2Ceil(size),
data_width = data.getWidth,
depth = size,
description = desc,
write_mask_granularity = granWidth
)
mem
}
}
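// Hedged usage sketch (illustrative; the module, sizes, and names are assumptions):
// DescribedSRAM behaves exactly like the SyncReadMem it returns -- it only attaches
// the SRAM annotation -- so call sites read and write it as a normal synchronous memory.
import chisel3._
class DescribedSRAMExample extends Module {
  val io = IO(new Bundle {
    val addr  = Input(UInt(6.W))
    val rdata = Output(Vec(4, UInt(20.W)))
  })
  val tags = DescribedSRAM(
    name = "example_tag_array",
    desc = "illustrative 64-entry tag array",
    size = 64,
    data = Vec(4, UInt(20.W))
  )
  io.rdata := tags.read(io.addr, true.B)
}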
| module BoomDuplicatedDataArray( // @[dcache.scala:281:7]
input clock, // @[dcache.scala:281:7]
input reset, // @[dcache.scala:281:7]
input io_read_0_valid, // @[dcache.scala:270:14]
input [3:0] io_read_0_bits_way_en, // @[dcache.scala:270:14]
input [11:0] io_read_0_bits_addr, // @[dcache.scala:270:14]
input io_write_valid, // @[dcache.scala:270:14]
input [3:0] io_write_bits_way_en, // @[dcache.scala:270:14]
input [11:0] io_write_bits_addr, // @[dcache.scala:270:14]
input [63:0] io_write_bits_data, // @[dcache.scala:270:14]
output [63:0] io_resp_0_0, // @[dcache.scala:270:14]
output [63:0] io_resp_0_1, // @[dcache.scala:270:14]
output [63:0] io_resp_0_2, // @[dcache.scala:270:14]
output [63:0] io_resp_0_3 // @[dcache.scala:270:14]
);
wire [63:0] _array_3_0_0_R0_data; // @[DescribedSRAM.scala:17:26]
wire [63:0] _array_2_0_0_R0_data; // @[DescribedSRAM.scala:17:26]
wire [63:0] _array_1_0_0_R0_data; // @[DescribedSRAM.scala:17:26]
wire [63:0] _array_0_0_0_R0_data; // @[DescribedSRAM.scala:17:26]
wire io_read_0_valid_0 = io_read_0_valid; // @[dcache.scala:281:7]
wire [3:0] io_read_0_bits_way_en_0 = io_read_0_bits_way_en; // @[dcache.scala:281:7]
wire [11:0] io_read_0_bits_addr_0 = io_read_0_bits_addr; // @[dcache.scala:281:7]
wire io_write_valid_0 = io_write_valid; // @[dcache.scala:281:7]
wire [3:0] io_write_bits_way_en_0 = io_write_bits_way_en; // @[dcache.scala:281:7]
wire [11:0] io_write_bits_addr_0 = io_write_bits_addr; // @[dcache.scala:281:7]
wire [63:0] io_write_bits_data_0 = io_write_bits_data; // @[dcache.scala:281:7]
wire io_s1_nacks_0 = 1'h0; // @[dcache.scala:281:7]
wire io_write_bits_wmask = 1'h1; // @[DescribedSRAM.scala:17:26]
wire [63:0] _data_T = io_write_bits_data_0; // @[dcache.scala:281:7, :296:75]
wire [63:0] _data_T_1 = io_write_bits_data_0; // @[dcache.scala:281:7, :296:75]
wire [63:0] _data_T_2 = io_write_bits_data_0; // @[dcache.scala:281:7, :296:75]
wire [63:0] _data_T_3 = io_write_bits_data_0; // @[dcache.scala:281:7, :296:75]
wire [63:0] io_resp_0_0_0; // @[dcache.scala:281:7]
wire [63:0] io_resp_0_1_0; // @[dcache.scala:281:7]
wire [63:0] io_resp_0_2_0; // @[dcache.scala:281:7]
wire [63:0] io_resp_0_3_0; // @[dcache.scala:281:7]
wire [8:0] waddr = io_write_bits_addr_0[11:3]; // @[dcache.scala:281:7, :284:34]
wire [8:0] raddr = io_read_0_bits_addr_0[11:3]; // @[dcache.scala:281:7, :287:38]
wire [8:0] _io_resp_0_0_WIRE = raddr; // @[dcache.scala:287:38, :302:44]
wire [8:0] _io_resp_0_1_WIRE = raddr; // @[dcache.scala:287:38, :302:44]
wire [8:0] _io_resp_0_2_WIRE = raddr; // @[dcache.scala:287:38, :302:44]
wire [8:0] _io_resp_0_3_WIRE = raddr; // @[dcache.scala:287:38, :302:44]
wire [63:0] data_0 = _data_T; // @[dcache.scala:296:{27,75}]
reg [63:0] io_resp_0_0_REG; // @[dcache.scala:302:33]
assign io_resp_0_0_0 = io_resp_0_0_REG; // @[dcache.scala:281:7, :302:33]
wire [63:0] data_1_0 = _data_T_1; // @[dcache.scala:296:{27,75}]
reg [63:0] io_resp_0_1_REG; // @[dcache.scala:302:33]
assign io_resp_0_1_0 = io_resp_0_1_REG; // @[dcache.scala:281:7, :302:33]
wire [63:0] data_2_0 = _data_T_2; // @[dcache.scala:296:{27,75}]
reg [63:0] io_resp_0_2_REG; // @[dcache.scala:302:33]
assign io_resp_0_2_0 = io_resp_0_2_REG; // @[dcache.scala:281:7, :302:33]
wire [63:0] data_3_0 = _data_T_3; // @[dcache.scala:296:{27,75}]
reg [63:0] io_resp_0_3_REG; // @[dcache.scala:302:33]
assign io_resp_0_3_0 = io_resp_0_3_REG; // @[dcache.scala:281:7, :302:33]
always @(posedge clock) begin // @[dcache.scala:281:7]
io_resp_0_0_REG <= _array_0_0_0_R0_data; // @[DescribedSRAM.scala:17:26]
io_resp_0_1_REG <= _array_1_0_0_R0_data; // @[DescribedSRAM.scala:17:26]
io_resp_0_2_REG <= _array_2_0_0_R0_data; // @[DescribedSRAM.scala:17:26]
io_resp_0_3_REG <= _array_3_0_0_R0_data; // @[DescribedSRAM.scala:17:26]
  end // @[dcache.scala:281:7]
array_0_0_0 array_0_0_0 ( // @[DescribedSRAM.scala:17:26]
.R0_addr (_io_resp_0_0_WIRE), // @[dcache.scala:302:44]
.R0_en (io_read_0_valid_0), // @[dcache.scala:281:7]
.R0_clk (clock),
.R0_data (_array_0_0_0_R0_data),
.W0_addr (waddr), // @[dcache.scala:284:34]
.W0_en (io_write_bits_way_en_0[0] & io_write_valid_0), // @[dcache.scala:281:7, :295:{33,37}]
.W0_clk (clock),
.W0_data (data_0) // @[dcache.scala:296:27]
); // @[DescribedSRAM.scala:17:26]
array_1_0_0 array_1_0_0 ( // @[DescribedSRAM.scala:17:26]
.R0_addr (_io_resp_0_1_WIRE), // @[dcache.scala:302:44]
.R0_en (io_read_0_valid_0), // @[dcache.scala:281:7]
.R0_clk (clock),
.R0_data (_array_1_0_0_R0_data),
.W0_addr (waddr), // @[dcache.scala:284:34]
.W0_en (io_write_bits_way_en_0[1] & io_write_valid_0), // @[dcache.scala:281:7, :295:{33,37}]
.W0_clk (clock),
.W0_data (data_1_0) // @[dcache.scala:296:27]
); // @[DescribedSRAM.scala:17:26]
array_2_0_0 array_2_0_0 ( // @[DescribedSRAM.scala:17:26]
.R0_addr (_io_resp_0_2_WIRE), // @[dcache.scala:302:44]
.R0_en (io_read_0_valid_0), // @[dcache.scala:281:7]
.R0_clk (clock),
.R0_data (_array_2_0_0_R0_data),
.W0_addr (waddr), // @[dcache.scala:284:34]
.W0_en (io_write_bits_way_en_0[2] & io_write_valid_0), // @[dcache.scala:281:7, :295:{33,37}]
.W0_clk (clock),
.W0_data (data_2_0) // @[dcache.scala:296:27]
); // @[DescribedSRAM.scala:17:26]
array_3_0_0 array_3_0_0 ( // @[DescribedSRAM.scala:17:26]
.R0_addr (_io_resp_0_3_WIRE), // @[dcache.scala:302:44]
.R0_en (io_read_0_valid_0), // @[dcache.scala:281:7]
.R0_clk (clock),
.R0_data (_array_3_0_0_R0_data),
.W0_addr (waddr), // @[dcache.scala:284:34]
.W0_en (io_write_bits_way_en_0[3] & io_write_valid_0), // @[dcache.scala:281:7, :295:{33,37}]
.W0_clk (clock),
.W0_data (data_3_0) // @[dcache.scala:296:27]
); // @[DescribedSRAM.scala:17:26]
assign io_resp_0_0 = io_resp_0_0_0; // @[dcache.scala:281:7]
assign io_resp_0_1 = io_resp_0_1_0; // @[dcache.scala:281:7]
assign io_resp_0_2 = io_resp_0_2_0; // @[dcache.scala:281:7]
assign io_resp_0_3 = io_resp_0_3_0; // @[dcache.scala:281:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File PE.scala:
// See README.md for license details.
package gemmini
import chisel3._
import chisel3.util._
class PEControl[T <: Data : Arithmetic](accType: T) extends Bundle {
val dataflow = UInt(1.W) // TODO make this an Enum
val propagate = UInt(1.W) // Which register should be propagated (and which should be accumulated)?
val shift = UInt(log2Up(accType.getWidth).W) // TODO this isn't correct for Floats
}
class MacUnit[T <: Data](inputType: T, cType: T, dType: T) (implicit ev: Arithmetic[T]) extends Module {
import ev._
val io = IO(new Bundle {
val in_a = Input(inputType)
val in_b = Input(inputType)
val in_c = Input(cType)
val out_d = Output(dType)
})
io.out_d := io.in_c.mac(io.in_a, io.in_b)
}
// TODO update documentation
/**
* A PE implementing a MAC operation. Configured as fully combinational when integrated into a Mesh.
* @param width Data width of operands
*/
class PE[T <: Data](inputType: T, outputType: T, accType: T, df: Dataflow.Value, max_simultaneous_matmuls: Int)
(implicit ev: Arithmetic[T]) extends Module { // Debugging variables
import ev._
val io = IO(new Bundle {
val in_a = Input(inputType)
val in_b = Input(outputType)
val in_d = Input(outputType)
val out_a = Output(inputType)
val out_b = Output(outputType)
val out_c = Output(outputType)
val in_control = Input(new PEControl(accType))
val out_control = Output(new PEControl(accType))
val in_id = Input(UInt(log2Up(max_simultaneous_matmuls).W))
val out_id = Output(UInt(log2Up(max_simultaneous_matmuls).W))
val in_last = Input(Bool())
val out_last = Output(Bool())
val in_valid = Input(Bool())
val out_valid = Output(Bool())
val bad_dataflow = Output(Bool())
})
val cType = if (df == Dataflow.WS) inputType else accType
// When creating PEs that support multiple dataflows, the
// elaboration/synthesis tools often fail to consolidate and de-duplicate
// MAC units. To force mac circuitry to be re-used, we create a "mac_unit"
// module here which just performs a single MAC operation
val mac_unit = Module(new MacUnit(inputType,
if (df == Dataflow.WS) outputType else accType, outputType))
val a = io.in_a
val b = io.in_b
val d = io.in_d
val c1 = Reg(cType)
val c2 = Reg(cType)
val dataflow = io.in_control.dataflow
val prop = io.in_control.propagate
val shift = io.in_control.shift
val id = io.in_id
val last = io.in_last
val valid = io.in_valid
io.out_a := a
io.out_control.dataflow := dataflow
io.out_control.propagate := prop
io.out_control.shift := shift
io.out_id := id
io.out_last := last
io.out_valid := valid
mac_unit.io.in_a := a
val last_s = RegEnable(prop, valid)
val flip = last_s =/= prop
val shift_offset = Mux(flip, shift, 0.U)
// Which dataflow are we using?
val OUTPUT_STATIONARY = Dataflow.OS.id.U(1.W)
val WEIGHT_STATIONARY = Dataflow.WS.id.U(1.W)
// Is c1 being computed on, or propagated forward (in the output-stationary dataflow)?
val COMPUTE = 0.U(1.W)
val PROPAGATE = 1.U(1.W)
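  // In the output-stationary dataflow the two accumulators ping-pong: while one
  // of c1/c2 accumulates the running partial sum, the other drains the previous
  // result through out_c (shifted and clipped) and is preloaded from in_d.
  // Flipping `propagate` swaps their roles, and shift_offset applies the rounding
  // shift only on that first drain cycle after the flip.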
io.bad_dataflow := false.B
when ((df == Dataflow.OS).B || ((df == Dataflow.BOTH).B && dataflow === OUTPUT_STATIONARY)) {
when(prop === PROPAGATE) {
io.out_c := (c1 >> shift_offset).clippedToWidthOf(outputType)
io.out_b := b
mac_unit.io.in_b := b.asTypeOf(inputType)
mac_unit.io.in_c := c2
c2 := mac_unit.io.out_d
c1 := d.withWidthOf(cType)
}.otherwise {
io.out_c := (c2 >> shift_offset).clippedToWidthOf(outputType)
io.out_b := b
mac_unit.io.in_b := b.asTypeOf(inputType)
mac_unit.io.in_c := c1
c1 := mac_unit.io.out_d
c2 := d.withWidthOf(cType)
}
}.elsewhen ((df == Dataflow.WS).B || ((df == Dataflow.BOTH).B && dataflow === WEIGHT_STATIONARY)) {
when(prop === PROPAGATE) {
io.out_c := c1
mac_unit.io.in_b := c2.asTypeOf(inputType)
mac_unit.io.in_c := b
io.out_b := mac_unit.io.out_d
c1 := d
}.otherwise {
io.out_c := c2
mac_unit.io.in_b := c1.asTypeOf(inputType)
mac_unit.io.in_c := b
io.out_b := mac_unit.io.out_d
c2 := d
}
}.otherwise {
io.bad_dataflow := true.B
//assert(false.B, "unknown dataflow")
io.out_c := DontCare
io.out_b := DontCare
mac_unit.io.in_b := b.asTypeOf(inputType)
mac_unit.io.in_c := c2
}
when (!valid) {
c1 := c1
c2 := c2
mac_unit.io.in_b := DontCare
mac_unit.io.in_c := DontCare
}
}
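// Editor's illustrative sketch (not part of the original source): one possible instantiation,
// with type parameters inferred from the generated PE_256 module later in this listing
// (8-bit 'a' inputs, 20-bit 'b'/'d' values, a 32-bit accumulator implied by the 5-bit shift
// field, and a 3-bit id implying at most 8 in-flight matmuls):
//
//   val pe = Module(new PE(SInt(8.W), SInt(20.W), SInt(32.W), Dataflow.BOTH,
//                          max_simultaneous_matmuls = 8))
//   pe.io.in_a := a
//   pe.io.in_b := b
//   pe.io.in_d := d
//   pe.io.in_control.dataflow := Dataflow.WS.id.U
//   pe.io.in_control.propagate := propagate
//   pe.io.in_control.shift := 0.U
//   pe.io.in_valid := true.B
//   // (in_id and in_last wiring elided)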
File Arithmetic.scala:
// A simple type class for Chisel datatypes that can add and multiply. To add your own type, simply create your own:
// implicit object MyTypeArithmetic extends Arithmetic[MyType] { ... }
package gemmini
import chisel3._
import chisel3.util._
import hardfloat._
// Bundles that represent the raw bits of custom datatypes
case class Float(expWidth: Int, sigWidth: Int) extends Bundle {
val bits = UInt((expWidth + sigWidth).W)
val bias: Int = (1 << (expWidth-1)) - 1
}
case class DummySInt(w: Int) extends Bundle {
val bits = UInt(w.W)
def dontCare: DummySInt = {
val o = Wire(new DummySInt(w))
o.bits := 0.U
o
}
}
// The Arithmetic typeclass which implements various arithmetic operations on custom datatypes
abstract class Arithmetic[T <: Data] {
implicit def cast(t: T): ArithmeticOps[T]
}
abstract class ArithmeticOps[T <: Data](self: T) {
def *(t: T): T
def mac(m1: T, m2: T): T // Returns (m1 * m2 + self)
def +(t: T): T
def -(t: T): T
def >>(u: UInt): T // This is a rounding shift! Rounds away from 0
def >(t: T): Bool
def identity: T
def withWidthOf(t: T): T
def clippedToWidthOf(t: T): T // Like "withWidthOf", except that it saturates
def relu: T
def zero: T
def minimum: T
// Optional parameters, which only need to be defined if you want to enable various optimizations for transformers
def divider(denom_t: UInt, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[T])] = None
def sqrt: Option[(DecoupledIO[UInt], DecoupledIO[T])] = None
def reciprocal[U <: Data](u: U, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[U])] = None
def mult_with_reciprocal[U <: Data](reciprocal: U) = self
}
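// Editor's illustrative sketch (not part of the original source): generators can be written
// against this typeclass so the same RTL description works for UInt, SInt, Float, or any
// user-defined type with an Arithmetic instance. The module name "MacThenRelu" is hypothetical.
class MacThenRelu[T <: Data](t: T)(implicit ev: Arithmetic[T]) extends Module {
  import ev._
  val io = IO(new Bundle {
    val a = Input(t)
    val b = Input(t)
    val c = Input(t)
    val out = Output(t)
  })
  // c.mac(a, b) computes a * b + c; relu then clamps negative results to zero
  io.out := io.c.mac(io.a, io.b).relu
}
// e.g. Module(new MacThenRelu(SInt(16.W))) or Module(new MacThenRelu(Float(8, 24)))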
object Arithmetic {
implicit object UIntArithmetic extends Arithmetic[UInt] {
override implicit def cast(self: UInt) = new ArithmeticOps(self) {
override def *(t: UInt) = self * t
override def mac(m1: UInt, m2: UInt) = m1 * m2 + self
override def +(t: UInt) = self + t
override def -(t: UInt) = self - t
override def >>(u: UInt) = {
// The equation we use can be found here: https://riscv.github.io/documents/riscv-v-spec/#_vector_fixed_point_rounding_mode_register_vxrm
// TODO Do we need to explicitly handle the cases where "u" is a small number (like 0)? What is the default behavior here?
val point_five = Mux(u === 0.U, 0.U, self(u - 1.U))
val zeros = Mux(u <= 1.U, 0.U, self.asUInt & ((1.U << (u - 1.U)).asUInt - 1.U)) =/= 0.U
val ones_digit = self(u)
val r = point_five & (zeros | ones_digit)
(self >> u).asUInt + r
}
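      // Worked example (editor's note, not in the original source): for self = 6 (b110)
      // and u = 2, point_five = self(1) = 1, zeros = ((self & b1) =/= 0) = 0 and
      // ones_digit = self(2) = 1, so r = 1 and the result is (6 >> 2) + 1 = 2;
      // the exact quotient 1.5 rounds up to 2.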
override def >(t: UInt): Bool = self > t
override def withWidthOf(t: UInt) = self.asTypeOf(t)
override def clippedToWidthOf(t: UInt) = {
val sat = ((1 << (t.getWidth-1))-1).U
Mux(self > sat, sat, self)(t.getWidth-1, 0)
}
override def relu: UInt = self
override def zero: UInt = 0.U
override def identity: UInt = 1.U
override def minimum: UInt = 0.U
}
}
implicit object SIntArithmetic extends Arithmetic[SInt] {
override implicit def cast(self: SInt) = new ArithmeticOps(self) {
override def *(t: SInt) = self * t
override def mac(m1: SInt, m2: SInt) = m1 * m2 + self
override def +(t: SInt) = self + t
override def -(t: SInt) = self - t
override def >>(u: UInt) = {
// The equation we use can be found here: https://riscv.github.io/documents/riscv-v-spec/#_vector_fixed_point_rounding_mode_register_vxrm
// TODO Do we need to explicitly handle the cases where "u" is a small number (like 0)? What is the default behavior here?
val point_five = Mux(u === 0.U, 0.U, self(u - 1.U))
val zeros = Mux(u <= 1.U, 0.U, self.asUInt & ((1.U << (u - 1.U)).asUInt - 1.U)) =/= 0.U
val ones_digit = self(u)
val r = (point_five & (zeros | ones_digit)).asBool
(self >> u).asSInt + Mux(r, 1.S, 0.S)
}
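      // Worked example (editor's note, not in the original source): for an 8-bit
      // self = -6 (b11111010) and u = 2, point_five = self(1) = 1, zeros = 0 and
      // ones_digit = self(2) = 0, so r = 0 and the result is (-6 >> 2) + 0 = -2
      // (arithmetic shift); the exact quotient -1.5 rounds away from zero to -2.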
override def >(t: SInt): Bool = self > t
override def withWidthOf(t: SInt) = {
if (self.getWidth >= t.getWidth)
self(t.getWidth-1, 0).asSInt
else {
val sign_bits = t.getWidth - self.getWidth
val sign = self(self.getWidth-1)
Cat(Cat(Seq.fill(sign_bits)(sign)), self).asTypeOf(t)
}
}
override def clippedToWidthOf(t: SInt): SInt = {
val maxsat = ((1 << (t.getWidth-1))-1).S
val minsat = (-(1 << (t.getWidth-1))).S
MuxCase(self, Seq((self > maxsat) -> maxsat, (self < minsat) -> minsat))(t.getWidth-1, 0).asSInt
}
override def relu: SInt = Mux(self >= 0.S, self, 0.S)
override def zero: SInt = 0.S
override def identity: SInt = 1.S
override def minimum: SInt = (-(1 << (self.getWidth-1))).S
override def divider(denom_t: UInt, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[SInt])] = {
// TODO this uses a floating point divider, but we should use an integer divider instead
val input = Wire(Decoupled(denom_t.cloneType))
val output = Wire(Decoupled(self.cloneType))
// We translate our integer to floating-point form so that we can use the hardfloat divider
val expWidth = log2Up(self.getWidth) + 1
val sigWidth = self.getWidth
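      // Editor's note: with sigWidth = self.getWidth every value of 'self' is exactly
      // representable in the intermediate float (e.g. a 32-bit SInt maps to a float with
      // a 6-bit exponent and a 32-bit significand), so the int-to-float step is lossless.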
def sin_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def uin_to_float(x: UInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := false.B
in_to_rec_fn.io.in := x
in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def float_to_in(x: UInt) = {
val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth))
rec_fn_to_in.io.signedOut := true.B
rec_fn_to_in.io.in := x
rec_fn_to_in.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
rec_fn_to_in.io.out.asSInt
}
val self_rec = sin_to_float(self)
val denom_rec = uin_to_float(input.bits)
// Instantiate the hardfloat divider
val divider = Module(new DivSqrtRecFN_small(expWidth, sigWidth, options))
input.ready := divider.io.inReady
divider.io.inValid := input.valid
divider.io.sqrtOp := false.B
divider.io.a := self_rec
divider.io.b := denom_rec
divider.io.roundingMode := consts.round_minMag
divider.io.detectTininess := consts.tininess_afterRounding
output.valid := divider.io.outValid_div
output.bits := float_to_in(divider.io.out)
assert(!output.valid || output.ready)
Some((input, output))
}
override def sqrt: Option[(DecoupledIO[UInt], DecoupledIO[SInt])] = {
// TODO this uses a floating point divider, but we should use an integer divider instead
val input = Wire(Decoupled(UInt(0.W)))
val output = Wire(Decoupled(self.cloneType))
input.bits := DontCare
// We translate our integer to floating-point form so that we can use the hardfloat divider
val expWidth = log2Up(self.getWidth) + 1
val sigWidth = self.getWidth
def in_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def float_to_in(x: UInt) = {
val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth))
rec_fn_to_in.io.signedOut := true.B
rec_fn_to_in.io.in := x
rec_fn_to_in.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
rec_fn_to_in.io.out.asSInt
}
val self_rec = in_to_float(self)
// Instantiate the hardfloat sqrt
val sqrter = Module(new DivSqrtRecFN_small(expWidth, sigWidth, 0))
input.ready := sqrter.io.inReady
sqrter.io.inValid := input.valid
sqrter.io.sqrtOp := true.B
sqrter.io.a := self_rec
sqrter.io.b := DontCare
sqrter.io.roundingMode := consts.round_minMag
sqrter.io.detectTininess := consts.tininess_afterRounding
output.valid := sqrter.io.outValid_sqrt
output.bits := float_to_in(sqrter.io.out)
assert(!output.valid || output.ready)
Some((input, output))
}
override def reciprocal[U <: Data](u: U, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[U])] = u match {
case Float(expWidth, sigWidth) =>
val input = Wire(Decoupled(UInt(0.W)))
val output = Wire(Decoupled(u.cloneType))
input.bits := DontCare
// We translate our integer to floating-point form so that we can use the hardfloat divider
def in_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
val self_rec = in_to_float(self)
val one_rec = in_to_float(1.S)
// Instantiate the hardfloat divider
val divider = Module(new DivSqrtRecFN_small(expWidth, sigWidth, options))
input.ready := divider.io.inReady
divider.io.inValid := input.valid
divider.io.sqrtOp := false.B
divider.io.a := one_rec
divider.io.b := self_rec
divider.io.roundingMode := consts.round_near_even
divider.io.detectTininess := consts.tininess_afterRounding
output.valid := divider.io.outValid_div
output.bits := fNFromRecFN(expWidth, sigWidth, divider.io.out).asTypeOf(u)
assert(!output.valid || output.ready)
Some((input, output))
case _ => None
}
override def mult_with_reciprocal[U <: Data](reciprocal: U): SInt = reciprocal match {
case recip @ Float(expWidth, sigWidth) =>
def in_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def float_to_in(x: UInt) = {
val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth))
rec_fn_to_in.io.signedOut := true.B
rec_fn_to_in.io.in := x
rec_fn_to_in.io.roundingMode := consts.round_minMag
rec_fn_to_in.io.out.asSInt
}
val self_rec = in_to_float(self)
val reciprocal_rec = recFNFromFN(expWidth, sigWidth, recip.bits)
// Instantiate the hardfloat multiplier
val muladder = Module(new MulRecFN(expWidth, sigWidth))
muladder.io.roundingMode := consts.round_near_even
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := self_rec
muladder.io.b := reciprocal_rec
float_to_in(muladder.io.out)
case _ => self
}
}
}
implicit object FloatArithmetic extends Arithmetic[Float] {
// TODO Floating point arithmetic currently switches between recoded and standard formats for every operation. However, it should stay in the recoded format as it travels through the systolic array
override implicit def cast(self: Float): ArithmeticOps[Float] = new ArithmeticOps(self) {
override def *(t: Float): Float = {
val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth))
t_resizer.io.in := t_rec
t_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
t_resizer.io.detectTininess := consts.tininess_afterRounding
val t_rec_resized = t_resizer.io.out
val muladder = Module(new MulRecFN(self.expWidth, self.sigWidth))
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := self_rec
muladder.io.b := t_rec_resized
val out = Wire(Float(self.expWidth, self.sigWidth))
out.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
out
}
override def mac(m1: Float, m2: Float): Float = {
// Recode all operands
val m1_rec = recFNFromFN(m1.expWidth, m1.sigWidth, m1.bits)
val m2_rec = recFNFromFN(m2.expWidth, m2.sigWidth, m2.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Resize m1 to self's width
val m1_resizer = Module(new RecFNToRecFN(m1.expWidth, m1.sigWidth, self.expWidth, self.sigWidth))
m1_resizer.io.in := m1_rec
m1_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
m1_resizer.io.detectTininess := consts.tininess_afterRounding
val m1_rec_resized = m1_resizer.io.out
// Resize m2 to self's width
val m2_resizer = Module(new RecFNToRecFN(m2.expWidth, m2.sigWidth, self.expWidth, self.sigWidth))
m2_resizer.io.in := m2_rec
m2_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
m2_resizer.io.detectTininess := consts.tininess_afterRounding
val m2_rec_resized = m2_resizer.io.out
// Perform multiply-add
val muladder = Module(new MulAddRecFN(self.expWidth, self.sigWidth))
muladder.io.op := 0.U
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := m1_rec_resized
muladder.io.b := m2_rec_resized
muladder.io.c := self_rec
// Convert result to standard format // TODO remove these intermediate recodings
val out = Wire(Float(self.expWidth, self.sigWidth))
out.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
out
}
override def +(t: Float): Float = {
require(self.getWidth >= t.getWidth) // This just makes it easier to write the resizing code
// Recode all operands
val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Generate 1 as a float
val in_to_rec_fn = Module(new INToRecFN(1, self.expWidth, self.sigWidth))
in_to_rec_fn.io.signedIn := false.B
in_to_rec_fn.io.in := 1.U
in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
val one_rec = in_to_rec_fn.io.out
// Resize t
val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth))
t_resizer.io.in := t_rec
t_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
t_resizer.io.detectTininess := consts.tininess_afterRounding
val t_rec_resized = t_resizer.io.out
// Perform addition
val muladder = Module(new MulAddRecFN(self.expWidth, self.sigWidth))
muladder.io.op := 0.U
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := t_rec_resized
muladder.io.b := one_rec
muladder.io.c := self_rec
val result = Wire(Float(self.expWidth, self.sigWidth))
result.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
result
}
override def -(t: Float): Float = {
val t_sgn = t.bits(t.getWidth-1)
val neg_t = Cat(~t_sgn, t.bits(t.getWidth-2,0)).asTypeOf(t)
self + neg_t
}
override def >>(u: UInt): Float = {
// Recode self
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Get 2^(-u) as a recoded float
val shift_exp = Wire(UInt(self.expWidth.W))
shift_exp := self.bias.U - u
val shift_fn = Cat(0.U(1.W), shift_exp, 0.U((self.sigWidth-1).W))
val shift_rec = recFNFromFN(self.expWidth, self.sigWidth, shift_fn)
assert(shift_exp =/= 0.U, "scaling by denormalized numbers is not currently supported")
// Multiply self and 2^(-u)
val muladder = Module(new MulRecFN(self.expWidth, self.sigWidth))
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := self_rec
muladder.io.b := shift_rec
val result = Wire(Float(self.expWidth, self.sigWidth))
result.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
result
}
override def >(t: Float): Bool = {
// Recode all operands
val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Resize t to self's width
val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth))
t_resizer.io.in := t_rec
t_resizer.io.roundingMode := consts.round_near_even
t_resizer.io.detectTininess := consts.tininess_afterRounding
val t_rec_resized = t_resizer.io.out
val comparator = Module(new CompareRecFN(self.expWidth, self.sigWidth))
comparator.io.a := self_rec
comparator.io.b := t_rec_resized
comparator.io.signaling := false.B
comparator.io.gt
}
override def withWidthOf(t: Float): Float = {
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
val resizer = Module(new RecFNToRecFN(self.expWidth, self.sigWidth, t.expWidth, t.sigWidth))
resizer.io.in := self_rec
resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
resizer.io.detectTininess := consts.tininess_afterRounding
val result = Wire(Float(t.expWidth, t.sigWidth))
result.bits := fNFromRecFN(t.expWidth, t.sigWidth, resizer.io.out)
result
}
override def clippedToWidthOf(t: Float): Float = {
// TODO check for overflow. Right now, we just assume that overflow doesn't happen
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
val resizer = Module(new RecFNToRecFN(self.expWidth, self.sigWidth, t.expWidth, t.sigWidth))
resizer.io.in := self_rec
resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
resizer.io.detectTininess := consts.tininess_afterRounding
val result = Wire(Float(t.expWidth, t.sigWidth))
result.bits := fNFromRecFN(t.expWidth, t.sigWidth, resizer.io.out)
result
}
override def relu: Float = {
val raw = rawFloatFromFN(self.expWidth, self.sigWidth, self.bits)
val result = Wire(Float(self.expWidth, self.sigWidth))
result.bits := Mux(!raw.isZero && raw.sign, 0.U, self.bits)
result
}
override def zero: Float = 0.U.asTypeOf(self)
override def identity: Float = Cat(0.U(2.W), ~(0.U((self.expWidth-1).W)), 0.U((self.sigWidth-1).W)).asTypeOf(self)
override def minimum: Float = Cat(1.U, ~(0.U(self.expWidth.W)), 0.U((self.sigWidth-1).W)).asTypeOf(self)
}
}
implicit object DummySIntArithmetic extends Arithmetic[DummySInt] {
override implicit def cast(self: DummySInt) = new ArithmeticOps(self) {
override def *(t: DummySInt) = self.dontCare
override def mac(m1: DummySInt, m2: DummySInt) = self.dontCare
override def +(t: DummySInt) = self.dontCare
override def -(t: DummySInt) = self.dontCare
override def >>(t: UInt) = self.dontCare
override def >(t: DummySInt): Bool = false.B
override def identity = self.dontCare
override def withWidthOf(t: DummySInt) = self.dontCare
override def clippedToWidthOf(t: DummySInt) = self.dontCare
override def relu = self.dontCare
override def zero = self.dontCare
override def minimum: DummySInt = self.dontCare
}
}
}
| module PE_256( // @[PE.scala:31:7]
input clock, // @[PE.scala:31:7]
input reset, // @[PE.scala:31:7]
input [7:0] io_in_a, // @[PE.scala:35:14]
input [19:0] io_in_b, // @[PE.scala:35:14]
input [19:0] io_in_d, // @[PE.scala:35:14]
output [7:0] io_out_a, // @[PE.scala:35:14]
output [19:0] io_out_b, // @[PE.scala:35:14]
output [19:0] io_out_c, // @[PE.scala:35:14]
input io_in_control_dataflow, // @[PE.scala:35:14]
input io_in_control_propagate, // @[PE.scala:35:14]
input [4:0] io_in_control_shift, // @[PE.scala:35:14]
output io_out_control_dataflow, // @[PE.scala:35:14]
output io_out_control_propagate, // @[PE.scala:35:14]
output [4:0] io_out_control_shift, // @[PE.scala:35:14]
input [2:0] io_in_id, // @[PE.scala:35:14]
output [2:0] io_out_id, // @[PE.scala:35:14]
input io_in_last, // @[PE.scala:35:14]
output io_out_last, // @[PE.scala:35:14]
input io_in_valid, // @[PE.scala:35:14]
output io_out_valid // @[PE.scala:35:14]
);
wire [7:0] io_in_a_0 = io_in_a; // @[PE.scala:31:7]
wire [19:0] io_in_b_0 = io_in_b; // @[PE.scala:31:7]
wire [19:0] io_in_d_0 = io_in_d; // @[PE.scala:31:7]
wire io_in_control_dataflow_0 = io_in_control_dataflow; // @[PE.scala:31:7]
wire io_in_control_propagate_0 = io_in_control_propagate; // @[PE.scala:31:7]
wire [4:0] io_in_control_shift_0 = io_in_control_shift; // @[PE.scala:31:7]
wire [2:0] io_in_id_0 = io_in_id; // @[PE.scala:31:7]
wire io_in_last_0 = io_in_last; // @[PE.scala:31:7]
wire io_in_valid_0 = io_in_valid; // @[PE.scala:31:7]
wire io_bad_dataflow = 1'h0; // @[PE.scala:31:7]
wire _io_out_c_T_5 = 1'h0; // @[Arithmetic.scala:125:33]
wire _io_out_c_T_6 = 1'h0; // @[Arithmetic.scala:125:60]
wire _io_out_c_T_16 = 1'h0; // @[Arithmetic.scala:125:33]
wire _io_out_c_T_17 = 1'h0; // @[Arithmetic.scala:125:60]
wire [7:0] io_out_a_0 = io_in_a_0; // @[PE.scala:31:7]
wire [19:0] _mac_unit_io_in_b_T = io_in_b_0; // @[PE.scala:31:7, :106:37]
wire [19:0] _mac_unit_io_in_b_T_2 = io_in_b_0; // @[PE.scala:31:7, :113:37]
wire [19:0] _mac_unit_io_in_b_T_8 = io_in_b_0; // @[PE.scala:31:7, :137:35]
wire io_out_control_dataflow_0 = io_in_control_dataflow_0; // @[PE.scala:31:7]
wire io_out_control_propagate_0 = io_in_control_propagate_0; // @[PE.scala:31:7]
wire [4:0] io_out_control_shift_0 = io_in_control_shift_0; // @[PE.scala:31:7]
wire [2:0] io_out_id_0 = io_in_id_0; // @[PE.scala:31:7]
wire io_out_last_0 = io_in_last_0; // @[PE.scala:31:7]
wire io_out_valid_0 = io_in_valid_0; // @[PE.scala:31:7]
wire [19:0] io_out_b_0; // @[PE.scala:31:7]
wire [19:0] io_out_c_0; // @[PE.scala:31:7]
reg [7:0] c1; // @[PE.scala:70:15]
wire [7:0] _io_out_c_zeros_T_1 = c1; // @[PE.scala:70:15]
wire [7:0] _mac_unit_io_in_b_T_6 = c1; // @[PE.scala:70:15, :127:38]
reg [7:0] c2; // @[PE.scala:71:15]
wire [7:0] _io_out_c_zeros_T_10 = c2; // @[PE.scala:71:15]
wire [7:0] _mac_unit_io_in_b_T_4 = c2; // @[PE.scala:71:15, :121:38]
reg last_s; // @[PE.scala:89:25]
wire flip = last_s != io_in_control_propagate_0; // @[PE.scala:31:7, :89:25, :90:21]
wire [4:0] shift_offset = flip ? io_in_control_shift_0 : 5'h0; // @[PE.scala:31:7, :90:21, :91:25]
wire _GEN = shift_offset == 5'h0; // @[PE.scala:91:25]
wire _io_out_c_point_five_T; // @[Arithmetic.scala:101:32]
assign _io_out_c_point_five_T = _GEN; // @[Arithmetic.scala:101:32]
wire _io_out_c_point_five_T_5; // @[Arithmetic.scala:101:32]
assign _io_out_c_point_five_T_5 = _GEN; // @[Arithmetic.scala:101:32]
wire [5:0] _GEN_0 = {1'h0, shift_offset} - 6'h1; // @[PE.scala:91:25]
wire [5:0] _io_out_c_point_five_T_1; // @[Arithmetic.scala:101:53]
assign _io_out_c_point_five_T_1 = _GEN_0; // @[Arithmetic.scala:101:53]
wire [5:0] _io_out_c_zeros_T_2; // @[Arithmetic.scala:102:66]
assign _io_out_c_zeros_T_2 = _GEN_0; // @[Arithmetic.scala:101:53, :102:66]
wire [5:0] _io_out_c_point_five_T_6; // @[Arithmetic.scala:101:53]
assign _io_out_c_point_five_T_6 = _GEN_0; // @[Arithmetic.scala:101:53]
wire [5:0] _io_out_c_zeros_T_11; // @[Arithmetic.scala:102:66]
assign _io_out_c_zeros_T_11 = _GEN_0; // @[Arithmetic.scala:101:53, :102:66]
wire [4:0] _io_out_c_point_five_T_2 = _io_out_c_point_five_T_1[4:0]; // @[Arithmetic.scala:101:53]
wire [7:0] _io_out_c_point_five_T_3 = $signed($signed(c1) >>> _io_out_c_point_five_T_2); // @[PE.scala:70:15]
wire _io_out_c_point_five_T_4 = _io_out_c_point_five_T_3[0]; // @[Arithmetic.scala:101:50]
wire io_out_c_point_five = ~_io_out_c_point_five_T & _io_out_c_point_five_T_4; // @[Arithmetic.scala:101:{29,32,50}]
wire _GEN_1 = shift_offset < 5'h2; // @[PE.scala:91:25]
wire _io_out_c_zeros_T; // @[Arithmetic.scala:102:27]
assign _io_out_c_zeros_T = _GEN_1; // @[Arithmetic.scala:102:27]
wire _io_out_c_zeros_T_9; // @[Arithmetic.scala:102:27]
assign _io_out_c_zeros_T_9 = _GEN_1; // @[Arithmetic.scala:102:27]
wire [4:0] _io_out_c_zeros_T_3 = _io_out_c_zeros_T_2[4:0]; // @[Arithmetic.scala:102:66]
wire [31:0] _io_out_c_zeros_T_4 = 32'h1 << _io_out_c_zeros_T_3; // @[Arithmetic.scala:102:{60,66}]
wire [32:0] _io_out_c_zeros_T_5 = {1'h0, _io_out_c_zeros_T_4} - 33'h1; // @[Arithmetic.scala:102:{60,81}]
wire [31:0] _io_out_c_zeros_T_6 = _io_out_c_zeros_T_5[31:0]; // @[Arithmetic.scala:102:81]
wire [31:0] _io_out_c_zeros_T_7 = {24'h0, _io_out_c_zeros_T_6[7:0] & _io_out_c_zeros_T_1}; // @[Arithmetic.scala:102:{45,52,81}]
wire [31:0] _io_out_c_zeros_T_8 = _io_out_c_zeros_T ? 32'h0 : _io_out_c_zeros_T_7; // @[Arithmetic.scala:102:{24,27,52}]
wire io_out_c_zeros = |_io_out_c_zeros_T_8; // @[Arithmetic.scala:102:{24,89}]
wire [7:0] _GEN_2 = {3'h0, shift_offset}; // @[PE.scala:91:25]
wire [7:0] _GEN_3 = $signed($signed(c1) >>> _GEN_2); // @[PE.scala:70:15]
wire [7:0] _io_out_c_ones_digit_T; // @[Arithmetic.scala:103:30]
assign _io_out_c_ones_digit_T = _GEN_3; // @[Arithmetic.scala:103:30]
wire [7:0] _io_out_c_T; // @[Arithmetic.scala:107:15]
assign _io_out_c_T = _GEN_3; // @[Arithmetic.scala:103:30, :107:15]
wire io_out_c_ones_digit = _io_out_c_ones_digit_T[0]; // @[Arithmetic.scala:103:30]
wire _io_out_c_r_T = io_out_c_zeros | io_out_c_ones_digit; // @[Arithmetic.scala:102:89, :103:30, :105:38]
wire _io_out_c_r_T_1 = io_out_c_point_five & _io_out_c_r_T; // @[Arithmetic.scala:101:29, :105:{29,38}]
wire io_out_c_r = _io_out_c_r_T_1; // @[Arithmetic.scala:105:{29,53}]
wire [1:0] _io_out_c_T_1 = {1'h0, io_out_c_r}; // @[Arithmetic.scala:105:53, :107:33]
wire [8:0] _io_out_c_T_2 = {_io_out_c_T[7], _io_out_c_T} + {{7{_io_out_c_T_1[1]}}, _io_out_c_T_1}; // @[Arithmetic.scala:107:{15,28,33}]
wire [7:0] _io_out_c_T_3 = _io_out_c_T_2[7:0]; // @[Arithmetic.scala:107:28]
wire [7:0] _io_out_c_T_4 = _io_out_c_T_3; // @[Arithmetic.scala:107:28]
wire [19:0] _io_out_c_T_7 = {{12{_io_out_c_T_4[7]}}, _io_out_c_T_4}; // @[Mux.scala:126:16]
wire [19:0] _io_out_c_T_8 = _io_out_c_T_7; // @[Mux.scala:126:16]
wire [19:0] _io_out_c_T_9 = _io_out_c_T_8; // @[Mux.scala:126:16]
wire [19:0] _io_out_c_T_10 = _io_out_c_T_9; // @[Arithmetic.scala:125:{81,99}]
wire [19:0] _mac_unit_io_in_b_T_1 = _mac_unit_io_in_b_T; // @[PE.scala:106:37]
wire [7:0] _mac_unit_io_in_b_WIRE = _mac_unit_io_in_b_T_1[7:0]; // @[PE.scala:106:37]
wire [7:0] _c1_T = io_in_d_0[7:0]; // @[PE.scala:31:7]
wire [7:0] _c2_T = io_in_d_0[7:0]; // @[PE.scala:31:7]
wire [7:0] _c1_T_1 = _c1_T; // @[Arithmetic.scala:114:{15,33}]
wire [4:0] _io_out_c_point_five_T_7 = _io_out_c_point_five_T_6[4:0]; // @[Arithmetic.scala:101:53]
wire [7:0] _io_out_c_point_five_T_8 = $signed($signed(c2) >>> _io_out_c_point_five_T_7); // @[PE.scala:71:15]
wire _io_out_c_point_five_T_9 = _io_out_c_point_five_T_8[0]; // @[Arithmetic.scala:101:50]
wire io_out_c_point_five_1 = ~_io_out_c_point_five_T_5 & _io_out_c_point_five_T_9; // @[Arithmetic.scala:101:{29,32,50}]
wire [4:0] _io_out_c_zeros_T_12 = _io_out_c_zeros_T_11[4:0]; // @[Arithmetic.scala:102:66]
wire [31:0] _io_out_c_zeros_T_13 = 32'h1 << _io_out_c_zeros_T_12; // @[Arithmetic.scala:102:{60,66}]
wire [32:0] _io_out_c_zeros_T_14 = {1'h0, _io_out_c_zeros_T_13} - 33'h1; // @[Arithmetic.scala:102:{60,81}]
wire [31:0] _io_out_c_zeros_T_15 = _io_out_c_zeros_T_14[31:0]; // @[Arithmetic.scala:102:81]
wire [31:0] _io_out_c_zeros_T_16 = {24'h0, _io_out_c_zeros_T_15[7:0] & _io_out_c_zeros_T_10}; // @[Arithmetic.scala:102:{45,52,81}]
wire [31:0] _io_out_c_zeros_T_17 = _io_out_c_zeros_T_9 ? 32'h0 : _io_out_c_zeros_T_16; // @[Arithmetic.scala:102:{24,27,52}]
wire io_out_c_zeros_1 = |_io_out_c_zeros_T_17; // @[Arithmetic.scala:102:{24,89}]
wire [7:0] _GEN_4 = $signed($signed(c2) >>> _GEN_2); // @[PE.scala:71:15]
wire [7:0] _io_out_c_ones_digit_T_1; // @[Arithmetic.scala:103:30]
assign _io_out_c_ones_digit_T_1 = _GEN_4; // @[Arithmetic.scala:103:30]
wire [7:0] _io_out_c_T_11; // @[Arithmetic.scala:107:15]
assign _io_out_c_T_11 = _GEN_4; // @[Arithmetic.scala:103:30, :107:15]
wire io_out_c_ones_digit_1 = _io_out_c_ones_digit_T_1[0]; // @[Arithmetic.scala:103:30]
wire _io_out_c_r_T_2 = io_out_c_zeros_1 | io_out_c_ones_digit_1; // @[Arithmetic.scala:102:89, :103:30, :105:38]
wire _io_out_c_r_T_3 = io_out_c_point_five_1 & _io_out_c_r_T_2; // @[Arithmetic.scala:101:29, :105:{29,38}]
wire io_out_c_r_1 = _io_out_c_r_T_3; // @[Arithmetic.scala:105:{29,53}]
wire [1:0] _io_out_c_T_12 = {1'h0, io_out_c_r_1}; // @[Arithmetic.scala:105:53, :107:33]
wire [8:0] _io_out_c_T_13 = {_io_out_c_T_11[7], _io_out_c_T_11} + {{7{_io_out_c_T_12[1]}}, _io_out_c_T_12}; // @[Arithmetic.scala:107:{15,28,33}]
wire [7:0] _io_out_c_T_14 = _io_out_c_T_13[7:0]; // @[Arithmetic.scala:107:28]
wire [7:0] _io_out_c_T_15 = _io_out_c_T_14; // @[Arithmetic.scala:107:28]
wire [19:0] _io_out_c_T_18 = {{12{_io_out_c_T_15[7]}}, _io_out_c_T_15}; // @[Mux.scala:126:16]
wire [19:0] _io_out_c_T_19 = _io_out_c_T_18; // @[Mux.scala:126:16]
wire [19:0] _io_out_c_T_20 = _io_out_c_T_19; // @[Mux.scala:126:16]
wire [19:0] _io_out_c_T_21 = _io_out_c_T_20; // @[Arithmetic.scala:125:{81,99}]
wire [19:0] _mac_unit_io_in_b_T_3 = _mac_unit_io_in_b_T_2; // @[PE.scala:113:37]
wire [7:0] _mac_unit_io_in_b_WIRE_1 = _mac_unit_io_in_b_T_3[7:0]; // @[PE.scala:113:37]
wire [7:0] _c2_T_1 = _c2_T; // @[Arithmetic.scala:114:{15,33}]
wire [7:0] _mac_unit_io_in_b_T_5; // @[PE.scala:121:38]
assign _mac_unit_io_in_b_T_5 = _mac_unit_io_in_b_T_4; // @[PE.scala:121:38]
wire [7:0] _mac_unit_io_in_b_WIRE_2 = _mac_unit_io_in_b_T_5; // @[PE.scala:121:38]
assign io_out_c_0 = io_in_control_propagate_0 ? {{12{c1[7]}}, c1} : {{12{c2[7]}}, c2}; // @[PE.scala:31:7, :70:15, :71:15, :119:30, :120:16, :126:16]
wire [7:0] _mac_unit_io_in_b_T_7; // @[PE.scala:127:38]
assign _mac_unit_io_in_b_T_7 = _mac_unit_io_in_b_T_6; // @[PE.scala:127:38]
wire [7:0] _mac_unit_io_in_b_WIRE_3 = _mac_unit_io_in_b_T_7; // @[PE.scala:127:38]
wire [19:0] _mac_unit_io_in_b_T_9 = _mac_unit_io_in_b_T_8; // @[PE.scala:137:35]
wire [7:0] _mac_unit_io_in_b_WIRE_4 = _mac_unit_io_in_b_T_9[7:0]; // @[PE.scala:137:35]
always @(posedge clock) begin // @[PE.scala:31:7]
if (io_in_valid_0 & io_in_control_propagate_0) // @[PE.scala:31:7, :102:95, :141:17, :142:8]
c1 <= io_in_d_0[7:0]; // @[PE.scala:31:7, :70:15]
if (~(~io_in_valid_0 | io_in_control_propagate_0)) // @[PE.scala:31:7, :71:15, :102:95, :119:30, :130:10, :141:{9,17}, :143:8]
c2 <= io_in_d_0[7:0]; // @[PE.scala:31:7, :71:15]
if (io_in_valid_0) // @[PE.scala:31:7]
last_s <= io_in_control_propagate_0; // @[PE.scala:31:7, :89:25]
  end // always @(posedge)
MacUnit mac_unit ( // @[PE.scala:64:24]
.clock (clock),
.reset (reset),
.io_in_a (io_in_a_0), // @[PE.scala:31:7]
.io_in_b (io_in_control_propagate_0 ? _mac_unit_io_in_b_WIRE_2 : _mac_unit_io_in_b_WIRE_3), // @[PE.scala:31:7, :119:30, :121:{24,38}, :127:{24,38}]
.io_in_c (io_in_b_0), // @[PE.scala:31:7]
.io_out_d (io_out_b_0)
); // @[PE.scala:64:24]
assign io_out_a = io_out_a_0; // @[PE.scala:31:7]
assign io_out_b = io_out_b_0; // @[PE.scala:31:7]
assign io_out_c = io_out_c_0; // @[PE.scala:31:7]
assign io_out_control_dataflow = io_out_control_dataflow_0; // @[PE.scala:31:7]
assign io_out_control_propagate = io_out_control_propagate_0; // @[PE.scala:31:7]
assign io_out_control_shift = io_out_control_shift_0; // @[PE.scala:31:7]
assign io_out_id = io_out_id_0; // @[PE.scala:31:7]
assign io_out_last = io_out_last_0; // @[PE.scala:31:7]
assign io_out_valid = io_out_valid_0; // @[PE.scala:31:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File MulAddRecFN.scala:
/*============================================================================
This Chisel source file is part of a pre-release version of the HardFloat IEEE
Floating-Point Arithmetic Package, by John R. Hauser (with some contributions
from Yunsup Lee and Andrew Waterman, mainly concerning testing).
Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the
University of California. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions, and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions, and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the University nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
=============================================================================*/
package hardfloat
import chisel3._
import chisel3.util._
import consts._
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
class MulAddRecFN_interIo(expWidth: Int, sigWidth: Int) extends Bundle
{
//*** ENCODE SOME OF THESE CASES IN FEWER BITS?:
val isSigNaNAny = Bool()
val isNaNAOrB = Bool()
val isInfA = Bool()
val isZeroA = Bool()
val isInfB = Bool()
val isZeroB = Bool()
val signProd = Bool()
val isNaNC = Bool()
val isInfC = Bool()
val isZeroC = Bool()
val sExpSum = SInt((expWidth + 2).W)
val doSubMags = Bool()
val CIsDominant = Bool()
val CDom_CAlignDist = UInt(log2Ceil(sigWidth + 1).W)
val highAlignedSigC = UInt((sigWidth + 2).W)
val bit0AlignedSigC = UInt(1.W)
}
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
class MulAddRecFNToRaw_preMul(expWidth: Int, sigWidth: Int) extends RawModule
{
override def desiredName = s"MulAddRecFNToRaw_preMul_e${expWidth}_s${sigWidth}"
val io = IO(new Bundle {
val op = Input(Bits(2.W))
val a = Input(Bits((expWidth + sigWidth + 1).W))
val b = Input(Bits((expWidth + sigWidth + 1).W))
val c = Input(Bits((expWidth + sigWidth + 1).W))
val mulAddA = Output(UInt(sigWidth.W))
val mulAddB = Output(UInt(sigWidth.W))
val mulAddC = Output(UInt((sigWidth * 2).W))
val toPostMul = Output(new MulAddRecFN_interIo(expWidth, sigWidth))
})
//------------------------------------------------------------------------
//------------------------------------------------------------------------
//*** POSSIBLE TO REDUCE THIS BY 1 OR 2 BITS? (CURRENTLY 2 BITS BETWEEN
//*** UNSHIFTED C AND PRODUCT):
val sigSumWidth = sigWidth * 3 + 3
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val rawA = rawFloatFromRecFN(expWidth, sigWidth, io.a)
val rawB = rawFloatFromRecFN(expWidth, sigWidth, io.b)
val rawC = rawFloatFromRecFN(expWidth, sigWidth, io.c)
val signProd = rawA.sign ^ rawB.sign ^ io.op(1)
//*** REVIEW THE BIAS FOR 'sExpAlignedProd':
val sExpAlignedProd =
rawA.sExp +& rawB.sExp + (-(BigInt(1)<<expWidth) + sigWidth + 3).S
val doSubMags = signProd ^ rawC.sign ^ io.op(0)
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val sNatCAlignDist = sExpAlignedProd - rawC.sExp
val posNatCAlignDist = sNatCAlignDist(expWidth + 1, 0)
val isMinCAlign = rawA.isZero || rawB.isZero || (sNatCAlignDist < 0.S)
val CIsDominant =
! rawC.isZero && (isMinCAlign || (posNatCAlignDist <= sigWidth.U))
val CAlignDist =
Mux(isMinCAlign,
0.U,
Mux(posNatCAlignDist < (sigSumWidth - 1).U,
posNatCAlignDist(log2Ceil(sigSumWidth) - 1, 0),
(sigSumWidth - 1).U
)
)
val mainAlignedSigC =
(Mux(doSubMags, ~rawC.sig, rawC.sig) ## Fill(sigSumWidth - sigWidth + 2, doSubMags)).asSInt>>CAlignDist
val reduced4CExtra =
(orReduceBy4(rawC.sig<<((sigSumWidth - sigWidth - 1) & 3)) &
lowMask(
CAlignDist>>2,
//*** NOT NEEDED?:
// (sigSumWidth + 2)>>2,
(sigSumWidth - 1)>>2,
(sigSumWidth - sigWidth - 1)>>2
)
).orR
val alignedSigC =
Cat(mainAlignedSigC>>3,
Mux(doSubMags,
mainAlignedSigC(2, 0).andR && ! reduced4CExtra,
mainAlignedSigC(2, 0).orR || reduced4CExtra
)
)
//------------------------------------------------------------------------
//------------------------------------------------------------------------
io.mulAddA := rawA.sig
io.mulAddB := rawB.sig
io.mulAddC := alignedSigC(sigWidth * 2, 1)
io.toPostMul.isSigNaNAny :=
isSigNaNRawFloat(rawA) || isSigNaNRawFloat(rawB) ||
isSigNaNRawFloat(rawC)
io.toPostMul.isNaNAOrB := rawA.isNaN || rawB.isNaN
io.toPostMul.isInfA := rawA.isInf
io.toPostMul.isZeroA := rawA.isZero
io.toPostMul.isInfB := rawB.isInf
io.toPostMul.isZeroB := rawB.isZero
io.toPostMul.signProd := signProd
io.toPostMul.isNaNC := rawC.isNaN
io.toPostMul.isInfC := rawC.isInf
io.toPostMul.isZeroC := rawC.isZero
io.toPostMul.sExpSum :=
Mux(CIsDominant, rawC.sExp, sExpAlignedProd - sigWidth.S)
io.toPostMul.doSubMags := doSubMags
io.toPostMul.CIsDominant := CIsDominant
io.toPostMul.CDom_CAlignDist := CAlignDist(log2Ceil(sigWidth + 1) - 1, 0)
io.toPostMul.highAlignedSigC :=
alignedSigC(sigSumWidth - 1, sigWidth * 2 + 1)
io.toPostMul.bit0AlignedSigC := alignedSigC(0)
}
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
class MulAddRecFNToRaw_postMul(expWidth: Int, sigWidth: Int) extends RawModule
{
override def desiredName = s"MulAddRecFNToRaw_postMul_e${expWidth}_s${sigWidth}"
val io = IO(new Bundle {
val fromPreMul = Input(new MulAddRecFN_interIo(expWidth, sigWidth))
val mulAddResult = Input(UInt((sigWidth * 2 + 1).W))
val roundingMode = Input(UInt(3.W))
val invalidExc = Output(Bool())
val rawOut = Output(new RawFloat(expWidth, sigWidth + 2))
})
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val sigSumWidth = sigWidth * 3 + 3
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val roundingMode_min = (io.roundingMode === round_min)
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val opSignC = io.fromPreMul.signProd ^ io.fromPreMul.doSubMags
val sigSum =
Cat(Mux(io.mulAddResult(sigWidth * 2),
io.fromPreMul.highAlignedSigC + 1.U,
io.fromPreMul.highAlignedSigC
),
io.mulAddResult(sigWidth * 2 - 1, 0),
io.fromPreMul.bit0AlignedSigC
)
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val CDom_sign = opSignC
val CDom_sExp = io.fromPreMul.sExpSum - io.fromPreMul.doSubMags.zext
val CDom_absSigSum =
Mux(io.fromPreMul.doSubMags,
~sigSum(sigSumWidth - 1, sigWidth + 1),
0.U(1.W) ##
//*** IF GAP IS REDUCED TO 1 BIT, MUST REDUCE THIS COMPONENT TO 1 BIT TOO:
io.fromPreMul.highAlignedSigC(sigWidth + 1, sigWidth) ##
sigSum(sigSumWidth - 3, sigWidth + 2)
)
val CDom_absSigSumExtra =
Mux(io.fromPreMul.doSubMags,
(~sigSum(sigWidth, 1)).orR,
sigSum(sigWidth + 1, 1).orR
)
val CDom_mainSig =
(CDom_absSigSum<<io.fromPreMul.CDom_CAlignDist)(
sigWidth * 2 + 1, sigWidth - 3)
val CDom_reduced4SigExtra =
(orReduceBy4(CDom_absSigSum(sigWidth - 1, 0)<<(~sigWidth & 3)) &
lowMask(io.fromPreMul.CDom_CAlignDist>>2, 0, sigWidth>>2)).orR
val CDom_sig =
Cat(CDom_mainSig>>3,
CDom_mainSig(2, 0).orR || CDom_reduced4SigExtra ||
CDom_absSigSumExtra
)
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val notCDom_signSigSum = sigSum(sigWidth * 2 + 3)
val notCDom_absSigSum =
Mux(notCDom_signSigSum,
~sigSum(sigWidth * 2 + 2, 0),
sigSum(sigWidth * 2 + 2, 0) + io.fromPreMul.doSubMags
)
val notCDom_reduced2AbsSigSum = orReduceBy2(notCDom_absSigSum)
val notCDom_normDistReduced2 = countLeadingZeros(notCDom_reduced2AbsSigSum)
val notCDom_nearNormDist = notCDom_normDistReduced2<<1
val notCDom_sExp = io.fromPreMul.sExpSum - notCDom_nearNormDist.asUInt.zext
val notCDom_mainSig =
(notCDom_absSigSum<<notCDom_nearNormDist)(
sigWidth * 2 + 3, sigWidth - 1)
val notCDom_reduced4SigExtra =
(orReduceBy2(
notCDom_reduced2AbsSigSum(sigWidth>>1, 0)<<((sigWidth>>1) & 1)) &
lowMask(notCDom_normDistReduced2>>1, 0, (sigWidth + 2)>>2)
).orR
val notCDom_sig =
Cat(notCDom_mainSig>>3,
notCDom_mainSig(2, 0).orR || notCDom_reduced4SigExtra
)
val notCDom_completeCancellation =
(notCDom_sig(sigWidth + 2, sigWidth + 1) === 0.U)
val notCDom_sign =
Mux(notCDom_completeCancellation,
roundingMode_min,
io.fromPreMul.signProd ^ notCDom_signSigSum
)
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val notNaN_isInfProd = io.fromPreMul.isInfA || io.fromPreMul.isInfB
val notNaN_isInfOut = notNaN_isInfProd || io.fromPreMul.isInfC
val notNaN_addZeros =
(io.fromPreMul.isZeroA || io.fromPreMul.isZeroB) &&
io.fromPreMul.isZeroC
io.invalidExc :=
io.fromPreMul.isSigNaNAny ||
(io.fromPreMul.isInfA && io.fromPreMul.isZeroB) ||
(io.fromPreMul.isZeroA && io.fromPreMul.isInfB) ||
(! io.fromPreMul.isNaNAOrB &&
(io.fromPreMul.isInfA || io.fromPreMul.isInfB) &&
io.fromPreMul.isInfC &&
io.fromPreMul.doSubMags)
io.rawOut.isNaN := io.fromPreMul.isNaNAOrB || io.fromPreMul.isNaNC
io.rawOut.isInf := notNaN_isInfOut
//*** IMPROVE?:
io.rawOut.isZero :=
notNaN_addZeros ||
(! io.fromPreMul.CIsDominant && notCDom_completeCancellation)
io.rawOut.sign :=
(notNaN_isInfProd && io.fromPreMul.signProd) ||
(io.fromPreMul.isInfC && opSignC) ||
(notNaN_addZeros && ! roundingMode_min &&
io.fromPreMul.signProd && opSignC) ||
(notNaN_addZeros && roundingMode_min &&
(io.fromPreMul.signProd || opSignC)) ||
(! notNaN_isInfOut && ! notNaN_addZeros &&
Mux(io.fromPreMul.CIsDominant, CDom_sign, notCDom_sign))
io.rawOut.sExp := Mux(io.fromPreMul.CIsDominant, CDom_sExp, notCDom_sExp)
io.rawOut.sig := Mux(io.fromPreMul.CIsDominant, CDom_sig, notCDom_sig)
}
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
class MulAddRecFN(expWidth: Int, sigWidth: Int) extends RawModule
{
override def desiredName = s"MulAddRecFN_e${expWidth}_s${sigWidth}"
val io = IO(new Bundle {
val op = Input(Bits(2.W))
val a = Input(Bits((expWidth + sigWidth + 1).W))
val b = Input(Bits((expWidth + sigWidth + 1).W))
val c = Input(Bits((expWidth + sigWidth + 1).W))
val roundingMode = Input(UInt(3.W))
val detectTininess = Input(UInt(1.W))
val out = Output(Bits((expWidth + sigWidth + 1).W))
val exceptionFlags = Output(Bits(5.W))
})
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val mulAddRecFNToRaw_preMul =
Module(new MulAddRecFNToRaw_preMul(expWidth, sigWidth))
val mulAddRecFNToRaw_postMul =
Module(new MulAddRecFNToRaw_postMul(expWidth, sigWidth))
mulAddRecFNToRaw_preMul.io.op := io.op
mulAddRecFNToRaw_preMul.io.a := io.a
mulAddRecFNToRaw_preMul.io.b := io.b
mulAddRecFNToRaw_preMul.io.c := io.c
val mulAddResult =
(mulAddRecFNToRaw_preMul.io.mulAddA *
mulAddRecFNToRaw_preMul.io.mulAddB) +&
mulAddRecFNToRaw_preMul.io.mulAddC
mulAddRecFNToRaw_postMul.io.fromPreMul :=
mulAddRecFNToRaw_preMul.io.toPostMul
mulAddRecFNToRaw_postMul.io.mulAddResult := mulAddResult
mulAddRecFNToRaw_postMul.io.roundingMode := io.roundingMode
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val roundRawFNToRecFN =
Module(new RoundRawFNToRecFN(expWidth, sigWidth, 0))
roundRawFNToRecFN.io.invalidExc := mulAddRecFNToRaw_postMul.io.invalidExc
roundRawFNToRecFN.io.infiniteExc := false.B
roundRawFNToRecFN.io.in := mulAddRecFNToRaw_postMul.io.rawOut
roundRawFNToRecFN.io.roundingMode := io.roundingMode
roundRawFNToRecFN.io.detectTininess := io.detectTininess
io.out := roundRawFNToRecFN.io.out
io.exceptionFlags := roundRawFNToRecFN.io.exceptionFlags
}
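// Editor's illustrative sketch (not part of the original hardfloat sources): because the
// preMul/postMul split exposes the raw integer multiply-add between the two halves, the
// intermediate signals can be registered to pipeline the fused multiply-add (or driven
// from a shared multiplier). The module below is hypothetical and only shows that wiring.
class MulAddRecFN_pipelined(expWidth: Int, sigWidth: Int) extends Module
{
    val io = IO(new Bundle {
        val op = Input(Bits(2.W))
        val a = Input(Bits((expWidth + sigWidth + 1).W))
        val b = Input(Bits((expWidth + sigWidth + 1).W))
        val c = Input(Bits((expWidth + sigWidth + 1).W))
        val roundingMode   = Input(UInt(3.W))
        val detectTininess = Input(UInt(1.W))
        val out = Output(Bits((expWidth + sigWidth + 1).W))
        val exceptionFlags = Output(Bits(5.W))
    })
    val mulAddRecFNToRaw_preMul  = Module(new MulAddRecFNToRaw_preMul(expWidth, sigWidth))
    val mulAddRecFNToRaw_postMul = Module(new MulAddRecFNToRaw_postMul(expWidth, sigWidth))
    mulAddRecFNToRaw_preMul.io.op := io.op
    mulAddRecFNToRaw_preMul.io.a  := io.a
    mulAddRecFNToRaw_preMul.io.b  := io.b
    mulAddRecFNToRaw_preMul.io.c  := io.c
    // Register the wide product and the pre-computed side-band bundle for one cycle
    val mulAddResult_q = RegNext(
        (mulAddRecFNToRaw_preMul.io.mulAddA *
             mulAddRecFNToRaw_preMul.io.mulAddB) +&
            mulAddRecFNToRaw_preMul.io.mulAddC)
    mulAddRecFNToRaw_postMul.io.fromPreMul   := RegNext(mulAddRecFNToRaw_preMul.io.toPostMul)
    mulAddRecFNToRaw_postMul.io.mulAddResult := mulAddResult_q
    mulAddRecFNToRaw_postMul.io.roundingMode := RegNext(io.roundingMode)
    val roundRawFNToRecFN = Module(new RoundRawFNToRecFN(expWidth, sigWidth, 0))
    roundRawFNToRecFN.io.invalidExc     := mulAddRecFNToRaw_postMul.io.invalidExc
    roundRawFNToRecFN.io.infiniteExc    := false.B
    roundRawFNToRecFN.io.in             := mulAddRecFNToRaw_postMul.io.rawOut
    roundRawFNToRecFN.io.roundingMode   := RegNext(io.roundingMode)
    roundRawFNToRecFN.io.detectTininess := RegNext(io.detectTininess)
    io.out := roundRawFNToRecFN.io.out
    io.exceptionFlags := roundRawFNToRecFN.io.exceptionFlags
}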
| module MulAddRecFN_e8_s24_4( // @[MulAddRecFN.scala:300:7]
input [32:0] io_a, // @[MulAddRecFN.scala:303:16]
input [32:0] io_b, // @[MulAddRecFN.scala:303:16]
output [32:0] io_out // @[MulAddRecFN.scala:303:16]
);
wire _mulAddRecFNToRaw_postMul_io_invalidExc; // @[MulAddRecFN.scala:319:15]
wire _mulAddRecFNToRaw_postMul_io_rawOut_isNaN; // @[MulAddRecFN.scala:319:15]
wire _mulAddRecFNToRaw_postMul_io_rawOut_isInf; // @[MulAddRecFN.scala:319:15]
wire _mulAddRecFNToRaw_postMul_io_rawOut_isZero; // @[MulAddRecFN.scala:319:15]
wire _mulAddRecFNToRaw_postMul_io_rawOut_sign; // @[MulAddRecFN.scala:319:15]
wire [9:0] _mulAddRecFNToRaw_postMul_io_rawOut_sExp; // @[MulAddRecFN.scala:319:15]
wire [26:0] _mulAddRecFNToRaw_postMul_io_rawOut_sig; // @[MulAddRecFN.scala:319:15]
wire [23:0] _mulAddRecFNToRaw_preMul_io_mulAddA; // @[MulAddRecFN.scala:317:15]
wire [23:0] _mulAddRecFNToRaw_preMul_io_mulAddB; // @[MulAddRecFN.scala:317:15]
wire [47:0] _mulAddRecFNToRaw_preMul_io_mulAddC; // @[MulAddRecFN.scala:317:15]
wire _mulAddRecFNToRaw_preMul_io_toPostMul_isSigNaNAny; // @[MulAddRecFN.scala:317:15]
wire _mulAddRecFNToRaw_preMul_io_toPostMul_isNaNAOrB; // @[MulAddRecFN.scala:317:15]
wire _mulAddRecFNToRaw_preMul_io_toPostMul_isInfA; // @[MulAddRecFN.scala:317:15]
wire _mulAddRecFNToRaw_preMul_io_toPostMul_isZeroA; // @[MulAddRecFN.scala:317:15]
wire _mulAddRecFNToRaw_preMul_io_toPostMul_isInfB; // @[MulAddRecFN.scala:317:15]
wire _mulAddRecFNToRaw_preMul_io_toPostMul_isZeroB; // @[MulAddRecFN.scala:317:15]
wire _mulAddRecFNToRaw_preMul_io_toPostMul_signProd; // @[MulAddRecFN.scala:317:15]
wire [9:0] _mulAddRecFNToRaw_preMul_io_toPostMul_sExpSum; // @[MulAddRecFN.scala:317:15]
wire _mulAddRecFNToRaw_preMul_io_toPostMul_doSubMags; // @[MulAddRecFN.scala:317:15]
wire [4:0] _mulAddRecFNToRaw_preMul_io_toPostMul_CDom_CAlignDist; // @[MulAddRecFN.scala:317:15]
wire [25:0] _mulAddRecFNToRaw_preMul_io_toPostMul_highAlignedSigC; // @[MulAddRecFN.scala:317:15]
wire _mulAddRecFNToRaw_preMul_io_toPostMul_bit0AlignedSigC; // @[MulAddRecFN.scala:317:15]
wire [32:0] io_a_0 = io_a; // @[MulAddRecFN.scala:300:7]
wire [32:0] io_b_0 = io_b; // @[MulAddRecFN.scala:300:7]
wire io_detectTininess = 1'h1; // @[MulAddRecFN.scala:300:7, :303:16, :317:15, :319:15, :339:15]
wire [2:0] io_roundingMode = 3'h0; // @[MulAddRecFN.scala:300:7, :303:16, :319:15, :339:15]
wire [32:0] io_c = 33'h0; // @[MulAddRecFN.scala:300:7, :303:16, :317:15]
wire [1:0] io_op = 2'h0; // @[MulAddRecFN.scala:300:7, :303:16, :317:15]
wire [32:0] io_out_0; // @[MulAddRecFN.scala:300:7]
wire [4:0] io_exceptionFlags; // @[MulAddRecFN.scala:300:7]
wire [47:0] _mulAddResult_T = {24'h0, _mulAddRecFNToRaw_preMul_io_mulAddA} * {24'h0, _mulAddRecFNToRaw_preMul_io_mulAddB}; // @[MulAddRecFN.scala:317:15, :327:45]
wire [48:0] mulAddResult = {1'h0, _mulAddResult_T} + {1'h0, _mulAddRecFNToRaw_preMul_io_mulAddC}; // @[MulAddRecFN.scala:317:15, :327:45, :328:50]
MulAddRecFNToRaw_preMul_e8_s24_4 mulAddRecFNToRaw_preMul ( // @[MulAddRecFN.scala:317:15]
.io_a (io_a_0), // @[MulAddRecFN.scala:300:7]
.io_b (io_b_0), // @[MulAddRecFN.scala:300:7]
.io_mulAddA (_mulAddRecFNToRaw_preMul_io_mulAddA),
.io_mulAddB (_mulAddRecFNToRaw_preMul_io_mulAddB),
.io_mulAddC (_mulAddRecFNToRaw_preMul_io_mulAddC),
.io_toPostMul_isSigNaNAny (_mulAddRecFNToRaw_preMul_io_toPostMul_isSigNaNAny),
.io_toPostMul_isNaNAOrB (_mulAddRecFNToRaw_preMul_io_toPostMul_isNaNAOrB),
.io_toPostMul_isInfA (_mulAddRecFNToRaw_preMul_io_toPostMul_isInfA),
.io_toPostMul_isZeroA (_mulAddRecFNToRaw_preMul_io_toPostMul_isZeroA),
.io_toPostMul_isInfB (_mulAddRecFNToRaw_preMul_io_toPostMul_isInfB),
.io_toPostMul_isZeroB (_mulAddRecFNToRaw_preMul_io_toPostMul_isZeroB),
.io_toPostMul_signProd (_mulAddRecFNToRaw_preMul_io_toPostMul_signProd),
.io_toPostMul_sExpSum (_mulAddRecFNToRaw_preMul_io_toPostMul_sExpSum),
.io_toPostMul_doSubMags (_mulAddRecFNToRaw_preMul_io_toPostMul_doSubMags),
.io_toPostMul_CDom_CAlignDist (_mulAddRecFNToRaw_preMul_io_toPostMul_CDom_CAlignDist),
.io_toPostMul_highAlignedSigC (_mulAddRecFNToRaw_preMul_io_toPostMul_highAlignedSigC),
.io_toPostMul_bit0AlignedSigC (_mulAddRecFNToRaw_preMul_io_toPostMul_bit0AlignedSigC)
); // @[MulAddRecFN.scala:317:15]
MulAddRecFNToRaw_postMul_e8_s24_4 mulAddRecFNToRaw_postMul ( // @[MulAddRecFN.scala:319:15]
.io_fromPreMul_isSigNaNAny (_mulAddRecFNToRaw_preMul_io_toPostMul_isSigNaNAny), // @[MulAddRecFN.scala:317:15]
.io_fromPreMul_isNaNAOrB (_mulAddRecFNToRaw_preMul_io_toPostMul_isNaNAOrB), // @[MulAddRecFN.scala:317:15]
.io_fromPreMul_isInfA (_mulAddRecFNToRaw_preMul_io_toPostMul_isInfA), // @[MulAddRecFN.scala:317:15]
.io_fromPreMul_isZeroA (_mulAddRecFNToRaw_preMul_io_toPostMul_isZeroA), // @[MulAddRecFN.scala:317:15]
.io_fromPreMul_isInfB (_mulAddRecFNToRaw_preMul_io_toPostMul_isInfB), // @[MulAddRecFN.scala:317:15]
.io_fromPreMul_isZeroB (_mulAddRecFNToRaw_preMul_io_toPostMul_isZeroB), // @[MulAddRecFN.scala:317:15]
.io_fromPreMul_signProd (_mulAddRecFNToRaw_preMul_io_toPostMul_signProd), // @[MulAddRecFN.scala:317:15]
.io_fromPreMul_sExpSum (_mulAddRecFNToRaw_preMul_io_toPostMul_sExpSum), // @[MulAddRecFN.scala:317:15]
.io_fromPreMul_doSubMags (_mulAddRecFNToRaw_preMul_io_toPostMul_doSubMags), // @[MulAddRecFN.scala:317:15]
.io_fromPreMul_CDom_CAlignDist (_mulAddRecFNToRaw_preMul_io_toPostMul_CDom_CAlignDist), // @[MulAddRecFN.scala:317:15]
.io_fromPreMul_highAlignedSigC (_mulAddRecFNToRaw_preMul_io_toPostMul_highAlignedSigC), // @[MulAddRecFN.scala:317:15]
.io_fromPreMul_bit0AlignedSigC (_mulAddRecFNToRaw_preMul_io_toPostMul_bit0AlignedSigC), // @[MulAddRecFN.scala:317:15]
.io_mulAddResult (mulAddResult), // @[MulAddRecFN.scala:328:50]
.io_invalidExc (_mulAddRecFNToRaw_postMul_io_invalidExc),
.io_rawOut_isNaN (_mulAddRecFNToRaw_postMul_io_rawOut_isNaN),
.io_rawOut_isInf (_mulAddRecFNToRaw_postMul_io_rawOut_isInf),
.io_rawOut_isZero (_mulAddRecFNToRaw_postMul_io_rawOut_isZero),
.io_rawOut_sign (_mulAddRecFNToRaw_postMul_io_rawOut_sign),
.io_rawOut_sExp (_mulAddRecFNToRaw_postMul_io_rawOut_sExp),
.io_rawOut_sig (_mulAddRecFNToRaw_postMul_io_rawOut_sig)
); // @[MulAddRecFN.scala:319:15]
RoundRawFNToRecFN_e8_s24_4 roundRawFNToRecFN ( // @[MulAddRecFN.scala:339:15]
.io_invalidExc (_mulAddRecFNToRaw_postMul_io_invalidExc), // @[MulAddRecFN.scala:319:15]
.io_in_isNaN (_mulAddRecFNToRaw_postMul_io_rawOut_isNaN), // @[MulAddRecFN.scala:319:15]
.io_in_isInf (_mulAddRecFNToRaw_postMul_io_rawOut_isInf), // @[MulAddRecFN.scala:319:15]
.io_in_isZero (_mulAddRecFNToRaw_postMul_io_rawOut_isZero), // @[MulAddRecFN.scala:319:15]
.io_in_sign (_mulAddRecFNToRaw_postMul_io_rawOut_sign), // @[MulAddRecFN.scala:319:15]
.io_in_sExp (_mulAddRecFNToRaw_postMul_io_rawOut_sExp), // @[MulAddRecFN.scala:319:15]
.io_in_sig (_mulAddRecFNToRaw_postMul_io_rawOut_sig), // @[MulAddRecFN.scala:319:15]
.io_out (io_out_0),
.io_exceptionFlags (io_exceptionFlags)
); // @[MulAddRecFN.scala:339:15]
assign io_out = io_out_0; // @[MulAddRecFN.scala:300:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File SinkC.scala:
/*
* Copyright 2019 SiFive, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You should have received a copy of LICENSE.Apache2 along with
* this software. If not, you may obtain a copy at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package sifive.blocks.inclusivecache
import chisel3._
import chisel3.util._
import freechips.rocketchip.tilelink._
import freechips.rocketchip.util._
class SinkCResponse(params: InclusiveCacheParameters) extends InclusiveCacheBundle(params)
{
val last = Bool()
val set = UInt(params.setBits.W)
val tag = UInt(params.tagBits.W)
val source = UInt(params.inner.bundle.sourceBits.W)
val param = UInt(3.W)
val data = Bool()
}
class PutBufferCEntry(params: InclusiveCacheParameters) extends InclusiveCacheBundle(params)
{
val data = UInt(params.inner.bundle.dataBits.W)
val corrupt = Bool()
}
class SinkC(params: InclusiveCacheParameters) extends Module
{
val io = IO(new Bundle {
val req = Decoupled(new FullRequest(params)) // Release
val resp = Valid(new SinkCResponse(params)) // ProbeAck
val c = Flipped(Decoupled(new TLBundleC(params.inner.bundle)))
// Find 'way' via MSHR CAM lookup
val set = UInt(params.setBits.W)
val way = Flipped(UInt(params.wayBits.W))
// ProbeAck write-back
val bs_adr = Decoupled(new BankedStoreInnerAddress(params))
val bs_dat = new BankedStoreInnerPoison(params)
// SourceD sideband
val rel_pop = Flipped(Decoupled(new PutBufferPop(params)))
val rel_beat = new PutBufferCEntry(params)
})
if (params.firstLevel) {
// Tie off unused ports
io.req.valid := false.B
io.req.bits := DontCare
io.resp.valid := false.B
io.resp.bits := DontCare
io.c.ready := true.B
io.set := 0.U
io.bs_adr.valid := false.B
io.bs_adr.bits := DontCare
io.bs_dat := DontCare
io.rel_pop.ready := true.B
io.rel_beat := DontCare
} else {
// No restrictions on the type of buffer
val c = params.micro.innerBuf.c(io.c)
val (tag, set, offset) = params.parseAddress(c.bits.address)
val (first, last, _, beat) = params.inner.count(c)
val hasData = params.inner.hasData(c.bits)
val raw_resp = c.bits.opcode === TLMessages.ProbeAck || c.bits.opcode === TLMessages.ProbeAckData
val resp = Mux(c.valid, raw_resp, RegEnable(raw_resp, c.valid))
// Handling of C is broken into two cases:
// ProbeAck
// if hasData, must be written to BankedStore
// if last beat, trigger resp
// Release
// if first beat, trigger req
// if hasData, go to putBuffer
// if hasData && first beat, must claim a list
assert (!(c.valid && c.bits.corrupt), "Data poisoning unavailable")
io.set := Mux(c.valid, set, RegEnable(set, c.valid)) // finds us the way
// Cut path from inner C to the BankedStore SRAM setup
// ... this makes it easier to layout the L2 data banks far away
val bs_adr = Wire(chiselTypeOf(io.bs_adr))
io.bs_adr <> Queue(bs_adr, 1, pipe=true)
io.bs_dat.data := RegEnable(c.bits.data, bs_adr.fire)
bs_adr.valid := resp && (!first || (c.valid && hasData))
bs_adr.bits.noop := !c.valid
bs_adr.bits.way := io.way
bs_adr.bits.set := io.set
bs_adr.bits.beat := Mux(c.valid, beat, RegEnable(beat + bs_adr.ready.asUInt, c.valid))
bs_adr.bits.mask := ~0.U(params.innerMaskBits.W)
params.ccover(bs_adr.valid && !bs_adr.ready, "SINKC_SRAM_STALL", "Data SRAM busy")
io.resp.valid := resp && c.valid && (first || last) && (!hasData || bs_adr.ready)
io.resp.bits.last := last
io.resp.bits.set := set
io.resp.bits.tag := tag
io.resp.bits.source := c.bits.source
io.resp.bits.param := c.bits.param
io.resp.bits.data := hasData
val putbuffer = Module(new ListBuffer(ListBufferParameters(new PutBufferCEntry(params), params.relLists, params.relBeats, false)))
val lists = RegInit(0.U(params.relLists.W))
val lists_set = WireInit(init = 0.U(params.relLists.W))
val lists_clr = WireInit(init = 0.U(params.relLists.W))
lists := (lists | lists_set) & ~lists_clr
val free = !lists.andR
val freeOH = ~(leftOR(~lists) << 1) & ~lists
val freeIdx = OHToUInt(freeOH)
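    // Worked example (editor's note, not in the original source): with four release lists
    // and lists = b0011 (lists 0 and 1 in use), ~lists = b1100 and leftOR(~lists) = b1100,
    // so freeOH = ~(b1100 << 1) & b1100 = b0100 and freeIdx = 2, i.e. the lowest free list.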
val req_block = first && !io.req.ready
val buf_block = hasData && !putbuffer.io.push.ready
val set_block = hasData && first && !free
params.ccover(c.valid && !raw_resp && req_block, "SINKC_REQ_STALL", "No MSHR available to sink request")
params.ccover(c.valid && !raw_resp && buf_block, "SINKC_BUF_STALL", "No space in putbuffer for beat")
params.ccover(c.valid && !raw_resp && set_block, "SINKC_SET_STALL", "No space in putbuffer for request")
c.ready := Mux(raw_resp, !hasData || bs_adr.ready, !req_block && !buf_block && !set_block)
io.req.valid := !resp && c.valid && first && !buf_block && !set_block
putbuffer.io.push.valid := !resp && c.valid && hasData && !req_block && !set_block
when (!resp && c.valid && first && hasData && !req_block && !buf_block) { lists_set := freeOH }
val put = Mux(first, freeIdx, RegEnable(freeIdx, first))
io.req.bits.prio := VecInit(4.U(3.W).asBools)
io.req.bits.control:= false.B
io.req.bits.opcode := c.bits.opcode
io.req.bits.param := c.bits.param
io.req.bits.size := c.bits.size
io.req.bits.source := c.bits.source
io.req.bits.offset := offset
io.req.bits.set := set
io.req.bits.tag := tag
io.req.bits.put := put
putbuffer.io.push.bits.index := put
putbuffer.io.push.bits.data.data := c.bits.data
putbuffer.io.push.bits.data.corrupt := c.bits.corrupt
// Grant access to pop the data
putbuffer.io.pop.bits := io.rel_pop.bits.index
putbuffer.io.pop.valid := io.rel_pop.fire
io.rel_pop.ready := putbuffer.io.valid(io.rel_pop.bits.index(log2Ceil(params.relLists)-1,0))
io.rel_beat := putbuffer.io.data
when (io.rel_pop.fire && io.rel_pop.bits.last) {
lists_clr := UIntToOH(io.rel_pop.bits.index, params.relLists)
}
}
}
File Parameters.scala:
/*
* Copyright 2019 SiFive, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You should have received a copy of LICENSE.Apache2 along with
* this software. If not, you may obtain a copy at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package sifive.blocks.inclusivecache
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config._
import freechips.rocketchip.diplomacy._
import freechips.rocketchip.tilelink._
import freechips.rocketchip.util._
import freechips.rocketchip.util.property.cover
import scala.math.{min,max}
case class CacheParameters(
level: Int,
ways: Int,
sets: Int,
blockBytes: Int,
beatBytes: Int, // inner
hintsSkipProbe: Boolean)
{
require (ways > 0)
require (sets > 0)
require (blockBytes > 0 && isPow2(blockBytes))
require (beatBytes > 0 && isPow2(beatBytes))
require (blockBytes >= beatBytes)
val blocks = ways * sets
val sizeBytes = blocks * blockBytes
val blockBeats = blockBytes/beatBytes
}
case class InclusiveCachePortParameters(
a: BufferParams,
b: BufferParams,
c: BufferParams,
d: BufferParams,
e: BufferParams)
{
def apply()(implicit p: Parameters, valName: ValName) = LazyModule(new TLBuffer(a, b, c, d, e))
}
object InclusiveCachePortParameters
{
val none = InclusiveCachePortParameters(
a = BufferParams.none,
b = BufferParams.none,
c = BufferParams.none,
d = BufferParams.none,
e = BufferParams.none)
val full = InclusiveCachePortParameters(
a = BufferParams.default,
b = BufferParams.default,
c = BufferParams.default,
d = BufferParams.default,
e = BufferParams.default)
// This removes feed-through paths from C=>A and A=>C
val fullC = InclusiveCachePortParameters(
a = BufferParams.none,
b = BufferParams.none,
c = BufferParams.default,
d = BufferParams.none,
e = BufferParams.none)
val flowAD = InclusiveCachePortParameters(
a = BufferParams.flow,
b = BufferParams.none,
c = BufferParams.none,
d = BufferParams.flow,
e = BufferParams.none)
val flowAE = InclusiveCachePortParameters(
a = BufferParams.flow,
b = BufferParams.none,
c = BufferParams.none,
d = BufferParams.none,
e = BufferParams.flow)
// For innerBuf:
// SinkA: no restrictions, flows into scheduler+putbuffer
// SourceB: no restrictions, flows out of scheduler
// sinkC: no restrictions, flows into scheduler+putbuffer & buffered to bankedStore
// SourceD: no restrictions, flows out of bankedStore/regout
// SinkE: no restrictions, flows into scheduler
//
// ... so while none is possible, you probably want at least flowAC to cut ready
// from the scheduler delay and flowD to ease SourceD back-pressure
  // For outerBuf:
// SourceA: must not be pipe, flows out of scheduler
// SinkB: no restrictions, flows into scheduler
// SourceC: pipe is useless, flows out of bankedStore/regout, parameter depth ignored
// SinkD: no restrictions, flows into scheduler & bankedStore
// SourceE: must not be pipe, flows out of scheduler
//
// ... AE take the channel ready into the scheduler, so you need at least flowAE
}
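// A hypothetical configuration sketch (not part of the original API) following the guidance
// above: flow stages on inner A/C/D to cut the ready path from the scheduler and ease SourceD
// back-pressure, combined with the recommended flowAE outer buffer. The parameter names below
// are the real case-class fields; the chosen values are only an illustration.
//   val innerFlowACD = InclusiveCachePortParameters(
//     a = BufferParams.flow,
//     b = BufferParams.none,
//     c = BufferParams.flow,
//     d = BufferParams.flow,
//     e = BufferParams.none)
//   val microExample = InclusiveCacheMicroParameters(
//     writeBytes = 8,
//     innerBuf = innerFlowACD,
//     outerBuf = InclusiveCachePortParameters.flowAE)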
case class InclusiveCacheMicroParameters(
writeBytes: Int, // backing store update granularity
memCycles: Int = 40, // # of L2 clock cycles for a memory round-trip (50ns @ 800MHz)
portFactor: Int = 4, // numSubBanks = (widest TL port * portFactor) / writeBytes
dirReg: Boolean = false,
innerBuf: InclusiveCachePortParameters = InclusiveCachePortParameters.fullC, // or none
outerBuf: InclusiveCachePortParameters = InclusiveCachePortParameters.full) // or flowAE
{
require (writeBytes > 0 && isPow2(writeBytes))
require (memCycles > 0)
  require (portFactor >= 2) // for inner RMW and concurrent outer Release + Grant
}
case class InclusiveCacheControlParameters(
address: BigInt,
beatBytes: Int,
bankedControl: Boolean)
case class InclusiveCacheParameters(
cache: CacheParameters,
micro: InclusiveCacheMicroParameters,
control: Boolean,
inner: TLEdgeIn,
outer: TLEdgeOut)(implicit val p: Parameters)
{
require (cache.ways > 1)
require (cache.sets > 1 && isPow2(cache.sets))
require (micro.writeBytes <= inner.manager.beatBytes)
require (micro.writeBytes <= outer.manager.beatBytes)
require (inner.manager.beatBytes <= cache.blockBytes)
require (outer.manager.beatBytes <= cache.blockBytes)
// Require that all cached address ranges have contiguous blocks
outer.manager.managers.flatMap(_.address).foreach { a =>
require (a.alignment >= cache.blockBytes)
}
// If we are the first level cache, we do not need to support inner-BCE
val firstLevel = !inner.client.clients.exists(_.supports.probe)
// If we are the last level cache, we do not need to support outer-B
val lastLevel = !outer.manager.managers.exists(_.regionType > RegionType.UNCACHED)
require (lastLevel)
// Provision enough resources to achieve full throughput with missing single-beat accesses
val mshrs = InclusiveCacheParameters.all_mshrs(cache, micro)
val secondary = max(mshrs, micro.memCycles - mshrs)
val putLists = micro.memCycles // allow every request to be single beat
val putBeats = max(2*cache.blockBeats, micro.memCycles)
val relLists = 2
val relBeats = relLists*cache.blockBeats
val flatAddresses = AddressSet.unify(outer.manager.managers.flatMap(_.address))
val pickMask = AddressDecoder(flatAddresses.map(Seq(_)), flatAddresses.map(_.mask).reduce(_|_))
def bitOffsets(x: BigInt, offset: Int = 0, tail: List[Int] = List.empty[Int]): List[Int] =
if (x == 0) tail.reverse else bitOffsets(x >> 1, offset + 1, if ((x & 1) == 1) offset :: tail else tail)
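  // Worked example (illustration only): bitOffsets(0x2C) = bitOffsets(0b101100) returns
  // List(2, 3, 5), the positions of the set bits from LSB to MSB, so addressMapping lists
  // which physical address bits participate in tag/set/offset selection.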
val addressMapping = bitOffsets(pickMask)
val addressBits = addressMapping.size
// println(s"addresses: ${flatAddresses} => ${pickMask} => ${addressBits}")
val allClients = inner.client.clients.size
val clientBitsRaw = inner.client.clients.filter(_.supports.probe).size
val clientBits = max(1, clientBitsRaw)
val stateBits = 2
val wayBits = log2Ceil(cache.ways)
val setBits = log2Ceil(cache.sets)
val offsetBits = log2Ceil(cache.blockBytes)
val tagBits = addressBits - setBits - offsetBits
val putBits = log2Ceil(max(putLists, relLists))
require (tagBits > 0)
require (offsetBits > 0)
val innerBeatBits = (offsetBits - log2Ceil(inner.manager.beatBytes)) max 1
val outerBeatBits = (offsetBits - log2Ceil(outer.manager.beatBytes)) max 1
val innerMaskBits = inner.manager.beatBytes / micro.writeBytes
val outerMaskBits = outer.manager.beatBytes / micro.writeBytes
def clientBit(source: UInt): UInt = {
if (clientBitsRaw == 0) {
0.U
} else {
Cat(inner.client.clients.filter(_.supports.probe).map(_.sourceId.contains(source)).reverse)
}
}
def clientSource(bit: UInt): UInt = {
if (clientBitsRaw == 0) {
0.U
} else {
Mux1H(bit, inner.client.clients.filter(_.supports.probe).map(c => c.sourceId.start.U))
}
}
def parseAddress(x: UInt): (UInt, UInt, UInt) = {
val offset = Cat(addressMapping.map(o => x(o,o)).reverse)
val set = offset >> offsetBits
val tag = set >> setBits
(tag(tagBits-1, 0), set(setBits-1, 0), offset(offsetBits-1, 0))
}
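  // Worked example (illustration only, assuming addressMapping is the identity so the picked
  // bits are just the raw address): with offsetBits = 6 and setBits = 10, parseAddress(0x1040.U)
  // yields offset = 0x00, set = 0x41, and tag = 0x0.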
def widen(x: UInt, width: Int): UInt = {
val y = x | 0.U(width.W)
assert (y >> width === 0.U)
y(width-1, 0)
}
def expandAddress(tag: UInt, set: UInt, offset: UInt): UInt = {
val base = Cat(widen(tag, tagBits), widen(set, setBits), widen(offset, offsetBits))
val bits = Array.fill(outer.bundle.addressBits) { 0.U(1.W) }
addressMapping.zipWithIndex.foreach { case (a, i) => bits(a) = base(i,i) }
Cat(bits.reverse)
}
def restoreAddress(expanded: UInt): UInt = {
val missingBits = flatAddresses
.map { a => (a.widen(pickMask).base, a.widen(~pickMask)) } // key is the bits to restore on match
.groupBy(_._1)
.view
.mapValues(_.map(_._2))
val muxMask = AddressDecoder(missingBits.values.toList)
val mux = missingBits.toList.map { case (bits, addrs) =>
val widen = addrs.map(_.widen(~muxMask))
val matches = AddressSet
.unify(widen.distinct)
.map(_.contains(expanded))
.reduce(_ || _)
(matches, bits.U)
}
expanded | Mux1H(mux)
}
def dirReg[T <: Data](x: T, en: Bool = true.B): T = {
if (micro.dirReg) RegEnable(x, en) else x
}
def ccover(cond: Bool, label: String, desc: String)(implicit sourceInfo: SourceInfo) =
cover(cond, "CCACHE_L" + cache.level + "_" + label, "MemorySystem;;" + desc)
}
object MetaData
{
val stateBits = 2
def INVALID: UInt = 0.U(stateBits.W) // way is empty
def BRANCH: UInt = 1.U(stateBits.W) // outer slave cache is trunk
def TRUNK: UInt = 2.U(stateBits.W) // unique inner master cache is trunk
def TIP: UInt = 3.U(stateBits.W) // we are trunk, inner masters are branch
// Does a request need trunk?
def needT(opcode: UInt, param: UInt): Bool = {
!opcode(2) ||
(opcode === TLMessages.Hint && param === TLHints.PREFETCH_WRITE) ||
((opcode === TLMessages.AcquireBlock || opcode === TLMessages.AcquirePerm) && param =/= TLPermissions.NtoB)
}
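  // Concretely: !opcode(2) matches PutFullData/PutPartialData/ArithmeticData/LogicalData
  // (opcodes 0-3, all of which write), PREFETCH_WRITE Hints need T, and AcquireBlock/AcquirePerm
  // need T unless the requested growth is NtoB; Get and PREFETCH_READ Hints do not need T.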
// Does a request prove the client need not be probed?
def skipProbeN(opcode: UInt, hintsSkipProbe: Boolean): Bool = {
// Acquire(toB) and Get => is N, so no probe
// Acquire(*toT) => is N or B, but need T, so no probe
    // Hint => could be anything, so a probe IS needed; if hintsSkipProbe is enabled, skip probing the same client
// Put* => is N or B, so probe IS needed
opcode === TLMessages.AcquireBlock || opcode === TLMessages.AcquirePerm || opcode === TLMessages.Get || (opcode === TLMessages.Hint && hintsSkipProbe.B)
}
def isToN(param: UInt): Bool = {
param === TLPermissions.TtoN || param === TLPermissions.BtoN || param === TLPermissions.NtoN
}
def isToB(param: UInt): Bool = {
param === TLPermissions.TtoB || param === TLPermissions.BtoB
}
}
object InclusiveCacheParameters
{
val lfsrBits = 10
val L2ControlAddress = 0x2010000
val L2ControlSize = 0x1000
def out_mshrs(cache: CacheParameters, micro: InclusiveCacheMicroParameters): Int = {
// We need 2-3 normal MSHRs to cover the Directory latency
    // To fully exploit memory bandwidth-delay-product, we need memCycles/blockBeats MSHRs
max(if (micro.dirReg) 3 else 2, (micro.memCycles + cache.blockBeats - 1) / cache.blockBeats)
}
def all_mshrs(cache: CacheParameters, micro: InclusiveCacheMicroParameters): Int =
// We need a dedicated MSHR for B+C each
2 + out_mshrs(cache, micro)
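  // Worked example (illustrative numbers): with the default memCycles = 40, a 64-byte block and
  // a 16-byte inner beat give blockBeats = 4, so out_mshrs = max(2, ceil(40/4)) = 10 and
  // all_mshrs = 12, the extra two being the MSHRs dedicated to nested B and C requests.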
}
class InclusiveCacheBundle(params: InclusiveCacheParameters) extends Bundle
File Edges.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.util._
class TLEdge(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdgeParameters(client, manager, params, sourceInfo)
{
def isAligned(address: UInt, lgSize: UInt): Bool = {
if (maxLgSize == 0) true.B else {
val mask = UIntToOH1(lgSize, maxLgSize)
(address & mask) === 0.U
}
}
def mask(address: UInt, lgSize: UInt): UInt =
MaskGen(address, lgSize, manager.beatBytes)
def staticHasData(bundle: TLChannel): Option[Boolean] = {
bundle match {
case _:TLBundleA => {
// Do there exist A messages with Data?
val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial
// Do there exist A messages without Data?
val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint
// Statically optimize the case where hasData is a constant
if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None
}
case _:TLBundleB => {
// Do there exist B messages with Data?
val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial
// Do there exist B messages without Data?
val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint
// Statically optimize the case where hasData is a constant
if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None
}
case _:TLBundleC => {
        // Do there exist C messages with Data?
val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe
// Do there exist C messages without Data?
val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe
if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None
}
case _:TLBundleD => {
        // Do there exist D messages with Data?
val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB
// Do there exist D messages without Data?
val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT
if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None
}
case _:TLBundleE => Some(false)
}
}
def isRequest(x: TLChannel): Bool = {
x match {
case a: TLBundleA => true.B
case b: TLBundleB => true.B
case c: TLBundleC => c.opcode(2) && c.opcode(1)
// opcode === TLMessages.Release ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(2) && !d.opcode(1)
// opcode === TLMessages.Grant ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
}
def isResponse(x: TLChannel): Bool = {
x match {
case a: TLBundleA => false.B
case b: TLBundleB => false.B
case c: TLBundleC => !c.opcode(2) || !c.opcode(1)
// opcode =/= TLMessages.Release &&
// opcode =/= TLMessages.ReleaseData
case d: TLBundleD => true.B // Grant isResponse + isRequest
case e: TLBundleE => true.B
}
}
def hasData(x: TLChannel): Bool = {
val opdata = x match {
case a: TLBundleA => !a.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case b: TLBundleB => !b.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case c: TLBundleC => c.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.ProbeAckData ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
staticHasData(x).map(_.B).getOrElse(opdata)
}
def opcode(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.opcode
case b: TLBundleB => b.opcode
case c: TLBundleC => c.opcode
case d: TLBundleD => d.opcode
}
}
def param(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.param
case b: TLBundleB => b.param
case c: TLBundleC => c.param
case d: TLBundleD => d.param
}
}
def size(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.size
case b: TLBundleB => b.size
case c: TLBundleC => c.size
case d: TLBundleD => d.size
}
}
def data(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.data
case b: TLBundleB => b.data
case c: TLBundleC => c.data
case d: TLBundleD => d.data
}
}
def corrupt(x: TLDataChannel): Bool = {
x match {
case a: TLBundleA => a.corrupt
case b: TLBundleB => b.corrupt
case c: TLBundleC => c.corrupt
case d: TLBundleD => d.corrupt
}
}
def mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.mask
case b: TLBundleB => b.mask
case c: TLBundleC => mask(c.address, c.size)
}
}
def full_mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => mask(a.address, a.size)
case b: TLBundleB => mask(b.address, b.size)
case c: TLBundleC => mask(c.address, c.size)
}
}
def address(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.address
case b: TLBundleB => b.address
case c: TLBundleC => c.address
}
}
def source(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.source
case b: TLBundleB => b.source
case c: TLBundleC => c.source
case d: TLBundleD => d.source
}
}
def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes)
def addr_lo(x: UInt): UInt =
if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0)
def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x))
def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x))
def numBeats(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 1.U
case bundle: TLDataChannel => {
val hasData = this.hasData(bundle)
val size = this.size(bundle)
val cutoff = log2Ceil(manager.beatBytes)
val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U
val decode = UIntToOH(size, maxLgSize+1) >> cutoff
Mux(hasData, decode | small.asUInt, 1.U)
}
}
}
def numBeats1(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 0.U
case bundle: TLDataChannel => {
if (maxLgSize == 0) {
0.U
} else {
val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes)
Mux(hasData(bundle), decode, 0.U)
}
}
}
}
def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val beats1 = numBeats1(bits)
val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W))
val counter1 = counter - 1.U
val first = counter === 0.U
val last = counter === 1.U || beats1 === 0.U
val done = last && fire
val count = (beats1 & ~counter1)
when (fire) {
counter := Mux(first, beats1, counter1)
}
(first, last, done, count)
}
def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1
def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire)
def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid)
def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2
def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire)
def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid)
def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3
def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire)
def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid)
def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3)
}
def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire)
def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid)
def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4)
}
def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire)
def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid)
def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes))
}
def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire)
def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid)
// Does the request need T permissions to be executed?
def needT(a: TLBundleA): Bool = {
val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLPermissions.NtoB -> false.B,
TLPermissions.NtoT -> true.B,
TLPermissions.BtoT -> true.B))
MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array(
TLMessages.PutFullData -> true.B,
TLMessages.PutPartialData -> true.B,
TLMessages.ArithmeticData -> true.B,
TLMessages.LogicalData -> true.B,
TLMessages.Get -> false.B,
TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLHints.PREFETCH_READ -> false.B,
TLHints.PREFETCH_WRITE -> true.B)),
TLMessages.AcquireBlock -> acq_needT,
TLMessages.AcquirePerm -> acq_needT))
}
// This is a very expensive circuit; use only if you really mean it!
def inFlight(x: TLBundle): (UInt, UInt) = {
val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W))
val bce = manager.anySupportAcquireB && client.anySupportProbe
val (a_first, a_last, _) = firstlast(x.a)
val (b_first, b_last, _) = firstlast(x.b)
val (c_first, c_last, _) = firstlast(x.c)
val (d_first, d_last, _) = firstlast(x.d)
val (e_first, e_last, _) = firstlast(x.e)
val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits))
val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits))
val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits))
val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits))
val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits))
val a_inc = x.a.fire && a_first && a_request
val b_inc = x.b.fire && b_first && b_request
val c_inc = x.c.fire && c_first && c_request
val d_inc = x.d.fire && d_first && d_request
val e_inc = x.e.fire && e_first && e_request
val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil))
val a_dec = x.a.fire && a_last && a_response
val b_dec = x.b.fire && b_last && b_response
val c_dec = x.c.fire && c_last && c_response
val d_dec = x.d.fire && d_last && d_response
val e_dec = x.e.fire && e_last && e_response
val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil))
val next_flight = flight + PopCount(inc) - PopCount(dec)
flight := next_flight
(flight, next_flight)
}
def prettySourceMapping(context: String): String = {
s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n"
}
}
class TLEdgeOut(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
// Transfers
def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquireBlock
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquirePerm
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.Release
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ReleaseData
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) =
Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B)
def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAck
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions, data)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAckData
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B)
def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink)
def GrantAck(toSink: UInt): TLBundleE = {
val e = Wire(new TLBundleE(bundle))
e.sink := toSink
e
}
// Accesses
def Get(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsGetFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Get
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutFullFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutFullData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, mask, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutPartialFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutPartialData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = {
require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsArithmeticFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.ArithmeticData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsLogicalFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.LogicalData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = {
require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsHintFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Hint
a.param := param
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data)
def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAckData
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size)
def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.HintAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
}
class TLEdgeIn(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = {
val todo = x.filter(!_.isEmpty)
val heads = todo.map(_.head)
val tails = todo.map(_.tail)
if (todo.isEmpty) Nil else { heads +: myTranspose(tails) }
}
// Transfers
def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = {
require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}")
val legal = client.supportsProbe(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Probe
b.param := capPermissions
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.Grant
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.GrantData
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B)
def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.ReleaseAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
// Accesses
def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = {
require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsGet(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Get
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsPutFull(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutFullData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, mask, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsPutPartial(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutPartialData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsArithmetic(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.ArithmeticData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsLogical(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.LogicalData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = {
require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsHint(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Hint
b.param := param
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size)
def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied)
def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data)
def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAckData
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B)
def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied)
def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B)
def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.HintAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
}
| module SinkC( // @[SinkC.scala:41:7]
input clock, // @[SinkC.scala:41:7]
input reset, // @[SinkC.scala:41:7]
input io_req_ready, // @[SinkC.scala:43:14]
output io_req_valid, // @[SinkC.scala:43:14]
output [2:0] io_req_bits_opcode, // @[SinkC.scala:43:14]
output [2:0] io_req_bits_param, // @[SinkC.scala:43:14]
output [2:0] io_req_bits_size, // @[SinkC.scala:43:14]
output [6:0] io_req_bits_source, // @[SinkC.scala:43:14]
output [12:0] io_req_bits_tag, // @[SinkC.scala:43:14]
output [5:0] io_req_bits_offset, // @[SinkC.scala:43:14]
output [5:0] io_req_bits_put, // @[SinkC.scala:43:14]
output [9:0] io_req_bits_set, // @[SinkC.scala:43:14]
output io_resp_valid, // @[SinkC.scala:43:14]
output io_resp_bits_last, // @[SinkC.scala:43:14]
output [9:0] io_resp_bits_set, // @[SinkC.scala:43:14]
output [12:0] io_resp_bits_tag, // @[SinkC.scala:43:14]
output [6:0] io_resp_bits_source, // @[SinkC.scala:43:14]
output [2:0] io_resp_bits_param, // @[SinkC.scala:43:14]
output io_resp_bits_data, // @[SinkC.scala:43:14]
output io_c_ready, // @[SinkC.scala:43:14]
input io_c_valid, // @[SinkC.scala:43:14]
input [2:0] io_c_bits_opcode, // @[SinkC.scala:43:14]
input [2:0] io_c_bits_param, // @[SinkC.scala:43:14]
input [2:0] io_c_bits_size, // @[SinkC.scala:43:14]
input [6:0] io_c_bits_source, // @[SinkC.scala:43:14]
input [31:0] io_c_bits_address, // @[SinkC.scala:43:14]
input [127:0] io_c_bits_data, // @[SinkC.scala:43:14]
input io_c_bits_corrupt, // @[SinkC.scala:43:14]
output [9:0] io_set, // @[SinkC.scala:43:14]
input [2:0] io_way, // @[SinkC.scala:43:14]
input io_bs_adr_ready, // @[SinkC.scala:43:14]
output io_bs_adr_valid, // @[SinkC.scala:43:14]
output io_bs_adr_bits_noop, // @[SinkC.scala:43:14]
output [2:0] io_bs_adr_bits_way, // @[SinkC.scala:43:14]
output [9:0] io_bs_adr_bits_set, // @[SinkC.scala:43:14]
output [1:0] io_bs_adr_bits_beat, // @[SinkC.scala:43:14]
output [1:0] io_bs_adr_bits_mask, // @[SinkC.scala:43:14]
output [127:0] io_bs_dat_data, // @[SinkC.scala:43:14]
output io_rel_pop_ready, // @[SinkC.scala:43:14]
input io_rel_pop_valid, // @[SinkC.scala:43:14]
input [5:0] io_rel_pop_bits_index, // @[SinkC.scala:43:14]
input io_rel_pop_bits_last, // @[SinkC.scala:43:14]
output [127:0] io_rel_beat_data, // @[SinkC.scala:43:14]
output io_rel_beat_corrupt // @[SinkC.scala:43:14]
);
wire [9:0] io_set_0; // @[SinkC.scala:41:7]
wire _putbuffer_io_push_ready; // @[SinkC.scala:115:27]
wire [1:0] _putbuffer_io_valid; // @[SinkC.scala:115:27]
wire _c_q_io_deq_valid; // @[Decoupled.scala:362:21]
wire [2:0] _c_q_io_deq_bits_opcode; // @[Decoupled.scala:362:21]
wire [2:0] _c_q_io_deq_bits_param; // @[Decoupled.scala:362:21]
wire [2:0] _c_q_io_deq_bits_size; // @[Decoupled.scala:362:21]
wire [6:0] _c_q_io_deq_bits_source; // @[Decoupled.scala:362:21]
wire [31:0] _c_q_io_deq_bits_address; // @[Decoupled.scala:362:21]
wire [127:0] _c_q_io_deq_bits_data; // @[Decoupled.scala:362:21]
wire _c_q_io_deq_bits_corrupt; // @[Decoupled.scala:362:21]
wire io_req_ready_0 = io_req_ready; // @[SinkC.scala:41:7]
wire io_c_valid_0 = io_c_valid; // @[SinkC.scala:41:7]
wire [2:0] io_c_bits_opcode_0 = io_c_bits_opcode; // @[SinkC.scala:41:7]
wire [2:0] io_c_bits_param_0 = io_c_bits_param; // @[SinkC.scala:41:7]
wire [2:0] io_c_bits_size_0 = io_c_bits_size; // @[SinkC.scala:41:7]
wire [6:0] io_c_bits_source_0 = io_c_bits_source; // @[SinkC.scala:41:7]
wire [31:0] io_c_bits_address_0 = io_c_bits_address; // @[SinkC.scala:41:7]
wire [127:0] io_c_bits_data_0 = io_c_bits_data; // @[SinkC.scala:41:7]
wire io_c_bits_corrupt_0 = io_c_bits_corrupt; // @[SinkC.scala:41:7]
wire [2:0] io_way_0 = io_way; // @[SinkC.scala:41:7]
wire io_bs_adr_ready_0 = io_bs_adr_ready; // @[SinkC.scala:41:7]
wire io_rel_pop_valid_0 = io_rel_pop_valid; // @[SinkC.scala:41:7]
wire [5:0] io_rel_pop_bits_index_0 = io_rel_pop_bits_index; // @[SinkC.scala:41:7]
wire io_rel_pop_bits_last_0 = io_rel_pop_bits_last; // @[SinkC.scala:41:7]
wire io_req_bits_prio_0 = 1'h0; // @[SinkC.scala:41:7]
wire io_req_bits_prio_1 = 1'h0; // @[SinkC.scala:41:7]
wire io_req_bits_control = 1'h0; // @[SinkC.scala:41:7]
wire io_req_bits_prio_2 = 1'h1; // @[SinkC.scala:41:7]
wire [1:0] bs_adr_bits_mask = 2'h3; // @[SinkC.scala:96:22]
wire [1:0] _bs_adr_bits_mask_T = 2'h3; // @[SinkC.scala:104:25]
wire _io_req_valid_T_6; // @[SinkC.scala:136:61]
wire [12:0] tag_1; // @[Parameters.scala:217:9]
wire [5:0] offset_1; // @[Parameters.scala:217:50]
wire [9:0] set_1; // @[Parameters.scala:217:28]
wire _io_resp_valid_T_5; // @[SinkC.scala:107:57]
wire last; // @[Edges.scala:232:33]
wire hasData; // @[Edges.scala:102:36]
wire [9:0] _io_set_T; // @[SinkC.scala:92:18]
wire [9:0] bs_adr_bits_set = io_set_0; // @[SinkC.scala:41:7, :96:22]
wire [2:0] bs_adr_bits_way = io_way_0; // @[SinkC.scala:41:7, :96:22]
wire _io_rel_pop_ready_T_2; // @[SinkC.scala:160:43]
wire [2:0] io_req_bits_opcode_0; // @[SinkC.scala:41:7]
wire [2:0] io_req_bits_param_0; // @[SinkC.scala:41:7]
wire [2:0] io_req_bits_size_0; // @[SinkC.scala:41:7]
wire [6:0] io_req_bits_source_0; // @[SinkC.scala:41:7]
wire [12:0] io_req_bits_tag_0; // @[SinkC.scala:41:7]
wire [5:0] io_req_bits_offset_0; // @[SinkC.scala:41:7]
wire [5:0] io_req_bits_put_0; // @[SinkC.scala:41:7]
wire [9:0] io_req_bits_set_0; // @[SinkC.scala:41:7]
wire io_req_valid_0; // @[SinkC.scala:41:7]
wire io_resp_bits_last_0; // @[SinkC.scala:41:7]
wire [9:0] io_resp_bits_set_0; // @[SinkC.scala:41:7]
wire [12:0] io_resp_bits_tag_0; // @[SinkC.scala:41:7]
wire [6:0] io_resp_bits_source_0; // @[SinkC.scala:41:7]
wire [2:0] io_resp_bits_param_0; // @[SinkC.scala:41:7]
wire io_resp_bits_data_0; // @[SinkC.scala:41:7]
wire io_resp_valid_0; // @[SinkC.scala:41:7]
wire io_c_ready_0; // @[SinkC.scala:41:7]
wire io_bs_adr_bits_noop_0; // @[SinkC.scala:41:7]
wire [2:0] io_bs_adr_bits_way_0; // @[SinkC.scala:41:7]
wire [9:0] io_bs_adr_bits_set_0; // @[SinkC.scala:41:7]
wire [1:0] io_bs_adr_bits_beat_0; // @[SinkC.scala:41:7]
wire [1:0] io_bs_adr_bits_mask_0; // @[SinkC.scala:41:7]
wire io_bs_adr_valid_0; // @[SinkC.scala:41:7]
wire [127:0] io_bs_dat_data_0; // @[SinkC.scala:41:7]
wire io_rel_pop_ready_0; // @[SinkC.scala:41:7]
wire [127:0] io_rel_beat_data_0; // @[SinkC.scala:41:7]
wire io_rel_beat_corrupt_0; // @[SinkC.scala:41:7]
wire _offset_T = _c_q_io_deq_bits_address[0]; // @[Decoupled.scala:362:21]
wire _offset_T_1 = _c_q_io_deq_bits_address[1]; // @[Decoupled.scala:362:21]
wire _offset_T_2 = _c_q_io_deq_bits_address[2]; // @[Decoupled.scala:362:21]
wire _offset_T_3 = _c_q_io_deq_bits_address[3]; // @[Decoupled.scala:362:21]
wire _offset_T_4 = _c_q_io_deq_bits_address[4]; // @[Decoupled.scala:362:21]
wire _offset_T_5 = _c_q_io_deq_bits_address[5]; // @[Decoupled.scala:362:21]
wire _offset_T_6 = _c_q_io_deq_bits_address[6]; // @[Decoupled.scala:362:21]
wire _offset_T_7 = _c_q_io_deq_bits_address[7]; // @[Decoupled.scala:362:21]
wire _offset_T_8 = _c_q_io_deq_bits_address[8]; // @[Decoupled.scala:362:21]
wire _offset_T_9 = _c_q_io_deq_bits_address[9]; // @[Decoupled.scala:362:21]
wire _offset_T_10 = _c_q_io_deq_bits_address[10]; // @[Decoupled.scala:362:21]
wire _offset_T_11 = _c_q_io_deq_bits_address[11]; // @[Decoupled.scala:362:21]
wire _offset_T_12 = _c_q_io_deq_bits_address[12]; // @[Decoupled.scala:362:21]
wire _offset_T_13 = _c_q_io_deq_bits_address[13]; // @[Decoupled.scala:362:21]
wire _offset_T_14 = _c_q_io_deq_bits_address[14]; // @[Decoupled.scala:362:21]
wire _offset_T_15 = _c_q_io_deq_bits_address[15]; // @[Decoupled.scala:362:21]
wire _offset_T_16 = _c_q_io_deq_bits_address[16]; // @[Decoupled.scala:362:21]
wire _offset_T_17 = _c_q_io_deq_bits_address[17]; // @[Decoupled.scala:362:21]
wire _offset_T_18 = _c_q_io_deq_bits_address[18]; // @[Decoupled.scala:362:21]
wire _offset_T_19 = _c_q_io_deq_bits_address[19]; // @[Decoupled.scala:362:21]
wire _offset_T_20 = _c_q_io_deq_bits_address[20]; // @[Decoupled.scala:362:21]
wire _offset_T_21 = _c_q_io_deq_bits_address[21]; // @[Decoupled.scala:362:21]
wire _offset_T_22 = _c_q_io_deq_bits_address[22]; // @[Decoupled.scala:362:21]
wire _offset_T_23 = _c_q_io_deq_bits_address[23]; // @[Decoupled.scala:362:21]
wire _offset_T_24 = _c_q_io_deq_bits_address[24]; // @[Decoupled.scala:362:21]
wire _offset_T_25 = _c_q_io_deq_bits_address[25]; // @[Decoupled.scala:362:21]
wire _offset_T_26 = _c_q_io_deq_bits_address[26]; // @[Decoupled.scala:362:21]
wire _offset_T_27 = _c_q_io_deq_bits_address[27]; // @[Decoupled.scala:362:21]
wire _offset_T_28 = _c_q_io_deq_bits_address[31]; // @[Decoupled.scala:362:21]
wire [1:0] offset_lo_lo_lo_hi = {_offset_T_2, _offset_T_1}; // @[Parameters.scala:214:{21,47}]
wire [2:0] offset_lo_lo_lo = {offset_lo_lo_lo_hi, _offset_T}; // @[Parameters.scala:214:{21,47}]
wire [1:0] offset_lo_lo_hi_lo = {_offset_T_4, _offset_T_3}; // @[Parameters.scala:214:{21,47}]
wire [1:0] offset_lo_lo_hi_hi = {_offset_T_6, _offset_T_5}; // @[Parameters.scala:214:{21,47}]
wire [3:0] offset_lo_lo_hi = {offset_lo_lo_hi_hi, offset_lo_lo_hi_lo}; // @[Parameters.scala:214:21]
wire [6:0] offset_lo_lo = {offset_lo_lo_hi, offset_lo_lo_lo}; // @[Parameters.scala:214:21]
wire [1:0] offset_lo_hi_lo_hi = {_offset_T_9, _offset_T_8}; // @[Parameters.scala:214:{21,47}]
wire [2:0] offset_lo_hi_lo = {offset_lo_hi_lo_hi, _offset_T_7}; // @[Parameters.scala:214:{21,47}]
wire [1:0] offset_lo_hi_hi_lo = {_offset_T_11, _offset_T_10}; // @[Parameters.scala:214:{21,47}]
wire [1:0] offset_lo_hi_hi_hi = {_offset_T_13, _offset_T_12}; // @[Parameters.scala:214:{21,47}]
wire [3:0] offset_lo_hi_hi = {offset_lo_hi_hi_hi, offset_lo_hi_hi_lo}; // @[Parameters.scala:214:21]
wire [6:0] offset_lo_hi = {offset_lo_hi_hi, offset_lo_hi_lo}; // @[Parameters.scala:214:21]
wire [13:0] offset_lo = {offset_lo_hi, offset_lo_lo}; // @[Parameters.scala:214:21]
wire [1:0] offset_hi_lo_lo_hi = {_offset_T_16, _offset_T_15}; // @[Parameters.scala:214:{21,47}]
wire [2:0] offset_hi_lo_lo = {offset_hi_lo_lo_hi, _offset_T_14}; // @[Parameters.scala:214:{21,47}]
wire [1:0] offset_hi_lo_hi_lo = {_offset_T_18, _offset_T_17}; // @[Parameters.scala:214:{21,47}]
wire [1:0] offset_hi_lo_hi_hi = {_offset_T_20, _offset_T_19}; // @[Parameters.scala:214:{21,47}]
wire [3:0] offset_hi_lo_hi = {offset_hi_lo_hi_hi, offset_hi_lo_hi_lo}; // @[Parameters.scala:214:21]
wire [6:0] offset_hi_lo = {offset_hi_lo_hi, offset_hi_lo_lo}; // @[Parameters.scala:214:21]
wire [1:0] offset_hi_hi_lo_lo = {_offset_T_22, _offset_T_21}; // @[Parameters.scala:214:{21,47}]
wire [1:0] offset_hi_hi_lo_hi = {_offset_T_24, _offset_T_23}; // @[Parameters.scala:214:{21,47}]
wire [3:0] offset_hi_hi_lo = {offset_hi_hi_lo_hi, offset_hi_hi_lo_lo}; // @[Parameters.scala:214:21]
wire [1:0] offset_hi_hi_hi_lo = {_offset_T_26, _offset_T_25}; // @[Parameters.scala:214:{21,47}]
wire [1:0] offset_hi_hi_hi_hi = {_offset_T_28, _offset_T_27}; // @[Parameters.scala:214:{21,47}]
wire [3:0] offset_hi_hi_hi = {offset_hi_hi_hi_hi, offset_hi_hi_hi_lo}; // @[Parameters.scala:214:21]
wire [7:0] offset_hi_hi = {offset_hi_hi_hi, offset_hi_hi_lo}; // @[Parameters.scala:214:21]
wire [14:0] offset_hi = {offset_hi_hi, offset_hi_lo}; // @[Parameters.scala:214:21]
wire [28:0] offset = {offset_hi, offset_lo}; // @[Parameters.scala:214:21]
wire [22:0] set = offset[28:6]; // @[Parameters.scala:214:21, :215:22]
wire [12:0] tag = set[22:10]; // @[Parameters.scala:215:22, :216:19]
assign tag_1 = tag; // @[Parameters.scala:216:19, :217:9]
assign io_req_bits_tag_0 = tag_1; // @[SinkC.scala:41:7]
assign io_resp_bits_tag_0 = tag_1; // @[SinkC.scala:41:7]
assign set_1 = set[9:0]; // @[Parameters.scala:215:22, :217:28]
assign io_req_bits_set_0 = set_1; // @[SinkC.scala:41:7]
assign io_resp_bits_set_0 = set_1; // @[SinkC.scala:41:7]
assign offset_1 = offset[5:0]; // @[Parameters.scala:214:21, :217:50]
assign io_req_bits_offset_0 = offset_1; // @[SinkC.scala:41:7]
wire _q_io_deq_ready_T_7; // @[SinkC.scala:134:19]
wire _T = _q_io_deq_ready_T_7 & _c_q_io_deq_valid; // @[Decoupled.scala:51:35, :362:21]
wire [12:0] _r_beats1_decode_T = 13'h3F << _c_q_io_deq_bits_size; // @[Decoupled.scala:362:21]
wire [5:0] _r_beats1_decode_T_1 = _r_beats1_decode_T[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] _r_beats1_decode_T_2 = ~_r_beats1_decode_T_1; // @[package.scala:243:{46,76}]
wire [1:0] r_beats1_decode = _r_beats1_decode_T_2[5:4]; // @[package.scala:243:46]
wire r_beats1_opdata = _c_q_io_deq_bits_opcode[0]; // @[Decoupled.scala:362:21]
assign hasData = _c_q_io_deq_bits_opcode[0]; // @[Decoupled.scala:362:21]
wire [1:0] r_beats1 = r_beats1_opdata ? r_beats1_decode : 2'h0; // @[Edges.scala:102:36, :220:59, :221:14]
reg [1:0] r_counter; // @[Edges.scala:229:27]
wire [2:0] _r_counter1_T = {1'h0, r_counter} - 3'h1; // @[Edges.scala:229:27, :230:28]
wire [1:0] r_counter1 = _r_counter1_T[1:0]; // @[Edges.scala:230:28]
wire first = r_counter == 2'h0; // @[Edges.scala:229:27, :231:25]
wire _r_last_T = r_counter == 2'h1; // @[Edges.scala:229:27, :232:25]
wire _r_last_T_1 = r_beats1 == 2'h0; // @[Edges.scala:221:14, :232:43]
assign last = _r_last_T | _r_last_T_1; // @[Edges.scala:232:{25,33,43}]
assign io_resp_bits_last_0 = last; // @[Edges.scala:232:33]
wire r_3 = last & _T; // @[Decoupled.scala:51:35]
wire [1:0] _r_count_T = ~r_counter1; // @[Edges.scala:230:28, :234:27]
wire [1:0] beat = r_beats1 & _r_count_T; // @[Edges.scala:221:14, :234:{25,27}]
wire [1:0] _r_counter_T = first ? r_beats1 : r_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
assign io_resp_bits_data_0 = hasData; // @[Edges.scala:102:36]
wire _raw_resp_T = _c_q_io_deq_bits_opcode == 3'h4; // @[Decoupled.scala:362:21]
wire _raw_resp_T_1 = _c_q_io_deq_bits_opcode == 3'h5; // @[Decoupled.scala:362:21]
wire raw_resp = _raw_resp_T | _raw_resp_T_1; // @[SinkC.scala:78:{34,58,75}]
reg resp_r; // @[SinkC.scala:79:48]
wire resp = _c_q_io_deq_valid ? raw_resp : resp_r; // @[Decoupled.scala:362:21] |
Generate the Verilog code corresponding to the following Chisel files.
File Metadata.scala:
// See LICENSE.SiFive for license details.
// See LICENSE.Berkeley for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import freechips.rocketchip.rocket.constants.MemoryOpConstants
import freechips.rocketchip.util._
object ClientStates {
val width = 2
def Nothing = 0.U(width.W)
def Branch = 1.U(width.W)
def Trunk = 2.U(width.W)
def Dirty = 3.U(width.W)
def hasReadPermission(state: UInt): Bool = state > Nothing
def hasWritePermission(state: UInt): Bool = state > Branch
}
object MemoryOpCategories extends MemoryOpConstants {
def wr = Cat(true.B, true.B) // Op actually writes
def wi = Cat(false.B, true.B) // Future op will write
def rd = Cat(false.B, false.B) // Op only reads
def categorize(cmd: UInt): UInt = {
val cat = Cat(isWrite(cmd), isWriteIntent(cmd))
//assert(cat.isOneOf(wr,wi,rd), "Could not categorize command.")
cat
}
}
/** Stores the client-side coherence information,
* such as permissions on the data and whether the data is dirty.
* Its API can be used to make TileLink messages in response to
 * memory operations, cache control operations, or Probe messages.
*/
class ClientMetadata extends Bundle {
/** Actual state information stored in this bundle */
val state = UInt(ClientStates.width.W)
/** Metadata equality */
def ===(rhs: UInt): Bool = state === rhs
def ===(rhs: ClientMetadata): Bool = state === rhs.state
def =/=(rhs: ClientMetadata): Bool = !this.===(rhs)
/** Is the block's data present in this cache */
def isValid(dummy: Int = 0): Bool = state > ClientStates.Nothing
/** Determine whether this cmd misses, and the new state (on hit) or param to be sent (on miss) */
private def growStarter(cmd: UInt): (Bool, UInt) = {
import MemoryOpCategories._
import TLPermissions._
import ClientStates._
val c = categorize(cmd)
MuxTLookup(Cat(c, state), (false.B, 0.U), Seq(
//(effect, am now) -> (was a hit, next)
Cat(rd, Dirty) -> (true.B, Dirty),
Cat(rd, Trunk) -> (true.B, Trunk),
Cat(rd, Branch) -> (true.B, Branch),
Cat(wi, Dirty) -> (true.B, Dirty),
Cat(wi, Trunk) -> (true.B, Trunk),
Cat(wr, Dirty) -> (true.B, Dirty),
Cat(wr, Trunk) -> (true.B, Dirty),
//(effect, am now) -> (was a miss, param)
Cat(rd, Nothing) -> (false.B, NtoB),
Cat(wi, Branch) -> (false.B, BtoT),
Cat(wi, Nothing) -> (false.B, NtoT),
Cat(wr, Branch) -> (false.B, BtoT),
Cat(wr, Nothing) -> (false.B, NtoT)))
}
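  // Example readings of the table above: a load (rd) while Branch hits and stays Branch; a
  // store (wr) while Trunk hits and silently upgrades the local copy to Dirty; a store while
  // Branch misses and must send an Acquire with param BtoT before it may write.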
/** Determine what state to go to after miss based on Grant param
* For now, doesn't depend on state (which may have been Probed).
*/
private def growFinisher(cmd: UInt, param: UInt): UInt = {
import MemoryOpCategories._
import TLPermissions._
import ClientStates._
val c = categorize(cmd)
//assert(c === rd || param === toT, "Client was expecting trunk permissions.")
MuxLookup(Cat(c, param), Nothing)(Seq(
//(effect param) -> (next)
Cat(rd, toB) -> Branch,
Cat(rd, toT) -> Trunk,
Cat(wi, toT) -> Trunk,
Cat(wr, toT) -> Dirty))
}
/** Does this cache have permissions on this block sufficient to perform op,
* and what to do next (Acquire message param or updated metadata). */
def onAccess(cmd: UInt): (Bool, UInt, ClientMetadata) = {
val r = growStarter(cmd)
(r._1, r._2, ClientMetadata(r._2))
}
/** Does a secondary miss on the block require another Acquire message */
def onSecondaryAccess(first_cmd: UInt, second_cmd: UInt): (Bool, Bool, UInt, ClientMetadata, UInt) = {
import MemoryOpCategories._
val r1 = growStarter(first_cmd)
val r2 = growStarter(second_cmd)
val needs_second_acq = isWriteIntent(second_cmd) && !isWriteIntent(first_cmd)
val hit_again = r1._1 && r2._1
val dirties = categorize(second_cmd) === wr
val biggest_grow_param = Mux(dirties, r2._2, r1._2)
val dirtiest_state = ClientMetadata(biggest_grow_param)
val dirtiest_cmd = Mux(dirties, second_cmd, first_cmd)
(needs_second_acq, hit_again, biggest_grow_param, dirtiest_state, dirtiest_cmd)
}
/** Metadata change on a returned Grant */
def onGrant(cmd: UInt, param: UInt): ClientMetadata = ClientMetadata(growFinisher(cmd, param))
/** Determine what state to go to based on Probe param */
private def shrinkHelper(param: UInt): (Bool, UInt, UInt) = {
import ClientStates._
import TLPermissions._
MuxTLookup(Cat(param, state), (false.B, 0.U, 0.U), Seq(
//(wanted, am now) -> (hasDirtyData resp, next)
Cat(toT, Dirty) -> (true.B, TtoT, Trunk),
Cat(toT, Trunk) -> (false.B, TtoT, Trunk),
Cat(toT, Branch) -> (false.B, BtoB, Branch),
Cat(toT, Nothing) -> (false.B, NtoN, Nothing),
Cat(toB, Dirty) -> (true.B, TtoB, Branch),
Cat(toB, Trunk) -> (false.B, TtoB, Branch), // Policy: Don't notify on clean downgrade
Cat(toB, Branch) -> (false.B, BtoB, Branch),
Cat(toB, Nothing) -> (false.B, NtoN, Nothing),
Cat(toN, Dirty) -> (true.B, TtoN, Nothing),
Cat(toN, Trunk) -> (false.B, TtoN, Nothing), // Policy: Don't notify on clean downgrade
Cat(toN, Branch) -> (false.B, BtoN, Nothing), // Policy: Don't notify on clean downgrade
Cat(toN, Nothing) -> (false.B, NtoN, Nothing)))
}
/** Translate cache control cmds into Probe param */
private def cmdToPermCap(cmd: UInt): UInt = {
import MemoryOpCategories._
import TLPermissions._
MuxLookup(cmd, toN)(Seq(
M_FLUSH -> toN,
M_PRODUCE -> toB,
M_CLEAN -> toT))
}
def onCacheControl(cmd: UInt): (Bool, UInt, ClientMetadata) = {
val r = shrinkHelper(cmdToPermCap(cmd))
(r._1, r._2, ClientMetadata(r._3))
}
def onProbe(param: UInt): (Bool, UInt, ClientMetadata) = {
val r = shrinkHelper(param)
(r._1, r._2, ClientMetadata(r._3))
}
}
/** Factories for ClientMetadata, including on reset */
object ClientMetadata {
def apply(perm: UInt) = {
val meta = Wire(new ClientMetadata)
meta.state := perm
meta
}
def onReset = ClientMetadata(ClientStates.Nothing)
def maximum = ClientMetadata(ClientStates.Dirty)
}
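// Illustrative usage sketch (not part of the original file; variable names are placeholders):
// a cache pipeline typically drives this API as follows, where `meta` is a ClientMetadata read
// from the tag array, `cmd` is the memory op, and `probe_param` comes from an incoming Probe:
//   val (is_hit, param_or_state, meta_on_hit) = meta.onAccess(cmd)
//   val meta_after_grant = meta.onGrant(cmd, grant_param) // applied once the Grant returns on a miss
//   val (has_dirty_data, report_param, meta_after_probe) = meta.onProbe(probe_param)
// The tuple fields mirror growStarter/growFinisher/shrinkHelper above.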
File HellaCache.scala:
// See LICENSE.SiFive for license details.
// See LICENSE.Berkeley for license details.
package freechips.rocketchip.rocket
import chisel3.{dontTouch, _}
import chisel3.util._
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy.bundlebridge._
import org.chipsalliance.diplomacy.lazymodule._
import freechips.rocketchip.amba.AMBAProtField
import freechips.rocketchip.diplomacy.{IdRange, TransferSizes, RegionType}
import freechips.rocketchip.tile.{L1CacheParams, HasL1CacheParameters, HasCoreParameters, CoreBundle, HasNonDiplomaticTileParameters, BaseTile, HasTileParameters}
import freechips.rocketchip.tilelink.{TLMasterParameters, TLClientNode, TLMasterPortParameters, TLEdgeOut, TLWidthWidget, TLFIFOFixer, ClientMetadata}
import freechips.rocketchip.util.{Code, RandomReplacement, ParameterizedBundle}
import freechips.rocketchip.util.{BooleanToAugmentedBoolean, IntToAugmentedInt}
import scala.collection.mutable.ListBuffer
case class DCacheParams(
nSets: Int = 64,
nWays: Int = 4,
rowBits: Int = 64,
subWordBits: Option[Int] = None,
replacementPolicy: String = "random",
nTLBSets: Int = 1,
nTLBWays: Int = 32,
nTLBBasePageSectors: Int = 4,
nTLBSuperpages: Int = 4,
tagECC: Option[String] = None,
dataECC: Option[String] = None,
dataECCBytes: Int = 1,
nMSHRs: Int = 1,
nSDQ: Int = 17,
nRPQ: Int = 16,
nMMIOs: Int = 1,
blockBytes: Int = 64,
separateUncachedResp: Boolean = false,
acquireBeforeRelease: Boolean = false,
pipelineWayMux: Boolean = false,
clockGate: Boolean = false,
scratch: Option[BigInt] = None) extends L1CacheParams {
def tagCode: Code = Code.fromString(tagECC)
def dataCode: Code = Code.fromString(dataECC)
def dataScratchpadBytes: Int = scratch.map(_ => nSets*blockBytes).getOrElse(0)
def replacement = new RandomReplacement(nWays)
def silentDrop: Boolean = !acquireBeforeRelease
require((!scratch.isDefined || nWays == 1),
"Scratchpad only allowed in direct-mapped cache.")
require((!scratch.isDefined || nMSHRs == 0),
"Scratchpad only allowed in blocking cache.")
if (scratch.isEmpty)
require(isPow2(nSets), s"nSets($nSets) must be pow2")
}
trait HasL1HellaCacheParameters extends HasL1CacheParameters with HasCoreParameters {
val cacheParams = tileParams.dcache.get
val cfg = cacheParams
def wordBits = coreDataBits
def wordBytes = coreDataBytes
def subWordBits = cacheParams.subWordBits.getOrElse(wordBits)
def subWordBytes = subWordBits / 8
def wordOffBits = log2Up(wordBytes)
def beatBytes = cacheBlockBytes / cacheDataBeats
def beatWords = beatBytes / wordBytes
def beatOffBits = log2Up(beatBytes)
def idxMSB = untagBits-1
def idxLSB = blockOffBits
def offsetmsb = idxLSB-1
def offsetlsb = wordOffBits
def rowWords = rowBits/wordBits
def doNarrowRead = coreDataBits * nWays % rowBits == 0
def eccBytes = cacheParams.dataECCBytes
val eccBits = cacheParams.dataECCBytes * 8
val encBits = cacheParams.dataCode.width(eccBits)
val encWordBits = encBits * (wordBits / eccBits)
def encDataBits = cacheParams.dataCode.width(coreDataBits) // NBDCache only
def encRowBits = encDataBits*rowWords
def lrscCycles = coreParams.lrscCycles // ISA requires 16-insn LRSC sequences to succeed
def lrscBackoff = 3 // disallow LRSC reacquisition briefly
def blockProbeAfterGrantCycles = 8 // give the processor some time to issue a request after a grant
def nIOMSHRs = cacheParams.nMMIOs
def maxUncachedInFlight = cacheParams.nMMIOs
def dataScratchpadSize = cacheParams.dataScratchpadBytes
require(rowBits >= coreDataBits, s"rowBits($rowBits) < coreDataBits($coreDataBits)")
if (!usingDataScratchpad)
require(rowBits == cacheDataBits, s"rowBits($rowBits) != cacheDataBits($cacheDataBits)")
// would need offset addr for puts if data width < xlen
require(xLen <= cacheDataBits, s"xLen($xLen) > cacheDataBits($cacheDataBits)")
}
abstract class L1HellaCacheModule(implicit val p: Parameters) extends Module
with HasL1HellaCacheParameters
abstract class L1HellaCacheBundle(implicit val p: Parameters) extends ParameterizedBundle()(p)
with HasL1HellaCacheParameters
/** Bundle definitions for HellaCache interfaces */
trait HasCoreMemOp extends HasL1HellaCacheParameters {
val addr = UInt(coreMaxAddrBits.W)
val idx = (usingVM && untagBits > pgIdxBits).option(UInt(coreMaxAddrBits.W))
val tag = UInt((coreParams.dcacheReqTagBits + log2Ceil(dcacheArbPorts)).W)
val cmd = UInt(M_SZ.W)
val size = UInt(log2Ceil(coreDataBytes.log2 + 1).W)
val signed = Bool()
val dprv = UInt(PRV.SZ.W)
val dv = Bool()
}
trait HasCoreData extends HasCoreParameters {
val data = UInt(coreDataBits.W)
val mask = UInt(coreDataBytes.W)
}
class HellaCacheReqInternal(implicit p: Parameters) extends CoreBundle()(p) with HasCoreMemOp {
val phys = Bool()
val no_resp = Bool() // The dcache may omit generating a response for this request
val no_alloc = Bool()
val no_xcpt = Bool()
}
class HellaCacheReq(implicit p: Parameters) extends HellaCacheReqInternal()(p) with HasCoreData
class HellaCacheResp(implicit p: Parameters) extends CoreBundle()(p)
with HasCoreMemOp
with HasCoreData {
val replay = Bool()
val has_data = Bool()
val data_word_bypass = UInt(coreDataBits.W)
val data_raw = UInt(coreDataBits.W)
val store_data = UInt(coreDataBits.W)
}
class AlignmentExceptions extends Bundle {
val ld = Bool()
val st = Bool()
}
class HellaCacheExceptions extends Bundle {
val ma = new AlignmentExceptions
val pf = new AlignmentExceptions
val gf = new AlignmentExceptions
val ae = new AlignmentExceptions
}
class HellaCacheWriteData(implicit p: Parameters) extends CoreBundle()(p) with HasCoreData
class HellaCachePerfEvents extends Bundle {
val acquire = Bool()
val release = Bool()
val grant = Bool()
val tlbMiss = Bool()
val blocked = Bool()
val canAcceptStoreThenLoad = Bool()
val canAcceptStoreThenRMW = Bool()
val canAcceptLoadThenLoad = Bool()
val storeBufferEmptyAfterLoad = Bool()
val storeBufferEmptyAfterStore = Bool()
}
// interface between D$ and processor/DTLB
class HellaCacheIO(implicit p: Parameters) extends CoreBundle()(p) {
val req = Decoupled(new HellaCacheReq)
val s1_kill = Output(Bool()) // kill previous cycle's req
val s1_data = Output(new HellaCacheWriteData()) // data for previous cycle's req
val s2_nack = Input(Bool()) // req from two cycles ago is rejected
val s2_nack_cause_raw = Input(Bool()) // reason for nack is store-load RAW hazard (performance hint)
val s2_kill = Output(Bool()) // kill req from two cycles ago
val s2_uncached = Input(Bool()) // advisory signal that the access is MMIO
val s2_paddr = Input(UInt(paddrBits.W)) // translated address
val resp = Flipped(Valid(new HellaCacheResp))
val replay_next = Input(Bool())
val s2_xcpt = Input(new HellaCacheExceptions)
val s2_gpa = Input(UInt(vaddrBitsExtended.W))
val s2_gpa_is_pte = Input(Bool())
val uncached_resp = tileParams.dcache.get.separateUncachedResp.option(Flipped(Decoupled(new HellaCacheResp)))
val ordered = Input(Bool())
val store_pending = Input(Bool()) // there is a store in a store buffer somewhere
val perf = Input(new HellaCachePerfEvents())
val keep_clock_enabled = Output(Bool()) // should D$ avoid clock-gating itself?
val clock_enabled = Input(Bool()) // is D$ currently being clocked?
}
/** Base classes for Diplomatic TL2 HellaCaches */
abstract class HellaCache(tileId: Int)(implicit p: Parameters) extends LazyModule
with HasNonDiplomaticTileParameters {
protected val cfg = tileParams.dcache.get
protected def cacheClientParameters = cfg.scratch.map(x => Seq()).getOrElse(Seq(TLMasterParameters.v1(
name = s"Core ${tileId} DCache",
sourceId = IdRange(0, 1 max cfg.nMSHRs),
supportsProbe = TransferSizes(cfg.blockBytes, cfg.blockBytes))))
protected def mmioClientParameters = Seq(TLMasterParameters.v1(
name = s"Core ${tileId} DCache MMIO",
sourceId = IdRange(firstMMIO, firstMMIO + cfg.nMMIOs),
requestFifo = true))
def firstMMIO = (cacheClientParameters.map(_.sourceId.end) :+ 0).max
val node = TLClientNode(Seq(TLMasterPortParameters.v1(
clients = cacheClientParameters ++ mmioClientParameters,
minLatency = 1,
requestFields = tileParams.core.useVM.option(Seq()).getOrElse(Seq(AMBAProtField())))))
val hartIdSinkNodeOpt = cfg.scratch.map(_ => BundleBridgeSink[UInt]())
val mmioAddressPrefixSinkNodeOpt = cfg.scratch.map(_ => BundleBridgeSink[UInt]())
val module: HellaCacheModule
def flushOnFenceI = cfg.scratch.isEmpty && !node.edges.out(0).manager.managers.forall(m => !m.supportsAcquireB || !m.executable || m.regionType >= RegionType.TRACKED || m.regionType <= RegionType.IDEMPOTENT)
def canSupportCFlushLine = !usingVM || cfg.blockBytes * cfg.nSets <= (1 << pgIdxBits)
require(!tileParams.core.haveCFlush || cfg.scratch.isEmpty, "CFLUSH_D_L1 instruction requires a D$")
}
class HellaCacheBundle(implicit p: Parameters) extends CoreBundle()(p) {
val cpu = Flipped(new HellaCacheIO)
val ptw = new TLBPTWIO()
val errors = new DCacheErrors
val tlb_port = new DCacheTLBPort
}
class HellaCacheModule(outer: HellaCache) extends LazyModuleImp(outer)
with HasL1HellaCacheParameters {
implicit val edge: TLEdgeOut = outer.node.edges.out(0)
val (tl_out, _) = outer.node.out(0)
val io = IO(new HellaCacheBundle)
val io_hartid = outer.hartIdSinkNodeOpt.map(_.bundle)
val io_mmio_address_prefix = outer.mmioAddressPrefixSinkNodeOpt.map(_.bundle)
dontTouch(io.cpu.resp) // Users like to monitor these fields even if the core ignores some signals
dontTouch(io.cpu.s1_data)
require(rowBits == edge.bundle.dataBits)
private val fifoManagers = edge.manager.managers.filter(TLFIFOFixer.allVolatile)
fifoManagers.foreach { m =>
require (m.fifoId == fifoManagers.head.fifoId,
s"IOMSHRs must be FIFO for all regions with effects, but HellaCache sees\n"+
s"${m.nodePath.map(_.name)}\nversus\n${fifoManagers.head.nodePath.map(_.name)}")
}
}
/** Support overriding which HellaCache is instantiated */
case object BuildHellaCache extends Field[BaseTile => Parameters => HellaCache](HellaCacheFactory.apply)
object HellaCacheFactory {
def apply(tile: BaseTile)(p: Parameters): HellaCache = {
if (tile.tileParams.dcache.get.nMSHRs == 0)
new DCache(tile.tileId, tile.crossing)(p)
else
new NonBlockingDCache(tile.tileId)(p)
}
}
/** Mix-ins for constructing tiles that have a HellaCache */
trait HasHellaCache { this: BaseTile =>
val module: HasHellaCacheModule
implicit val p: Parameters
var nDCachePorts = 0
lazy val dcache: HellaCache = LazyModule(p(BuildHellaCache)(this)(p))
tlMasterXbar.node := TLWidthWidget(tileParams.dcache.get.rowBits/8) := dcache.node
dcache.hartIdSinkNodeOpt.map { _ := hartIdNexusNode }
dcache.mmioAddressPrefixSinkNodeOpt.map { _ := mmioAddressPrefixNexusNode }
InModuleBody {
dcache.module.io.tlb_port := DontCare
}
}
trait HasHellaCacheModule {
val outer: HasHellaCache with HasTileParameters
implicit val p: Parameters
val dcachePorts = ListBuffer[HellaCacheIO]()
val dcacheArb = Module(new HellaCacheArbiter(outer.nDCachePorts)(outer.p))
outer.dcache.module.io.cpu <> dcacheArb.io.mem
}
/** Metadata array used for all HellaCaches */
class L1Metadata(implicit p: Parameters) extends L1HellaCacheBundle()(p) {
val coh = new ClientMetadata
val tag = UInt(tagBits.W)
}
object L1Metadata {
def apply(tag: Bits, coh: ClientMetadata)(implicit p: Parameters) = {
val meta = Wire(new L1Metadata)
meta.tag := tag
meta.coh := coh
meta
}
}
class L1MetaReadReq(implicit p: Parameters) extends L1HellaCacheBundle()(p) {
val idx = UInt(idxBits.W)
val way_en = UInt(nWays.W)
val tag = UInt(tagBits.W)
}
class L1MetaWriteReq(implicit p: Parameters) extends L1MetaReadReq()(p) {
val data = new L1Metadata
}
class L1MetadataArray[T <: L1Metadata](onReset: () => T)(implicit p: Parameters) extends L1HellaCacheModule()(p) {
val rstVal = onReset()
val io = IO(new Bundle {
val read = Flipped(Decoupled(new L1MetaReadReq))
val write = Flipped(Decoupled(new L1MetaWriteReq))
val resp = Output(Vec(nWays, rstVal.cloneType))
})
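// After reset, rst_cnt sweeps once through all nSets entries, writing rstVal into every way;
// external reads and writes are held off (ready deasserted) until the sweep completes.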
val rst_cnt = RegInit(0.U(log2Up(nSets+1).W))
val rst = rst_cnt < nSets.U
val waddr = Mux(rst, rst_cnt, io.write.bits.idx)
val wdata = Mux(rst, rstVal, io.write.bits.data).asUInt
val wmask = Mux(rst || (nWays == 1).B, (-1).S, io.write.bits.way_en.asSInt).asBools
val rmask = Mux(rst || (nWays == 1).B, (-1).S, io.read.bits.way_en.asSInt).asBools
when (rst) { rst_cnt := rst_cnt+1.U }
val metabits = rstVal.getWidth
val tag_array = SyncReadMem(nSets, Vec(nWays, UInt(metabits.W)))
val wen = rst || io.write.valid
when (wen) {
tag_array.write(waddr, VecInit.fill(nWays)(wdata), wmask)
}
io.resp := tag_array.read(io.read.bits.idx, io.read.fire).map(_.asTypeOf(chiselTypeOf(rstVal)))
io.read.ready := !wen // so really this could be a 6T RAM
io.write.ready := !rst
}
| module L1MetadataArray_1( // @[HellaCache.scala:322:7]
input clock, // @[HellaCache.scala:322:7]
input reset, // @[HellaCache.scala:322:7]
output io_read_ready, // @[HellaCache.scala:324:14]
input io_read_valid, // @[HellaCache.scala:324:14]
input [5:0] io_read_bits_idx, // @[HellaCache.scala:324:14]
input [3:0] io_read_bits_way_en, // @[HellaCache.scala:324:14]
input [19:0] io_read_bits_tag, // @[HellaCache.scala:324:14]
output io_write_ready, // @[HellaCache.scala:324:14]
input io_write_valid, // @[HellaCache.scala:324:14]
input [5:0] io_write_bits_idx, // @[HellaCache.scala:324:14]
input [3:0] io_write_bits_way_en, // @[HellaCache.scala:324:14]
input [19:0] io_write_bits_tag, // @[HellaCache.scala:324:14]
input [1:0] io_write_bits_data_coh_state, // @[HellaCache.scala:324:14]
input [19:0] io_write_bits_data_tag, // @[HellaCache.scala:324:14]
output [1:0] io_resp_0_coh_state, // @[HellaCache.scala:324:14]
output [19:0] io_resp_0_tag, // @[HellaCache.scala:324:14]
output [1:0] io_resp_1_coh_state, // @[HellaCache.scala:324:14]
output [19:0] io_resp_1_tag, // @[HellaCache.scala:324:14]
output [1:0] io_resp_2_coh_state, // @[HellaCache.scala:324:14]
output [19:0] io_resp_2_tag, // @[HellaCache.scala:324:14]
output [1:0] io_resp_3_coh_state, // @[HellaCache.scala:324:14]
output [19:0] io_resp_3_tag // @[HellaCache.scala:324:14]
);
wire tag_array_MPORT_1_en; // @[Decoupled.scala:51:35]
wire [5:0] tag_array_MPORT_addr; // @[HellaCache.scala:342:20]
wire [87:0] _tag_array_RW0_rdata; // @[HellaCache.scala:339:30]
wire io_read_valid_0 = io_read_valid; // @[HellaCache.scala:322:7]
wire [5:0] io_read_bits_idx_0 = io_read_bits_idx; // @[HellaCache.scala:322:7]
wire [3:0] io_read_bits_way_en_0 = io_read_bits_way_en; // @[HellaCache.scala:322:7]
wire [19:0] io_read_bits_tag_0 = io_read_bits_tag; // @[HellaCache.scala:322:7]
wire io_write_valid_0 = io_write_valid; // @[HellaCache.scala:322:7]
wire [5:0] io_write_bits_idx_0 = io_write_bits_idx; // @[HellaCache.scala:322:7]
wire [3:0] io_write_bits_way_en_0 = io_write_bits_way_en; // @[HellaCache.scala:322:7]
wire [19:0] io_write_bits_tag_0 = io_write_bits_tag; // @[HellaCache.scala:322:7]
wire [1:0] io_write_bits_data_coh_state_0 = io_write_bits_data_coh_state; // @[HellaCache.scala:322:7]
wire [19:0] io_write_bits_data_tag_0 = io_write_bits_data_tag; // @[HellaCache.scala:322:7]
wire [1:0] rstVal_meta_state = 2'h0; // @[Metadata.scala:160:20]
wire [1:0] rstVal_coh_state = 2'h0; // @[HellaCache.scala:305:20]
wire [19:0] rstVal_tag = 20'h0; // @[HellaCache.scala:305:20]
wire _io_read_ready_T; // @[HellaCache.scala:346:20]
wire [3:0] _rmask_T_1 = io_read_bits_way_en_0; // @[HellaCache.scala:322:7, :335:70]
wire _io_write_ready_T; // @[HellaCache.scala:347:21]
wire [3:0] _wmask_T_1 = io_write_bits_way_en_0; // @[HellaCache.scala:322:7, :334:71]
wire io_read_ready_0; // @[HellaCache.scala:322:7]
wire io_write_ready_0; // @[HellaCache.scala:322:7]
wire [1:0] io_resp_0_coh_state_0; // @[HellaCache.scala:322:7]
wire [19:0] io_resp_0_tag_0; // @[HellaCache.scala:322:7]
wire [1:0] io_resp_1_coh_state_0; // @[HellaCache.scala:322:7]
wire [19:0] io_resp_1_tag_0; // @[HellaCache.scala:322:7]
wire [1:0] io_resp_2_coh_state_0; // @[HellaCache.scala:322:7]
wire [19:0] io_resp_2_tag_0; // @[HellaCache.scala:322:7]
wire [1:0] io_resp_3_coh_state_0; // @[HellaCache.scala:322:7]
wire [19:0] io_resp_3_tag_0; // @[HellaCache.scala:322:7]
reg [6:0] rst_cnt; // @[HellaCache.scala:330:24]
wire rst = ~(rst_cnt[6]); // @[HellaCache.scala:330:24, :331:21]
wire _wmask_T = rst; // @[HellaCache.scala:331:21, :334:23]
wire _rmask_T = rst; // @[HellaCache.scala:331:21, :335:23]
wire [6:0] waddr = rst ? rst_cnt : {1'h0, io_write_bits_idx_0}; // @[HellaCache.scala:322:7, :330:24, :331:21, :332:18]
wire [1:0] _wdata_T_coh_state = rst ? 2'h0 : io_write_bits_data_coh_state_0; // @[HellaCache.scala:322:7, :331:21, :333:18]
wire [19:0] _wdata_T_tag = rst ? 20'h0 : io_write_bits_data_tag_0; // @[HellaCache.scala:322:7, :331:21, :333:18]
wire [21:0] wdata = {_wdata_T_coh_state, _wdata_T_tag}; // @[HellaCache.scala:333:{18,52}]
wire [3:0] _wmask_T_2 = _wmask_T ? 4'hF : _wmask_T_1; // @[HellaCache.scala:334:{18,23,71}]
wire wmask_0 = _wmask_T_2[0]; // @[HellaCache.scala:334:{18,79}]
wire wmask_1 = _wmask_T_2[1]; // @[HellaCache.scala:334:{18,79}]
wire wmask_2 = _wmask_T_2[2]; // @[HellaCache.scala:334:{18,79}]
wire wmask_3 = _wmask_T_2[3]; // @[HellaCache.scala:334:{18,79}]
wire [3:0] _rmask_T_2 = _rmask_T ? 4'hF : _rmask_T_1; // @[HellaCache.scala:335:{18,23,70}]
wire rmask_0 = _rmask_T_2[0]; // @[HellaCache.scala:335:{18,78}]
wire rmask_1 = _rmask_T_2[1]; // @[HellaCache.scala:335:{18,78}]
wire rmask_2 = _rmask_T_2[2]; // @[HellaCache.scala:335:{18,78}]
wire rmask_3 = _rmask_T_2[3]; // @[HellaCache.scala:335:{18,78}]
wire [7:0] _rst_cnt_T = {1'h0, rst_cnt} + 8'h1; // @[HellaCache.scala:330:24, :332:18, :336:34]
wire [6:0] _rst_cnt_T_1 = _rst_cnt_T[6:0]; // @[HellaCache.scala:336:34]
wire wen; // @[HellaCache.scala:340:17]
assign wen = rst | io_write_valid_0; // @[HellaCache.scala:322:7, :331:21, :340:17]
assign tag_array_MPORT_addr = waddr[5:0]; // @[HellaCache.scala:332:18, :342:20]
assign tag_array_MPORT_1_en = io_read_ready_0 & io_read_valid_0; // @[Decoupled.scala:51:35]
assign io_resp_0_tag_0 = _tag_array_RW0_rdata[19:0]; // @[HellaCache.scala:322:7, :339:30, :344:75]
assign io_resp_0_coh_state_0 = _tag_array_RW0_rdata[21:20]; // @[HellaCache.scala:322:7, :339:30, :344:75]
assign io_resp_1_tag_0 = _tag_array_RW0_rdata[41:22]; // @[HellaCache.scala:322:7, :339:30, :344:75]
assign io_resp_1_coh_state_0 = _tag_array_RW0_rdata[43:42]; // @[HellaCache.scala:322:7, :339:30, :344:75]
assign io_resp_2_tag_0 = _tag_array_RW0_rdata[63:44]; // @[HellaCache.scala:322:7, :339:30, :344:75]
assign io_resp_2_coh_state_0 = _tag_array_RW0_rdata[65:64]; // @[HellaCache.scala:322:7, :339:30, :344:75]
assign io_resp_3_tag_0 = _tag_array_RW0_rdata[85:66]; // @[HellaCache.scala:322:7, :339:30, :344:75]
assign io_resp_3_coh_state_0 = _tag_array_RW0_rdata[87:86]; // @[HellaCache.scala:322:7, :339:30, :344:75]
assign _io_read_ready_T = ~wen; // @[HellaCache.scala:340:17, :346:20]
assign io_read_ready_0 = _io_read_ready_T; // @[HellaCache.scala:322:7, :346:20]
assign _io_write_ready_T = ~rst; // @[HellaCache.scala:331:21, :347:21]
assign io_write_ready_0 = _io_write_ready_T; // @[HellaCache.scala:322:7, :347:21]
always @(posedge clock) begin // @[HellaCache.scala:322:7]
if (reset) // @[HellaCache.scala:322:7]
rst_cnt <= 7'h0; // @[HellaCache.scala:330:24]
else if (rst) // @[HellaCache.scala:331:21]
rst_cnt <= _rst_cnt_T_1; // @[HellaCache.scala:330:24, :336:34]
end // always @(posedge)
tag_array_1 tag_array ( // @[HellaCache.scala:339:30]
.RW0_addr (wen ? tag_array_MPORT_addr : io_read_bits_idx_0), // @[HellaCache.scala:322:7, :339:30, :340:17, :342:20]
.RW0_en (tag_array_MPORT_1_en | wen), // @[Decoupled.scala:51:35]
.RW0_clk (clock),
.RW0_wmode (wen), // @[HellaCache.scala:340:17]
.RW0_wdata ({4{wdata}}), // @[HellaCache.scala:333:52, :339:30]
.RW0_rdata (_tag_array_RW0_rdata),
.RW0_wmask ({wmask_3, wmask_2, wmask_1, wmask_0}) // @[HellaCache.scala:334:79, :339:30]
); // @[HellaCache.scala:339:30]
assign io_read_ready = io_read_ready_0; // @[HellaCache.scala:322:7]
assign io_write_ready = io_write_ready_0; // @[HellaCache.scala:322:7]
assign io_resp_0_coh_state = io_resp_0_coh_state_0; // @[HellaCache.scala:322:7]
assign io_resp_0_tag = io_resp_0_tag_0; // @[HellaCache.scala:322:7]
assign io_resp_1_coh_state = io_resp_1_coh_state_0; // @[HellaCache.scala:322:7]
assign io_resp_1_tag = io_resp_1_tag_0; // @[HellaCache.scala:322:7]
assign io_resp_2_coh_state = io_resp_2_coh_state_0; // @[HellaCache.scala:322:7]
assign io_resp_2_tag = io_resp_2_tag_0; // @[HellaCache.scala:322:7]
assign io_resp_3_coh_state = io_resp_3_coh_state_0; // @[HellaCache.scala:322:7]
assign io_resp_3_tag = io_resp_3_tag_0; // @[HellaCache.scala:322:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File PE.scala:
// See README.md for license details.
package gemmini
import chisel3._
import chisel3.util._
class PEControl[T <: Data : Arithmetic](accType: T) extends Bundle {
val dataflow = UInt(1.W) // TODO make this an Enum
val propagate = UInt(1.W) // Which register should be propagated (and which should be accumulated)?
val shift = UInt(log2Up(accType.getWidth).W) // TODO this isn't correct for Floats
}
class MacUnit[T <: Data](inputType: T, cType: T, dType: T) (implicit ev: Arithmetic[T]) extends Module {
import ev._
val io = IO(new Bundle {
val in_a = Input(inputType)
val in_b = Input(inputType)
val in_c = Input(cType)
val out_d = Output(dType)
})
io.out_d := io.in_c.mac(io.in_a, io.in_b)
}
// TODO update documentation
/**
* A PE implementing a MAC operation. Configured as fully combinational when integrated into a Mesh.
* @param width Data width of operands
*/
class PE[T <: Data](inputType: T, outputType: T, accType: T, df: Dataflow.Value, max_simultaneous_matmuls: Int)
(implicit ev: Arithmetic[T]) extends Module { // Debugging variables
import ev._
val io = IO(new Bundle {
val in_a = Input(inputType)
val in_b = Input(outputType)
val in_d = Input(outputType)
val out_a = Output(inputType)
val out_b = Output(outputType)
val out_c = Output(outputType)
val in_control = Input(new PEControl(accType))
val out_control = Output(new PEControl(accType))
val in_id = Input(UInt(log2Up(max_simultaneous_matmuls).W))
val out_id = Output(UInt(log2Up(max_simultaneous_matmuls).W))
val in_last = Input(Bool())
val out_last = Output(Bool())
val in_valid = Input(Bool())
val out_valid = Output(Bool())
val bad_dataflow = Output(Bool())
})
val cType = if (df == Dataflow.WS) inputType else accType
// When creating PEs that support multiple dataflows, the
// elaboration/synthesis tools often fail to consolidate and de-duplicate
// MAC units. To force mac circuitry to be re-used, we create a "mac_unit"
// module here which just performs a single MAC operation
val mac_unit = Module(new MacUnit(inputType,
if (df == Dataflow.WS) outputType else accType, outputType))
val a = io.in_a
val b = io.in_b
val d = io.in_d
val c1 = Reg(cType)
val c2 = Reg(cType)
val dataflow = io.in_control.dataflow
val prop = io.in_control.propagate
val shift = io.in_control.shift
val id = io.in_id
val last = io.in_last
val valid = io.in_valid
io.out_a := a
io.out_control.dataflow := dataflow
io.out_control.propagate := prop
io.out_control.shift := shift
io.out_id := id
io.out_last := last
io.out_valid := valid
mac_unit.io.in_a := a
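// last_s samples the propagate flag on valid cycles; `flip` is high when the flag has just changed,
// and only then is the programmed right-shift applied to the accumulator value being read out.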
val last_s = RegEnable(prop, valid)
val flip = last_s =/= prop
val shift_offset = Mux(flip, shift, 0.U)
// Which dataflow are we using?
val OUTPUT_STATIONARY = Dataflow.OS.id.U(1.W)
val WEIGHT_STATIONARY = Dataflow.WS.id.U(1.W)
// Is c1 being computed on, or propagated forward (in the output-stationary dataflow)?
val COMPUTE = 0.U(1.W)
val PROPAGATE = 1.U(1.W)
io.bad_dataflow := false.B
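// In the output-stationary dataflow below, c1 and c2 ping-pong with the propagate flag:
// one register is read out (shifted) and reloaded from in_d while the other keeps accumulating a*b.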
when ((df == Dataflow.OS).B || ((df == Dataflow.BOTH).B && dataflow === OUTPUT_STATIONARY)) {
when(prop === PROPAGATE) {
io.out_c := (c1 >> shift_offset).clippedToWidthOf(outputType)
io.out_b := b
mac_unit.io.in_b := b.asTypeOf(inputType)
mac_unit.io.in_c := c2
c2 := mac_unit.io.out_d
c1 := d.withWidthOf(cType)
}.otherwise {
io.out_c := (c2 >> shift_offset).clippedToWidthOf(outputType)
io.out_b := b
mac_unit.io.in_b := b.asTypeOf(inputType)
mac_unit.io.in_c := c1
c1 := mac_unit.io.out_d
c2 := d.withWidthOf(cType)
}
}.elsewhen ((df == Dataflow.WS).B || ((df == Dataflow.BOTH).B && dataflow === WEIGHT_STATIONARY)) {
when(prop === PROPAGATE) {
io.out_c := c1
mac_unit.io.in_b := c2.asTypeOf(inputType)
mac_unit.io.in_c := b
io.out_b := mac_unit.io.out_d
c1 := d
}.otherwise {
io.out_c := c2
mac_unit.io.in_b := c1.asTypeOf(inputType)
mac_unit.io.in_c := b
io.out_b := mac_unit.io.out_d
c2 := d
}
}.otherwise {
io.bad_dataflow := true.B
//assert(false.B, "unknown dataflow")
io.out_c := DontCare
io.out_b := DontCare
mac_unit.io.in_b := b.asTypeOf(inputType)
mac_unit.io.in_c := c2
}
when (!valid) {
c1 := c1
c2 := c2
mac_unit.io.in_b := DontCare
mac_unit.io.in_c := DontCare
}
}
File Arithmetic.scala:
// A simple type class for Chisel datatypes that can add and multiply. To add your own type, simply create your own:
// implicit MyTypeArithmetic extends Arithmetic[MyType] { ... }
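// (For a minimal complete template, see DummySIntArithmetic at the bottom of this file: it
// implements every required ArithmeticOps method for a placeholder type, mostly as don't-cares.)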
package gemmini
import chisel3._
import chisel3.util._
import hardfloat._
// Bundles that represent the raw bits of custom datatypes
case class Float(expWidth: Int, sigWidth: Int) extends Bundle {
val bits = UInt((expWidth + sigWidth).W)
val bias: Int = (1 << (expWidth-1)) - 1
}
case class DummySInt(w: Int) extends Bundle {
val bits = UInt(w.W)
def dontCare: DummySInt = {
val o = Wire(new DummySInt(w))
o.bits := 0.U
o
}
}
// The Arithmetic typeclass which implements various arithmetic operations on custom datatypes
abstract class Arithmetic[T <: Data] {
implicit def cast(t: T): ArithmeticOps[T]
}
abstract class ArithmeticOps[T <: Data](self: T) {
def *(t: T): T
def mac(m1: T, m2: T): T // Returns (m1 * m2 + self)
def +(t: T): T
def -(t: T): T
def >>(u: UInt): T // This is a rounding shift! Rounds to nearest (ties to even in the implementations below)
def >(t: T): Bool
def identity: T
def withWidthOf(t: T): T
def clippedToWidthOf(t: T): T // Like "withWidthOf", except that it saturates
def relu: T
def zero: T
def minimum: T
// Optional parameters, which only need to be defined if you want to enable various optimizations for transformers
def divider(denom_t: UInt, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[T])] = None
def sqrt: Option[(DecoupledIO[UInt], DecoupledIO[T])] = None
def reciprocal[U <: Data](u: U, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[U])] = None
def mult_with_reciprocal[U <: Data](reciprocal: U) = self
}
object Arithmetic {
implicit object UIntArithmetic extends Arithmetic[UInt] {
override implicit def cast(self: UInt) = new ArithmeticOps(self) {
override def *(t: UInt) = self * t
override def mac(m1: UInt, m2: UInt) = m1 * m2 + self
override def +(t: UInt) = self + t
override def -(t: UInt) = self - t
override def >>(u: UInt) = {
// The equation we use can be found here: https://riscv.github.io/documents/riscv-v-spec/#_vector_fixed_point_rounding_mode_register_vxrm
// TODO Do we need to explicitly handle the cases where "u" is a small number (like 0)? What is the default behavior here?
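// Worked example (illustrative): self = 6 (0b110), u = 2: point_five = bit 1 = 1, zeros = (bit 0 =/= 0) = 0,
// ones_digit = bit 2 = 1, so r = 1 and the result is (6 >> 2) + 1 = 2; by contrast 2 >> 2 gives 0,
// because zeros and ones_digit are both 0 there.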
val point_five = Mux(u === 0.U, 0.U, self(u - 1.U))
val zeros = Mux(u <= 1.U, 0.U, self.asUInt & ((1.U << (u - 1.U)).asUInt - 1.U)) =/= 0.U
val ones_digit = self(u)
val r = point_five & (zeros | ones_digit)
(self >> u).asUInt + r
}
override def >(t: UInt): Bool = self > t
override def withWidthOf(t: UInt) = self.asTypeOf(t)
override def clippedToWidthOf(t: UInt) = {
val sat = ((1 << (t.getWidth-1))-1).U
Mux(self > sat, sat, self)(t.getWidth-1, 0)
}
override def relu: UInt = self
override def zero: UInt = 0.U
override def identity: UInt = 1.U
override def minimum: UInt = 0.U
}
}
implicit object SIntArithmetic extends Arithmetic[SInt] {
override implicit def cast(self: SInt) = new ArithmeticOps(self) {
override def *(t: SInt) = self * t
override def mac(m1: SInt, m2: SInt) = m1 * m2 + self
override def +(t: SInt) = self + t
override def -(t: SInt) = self - t
override def >>(u: UInt) = {
// The equation we use can be found here: https://riscv.github.io/documents/riscv-v-spec/#_vector_fixed_point_rounding_mode_register_vxrm
// TODO Do we need to explicitly handle the cases where "u" is a small number (like 0)? What is the default behavior here?
val point_five = Mux(u === 0.U, 0.U, self(u - 1.U))
val zeros = Mux(u <= 1.U, 0.U, self.asUInt & ((1.U << (u - 1.U)).asUInt - 1.U)) =/= 0.U
val ones_digit = self(u)
val r = (point_five & (zeros | ones_digit)).asBool
(self >> u).asSInt + Mux(r, 1.S, 0.S)
}
override def >(t: SInt): Bool = self > t
override def withWidthOf(t: SInt) = {
if (self.getWidth >= t.getWidth)
self(t.getWidth-1, 0).asSInt
else {
val sign_bits = t.getWidth - self.getWidth
val sign = self(self.getWidth-1)
Cat(Cat(Seq.fill(sign_bits)(sign)), self).asTypeOf(t)
}
}
override def clippedToWidthOf(t: SInt): SInt = {
val maxsat = ((1 << (t.getWidth-1))-1).S
val minsat = (-(1 << (t.getWidth-1))).S
MuxCase(self, Seq((self > maxsat) -> maxsat, (self < minsat) -> minsat))(t.getWidth-1, 0).asSInt
}
override def relu: SInt = Mux(self >= 0.S, self, 0.S)
override def zero: SInt = 0.S
override def identity: SInt = 1.S
override def minimum: SInt = (-(1 << (self.getWidth-1))).S
override def divider(denom_t: UInt, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[SInt])] = {
// TODO this uses a floating point divider, but we should use an integer divider instead
val input = Wire(Decoupled(denom_t.cloneType))
val output = Wire(Decoupled(self.cloneType))
// We translate our integer to floating-point form so that we can use the hardfloat divider
val expWidth = log2Up(self.getWidth) + 1
val sigWidth = self.getWidth
def sin_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def uin_to_float(x: UInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := false.B
in_to_rec_fn.io.in := x
in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def float_to_in(x: UInt) = {
val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth))
rec_fn_to_in.io.signedOut := true.B
rec_fn_to_in.io.in := x
rec_fn_to_in.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
rec_fn_to_in.io.out.asSInt
}
val self_rec = sin_to_float(self)
val denom_rec = uin_to_float(input.bits)
// Instantiate the hardfloat divider
val divider = Module(new DivSqrtRecFN_small(expWidth, sigWidth, options))
input.ready := divider.io.inReady
divider.io.inValid := input.valid
divider.io.sqrtOp := false.B
divider.io.a := self_rec
divider.io.b := denom_rec
divider.io.roundingMode := consts.round_minMag
divider.io.detectTininess := consts.tininess_afterRounding
output.valid := divider.io.outValid_div
output.bits := float_to_in(divider.io.out)
assert(!output.valid || output.ready)
Some((input, output))
}
override def sqrt: Option[(DecoupledIO[UInt], DecoupledIO[SInt])] = {
// TODO this uses a floating point divider, but we should use an integer divider instead
val input = Wire(Decoupled(UInt(0.W)))
val output = Wire(Decoupled(self.cloneType))
input.bits := DontCare
// We translate our integer to floating-point form so that we can use the hardfloat divider
val expWidth = log2Up(self.getWidth) + 1
val sigWidth = self.getWidth
def in_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def float_to_in(x: UInt) = {
val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth))
rec_fn_to_in.io.signedOut := true.B
rec_fn_to_in.io.in := x
rec_fn_to_in.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
rec_fn_to_in.io.out.asSInt
}
val self_rec = in_to_float(self)
// Instantiate the hardfloat sqrt
val sqrter = Module(new DivSqrtRecFN_small(expWidth, sigWidth, 0))
input.ready := sqrter.io.inReady
sqrter.io.inValid := input.valid
sqrter.io.sqrtOp := true.B
sqrter.io.a := self_rec
sqrter.io.b := DontCare
sqrter.io.roundingMode := consts.round_minMag
sqrter.io.detectTininess := consts.tininess_afterRounding
output.valid := sqrter.io.outValid_sqrt
output.bits := float_to_in(sqrter.io.out)
assert(!output.valid || output.ready)
Some((input, output))
}
override def reciprocal[U <: Data](u: U, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[U])] = u match {
case Float(expWidth, sigWidth) =>
val input = Wire(Decoupled(UInt(0.W)))
val output = Wire(Decoupled(u.cloneType))
input.bits := DontCare
// We translate our integer to floating-point form so that we can use the hardfloat divider
def in_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
val self_rec = in_to_float(self)
val one_rec = in_to_float(1.S)
// Instantiate the hardfloat divider
val divider = Module(new DivSqrtRecFN_small(expWidth, sigWidth, options))
input.ready := divider.io.inReady
divider.io.inValid := input.valid
divider.io.sqrtOp := false.B
divider.io.a := one_rec
divider.io.b := self_rec
divider.io.roundingMode := consts.round_near_even
divider.io.detectTininess := consts.tininess_afterRounding
output.valid := divider.io.outValid_div
output.bits := fNFromRecFN(expWidth, sigWidth, divider.io.out).asTypeOf(u)
assert(!output.valid || output.ready)
Some((input, output))
case _ => None
}
override def mult_with_reciprocal[U <: Data](reciprocal: U): SInt = reciprocal match {
case recip @ Float(expWidth, sigWidth) =>
def in_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def float_to_in(x: UInt) = {
val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth))
rec_fn_to_in.io.signedOut := true.B
rec_fn_to_in.io.in := x
rec_fn_to_in.io.roundingMode := consts.round_minMag
rec_fn_to_in.io.out.asSInt
}
val self_rec = in_to_float(self)
val reciprocal_rec = recFNFromFN(expWidth, sigWidth, recip.bits)
// Instantiate the hardfloat multiplier
val muladder = Module(new MulRecFN(expWidth, sigWidth))
muladder.io.roundingMode := consts.round_near_even
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := self_rec
muladder.io.b := reciprocal_rec
float_to_in(muladder.io.out)
case _ => self
}
}
}
implicit object FloatArithmetic extends Arithmetic[Float] {
// TODO Floating point arithmetic currently switches between recoded and standard formats for every operation. However, it should stay in the recoded format as it travels through the systolic array
override implicit def cast(self: Float): ArithmeticOps[Float] = new ArithmeticOps(self) {
override def *(t: Float): Float = {
val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth))
t_resizer.io.in := t_rec
t_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
t_resizer.io.detectTininess := consts.tininess_afterRounding
val t_rec_resized = t_resizer.io.out
val muladder = Module(new MulRecFN(self.expWidth, self.sigWidth))
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := self_rec
muladder.io.b := t_rec_resized
val out = Wire(Float(self.expWidth, self.sigWidth))
out.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
out
}
override def mac(m1: Float, m2: Float): Float = {
// Recode all operands
val m1_rec = recFNFromFN(m1.expWidth, m1.sigWidth, m1.bits)
val m2_rec = recFNFromFN(m2.expWidth, m2.sigWidth, m2.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Resize m1 to self's width
val m1_resizer = Module(new RecFNToRecFN(m1.expWidth, m1.sigWidth, self.expWidth, self.sigWidth))
m1_resizer.io.in := m1_rec
m1_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
m1_resizer.io.detectTininess := consts.tininess_afterRounding
val m1_rec_resized = m1_resizer.io.out
// Resize m2 to self's width
val m2_resizer = Module(new RecFNToRecFN(m2.expWidth, m2.sigWidth, self.expWidth, self.sigWidth))
m2_resizer.io.in := m2_rec
m2_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
m2_resizer.io.detectTininess := consts.tininess_afterRounding
val m2_rec_resized = m2_resizer.io.out
// Perform multiply-add
val muladder = Module(new MulAddRecFN(self.expWidth, self.sigWidth))
muladder.io.op := 0.U
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := m1_rec_resized
muladder.io.b := m2_rec_resized
muladder.io.c := self_rec
// Convert result to standard format // TODO remove these intermediate recodings
val out = Wire(Float(self.expWidth, self.sigWidth))
out.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
out
}
override def +(t: Float): Float = {
require(self.getWidth >= t.getWidth) // This just makes it easier to write the resizing code
// Recode all operands
val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Generate 1 as a float
val in_to_rec_fn = Module(new INToRecFN(1, self.expWidth, self.sigWidth))
in_to_rec_fn.io.signedIn := false.B
in_to_rec_fn.io.in := 1.U
in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
val one_rec = in_to_rec_fn.io.out
// Resize t
val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth))
t_resizer.io.in := t_rec
t_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
t_resizer.io.detectTininess := consts.tininess_afterRounding
val t_rec_resized = t_resizer.io.out
// Perform addition
val muladder = Module(new MulAddRecFN(self.expWidth, self.sigWidth))
muladder.io.op := 0.U
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := t_rec_resized
muladder.io.b := one_rec
muladder.io.c := self_rec
val result = Wire(Float(self.expWidth, self.sigWidth))
result.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
result
}
override def -(t: Float): Float = {
val t_sgn = t.bits(t.getWidth-1)
val neg_t = Cat(~t_sgn, t.bits(t.getWidth-2,0)).asTypeOf(t)
self + neg_t
}
override def >>(u: UInt): Float = {
// Recode self
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Get 2^(-u) as a recoded float
val shift_exp = Wire(UInt(self.expWidth.W))
shift_exp := self.bias.U - u
val shift_fn = Cat(0.U(1.W), shift_exp, 0.U((self.sigWidth-1).W))
val shift_rec = recFNFromFN(self.expWidth, self.sigWidth, shift_fn)
assert(shift_exp =/= 0.U, "scaling by denormalized numbers is not currently supported")
// Multiply self and 2^(-u)
val muladder = Module(new MulRecFN(self.expWidth, self.sigWidth))
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := self_rec
muladder.io.b := shift_rec
val result = Wire(Float(self.expWidth, self.sigWidth))
result.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
result
}
override def >(t: Float): Bool = {
// Recode all operands
val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Resize t to self's width
val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth))
t_resizer.io.in := t_rec
t_resizer.io.roundingMode := consts.round_near_even
t_resizer.io.detectTininess := consts.tininess_afterRounding
val t_rec_resized = t_resizer.io.out
val comparator = Module(new CompareRecFN(self.expWidth, self.sigWidth))
comparator.io.a := self_rec
comparator.io.b := t_rec_resized
comparator.io.signaling := false.B
comparator.io.gt
}
override def withWidthOf(t: Float): Float = {
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
val resizer = Module(new RecFNToRecFN(self.expWidth, self.sigWidth, t.expWidth, t.sigWidth))
resizer.io.in := self_rec
resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
resizer.io.detectTininess := consts.tininess_afterRounding
val result = Wire(Float(t.expWidth, t.sigWidth))
result.bits := fNFromRecFN(t.expWidth, t.sigWidth, resizer.io.out)
result
}
override def clippedToWidthOf(t: Float): Float = {
// TODO check for overflow. Right now, we just assume that overflow doesn't happen
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
val resizer = Module(new RecFNToRecFN(self.expWidth, self.sigWidth, t.expWidth, t.sigWidth))
resizer.io.in := self_rec
resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
resizer.io.detectTininess := consts.tininess_afterRounding
val result = Wire(Float(t.expWidth, t.sigWidth))
result.bits := fNFromRecFN(t.expWidth, t.sigWidth, resizer.io.out)
result
}
override def relu: Float = {
val raw = rawFloatFromFN(self.expWidth, self.sigWidth, self.bits)
val result = Wire(Float(self.expWidth, self.sigWidth))
result.bits := Mux(!raw.isZero && raw.sign, 0.U, self.bits)
result
}
override def zero: Float = 0.U.asTypeOf(self)
override def identity: Float = Cat(0.U(2.W), ~(0.U((self.expWidth-1).W)), 0.U((self.sigWidth-1).W)).asTypeOf(self)
override def minimum: Float = Cat(1.U, ~(0.U(self.expWidth.W)), 0.U((self.sigWidth-1).W)).asTypeOf(self)
}
}
implicit object DummySIntArithmetic extends Arithmetic[DummySInt] {
override implicit def cast(self: DummySInt) = new ArithmeticOps(self) {
override def *(t: DummySInt) = self.dontCare
override def mac(m1: DummySInt, m2: DummySInt) = self.dontCare
override def +(t: DummySInt) = self.dontCare
override def -(t: DummySInt) = self.dontCare
override def >>(t: UInt) = self.dontCare
override def >(t: DummySInt): Bool = false.B
override def identity = self.dontCare
override def withWidthOf(t: DummySInt) = self.dontCare
override def clippedToWidthOf(t: DummySInt) = self.dontCare
override def relu = self.dontCare
override def zero = self.dontCare
override def minimum: DummySInt = self.dontCare
}
}
}
| module MacUnit_17( // @[PE.scala:14:7]
input clock, // @[PE.scala:14:7]
input reset, // @[PE.scala:14:7]
input [7:0] io_in_a, // @[PE.scala:16:14]
input [7:0] io_in_b, // @[PE.scala:16:14]
input [19:0] io_in_c, // @[PE.scala:16:14]
output [19:0] io_out_d // @[PE.scala:16:14]
);
wire [7:0] io_in_a_0 = io_in_a; // @[PE.scala:14:7]
wire [7:0] io_in_b_0 = io_in_b; // @[PE.scala:14:7]
wire [19:0] io_in_c_0 = io_in_c; // @[PE.scala:14:7]
wire [19:0] _io_out_d_T_3; // @[Arithmetic.scala:93:54]
wire [19:0] io_out_d_0; // @[PE.scala:14:7]
wire [15:0] _io_out_d_T = {{8{io_in_a_0[7]}}, io_in_a_0} * {{8{io_in_b_0[7]}}, io_in_b_0}; // @[PE.scala:14:7]
wire [20:0] _io_out_d_T_1 = {{5{_io_out_d_T[15]}}, _io_out_d_T} + {io_in_c_0[19], io_in_c_0}; // @[PE.scala:14:7]
wire [19:0] _io_out_d_T_2 = _io_out_d_T_1[19:0]; // @[Arithmetic.scala:93:54]
assign _io_out_d_T_3 = _io_out_d_T_2; // @[Arithmetic.scala:93:54]
assign io_out_d_0 = _io_out_d_T_3; // @[PE.scala:14:7]
assign io_out_d = io_out_d_0; // @[PE.scala:14:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File Misc.scala:
// See LICENSE.Berkeley for license details.
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util._
import chisel3.util.random.LFSR
import org.chipsalliance.cde.config.Parameters
import scala.math._
class ParameterizedBundle(implicit p: Parameters) extends Bundle
trait Clocked extends Bundle {
val clock = Clock()
val reset = Bool()
}
object DecoupledHelper {
def apply(rvs: Bool*) = new DecoupledHelper(rvs)
}
class DecoupledHelper(val rvs: Seq[Bool]) {
def fire(exclude: Bool, includes: Bool*) = {
require(rvs.contains(exclude), "Excluded Bool not present in DecoupledHelper! Note that DecoupledHelper uses referential equality for exclusion! If you don't want to exclude anything, use fire()!")
(rvs.filter(_ ne exclude) ++ includes).reduce(_ && _)
}
def fire() = {
rvs.reduce(_ && _)
}
}
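// Illustrative use (names are placeholders, not from the original file):
//   val h = DecoupledHelper(in.valid, out.ready, busy_done)
// h.fire() ANDs all three, while h.fire(out.ready) ANDs the other two, which is the usual way to
// drive out.valid without making it depend combinationally on out.ready.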
object MuxT {
def apply[T <: Data, U <: Data](cond: Bool, con: (T, U), alt: (T, U)): (T, U) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2))
def apply[T <: Data, U <: Data, W <: Data](cond: Bool, con: (T, U, W), alt: (T, U, W)): (T, U, W) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3))
def apply[T <: Data, U <: Data, W <: Data, X <: Data](cond: Bool, con: (T, U, W, X), alt: (T, U, W, X)): (T, U, W, X) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3), Mux(cond, con._4, alt._4))
}
/** Creates a cascade of n MuxTs to search for a key value. */
object MuxTLookup {
def apply[S <: UInt, T <: Data, U <: Data](key: S, default: (T, U), mapping: Seq[(S, (T, U))]): (T, U) = {
var res = default
for ((k, v) <- mapping.reverse)
res = MuxT(k === key, v, res)
res
}
def apply[S <: UInt, T <: Data, U <: Data, W <: Data](key: S, default: (T, U, W), mapping: Seq[(S, (T, U, W))]): (T, U, W) = {
var res = default
for ((k, v) <- mapping.reverse)
res = MuxT(k === key, v, res)
res
}
}
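// Illustrative behaviour of MuxTLookup (not from the original file):
//   MuxTLookup(sel, (false.B, 0.U), Seq(1.U -> (true.B, 3.U), 2.U -> (false.B, 7.U)))
// yields (true.B, 3.U) when sel === 1.U, (false.B, 7.U) when sel === 2.U, and the default otherwise.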
object ValidMux {
def apply[T <: Data](v1: ValidIO[T], v2: ValidIO[T]*): ValidIO[T] = {
apply(v1 +: v2.toSeq)
}
def apply[T <: Data](valids: Seq[ValidIO[T]]): ValidIO[T] = {
val out = Wire(Valid(valids.head.bits.cloneType))
out.valid := valids.map(_.valid).reduce(_ || _)
out.bits := MuxCase(valids.head.bits,
valids.map(v => (v.valid -> v.bits)))
out
}
}
object Str
{
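// Builds fixed-width ASCII strings (packed into UInts) for printf-style debugging:
// Str("ab") packs the character bytes, Str(x, 16) renders x as hexadecimal digits.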
def apply(s: String): UInt = {
var i = BigInt(0)
require(s.forall(validChar _))
for (c <- s)
i = (i << 8) | c
i.U((s.length*8).W)
}
def apply(x: Char): UInt = {
require(validChar(x))
x.U(8.W)
}
def apply(x: UInt): UInt = apply(x, 10)
def apply(x: UInt, radix: Int): UInt = {
val rad = radix.U
val w = x.getWidth
require(w > 0)
var q = x
var s = digit(q % rad)
for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
q = q / rad
s = Cat(Mux((radix == 10).B && q === 0.U, Str(' '), digit(q % rad)), s)
}
s
}
def apply(x: SInt): UInt = apply(x, 10)
def apply(x: SInt, radix: Int): UInt = {
val neg = x < 0.S
val abs = x.abs.asUInt
if (radix != 10) {
Cat(Mux(neg, Str('-'), Str(' ')), Str(abs, radix))
} else {
val rad = radix.U
val w = abs.getWidth
require(w > 0)
var q = abs
var s = digit(q % rad)
var needSign = neg
for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
q = q / rad
val placeSpace = q === 0.U
val space = Mux(needSign, Str('-'), Str(' '))
needSign = needSign && !placeSpace
s = Cat(Mux(placeSpace, space, digit(q % rad)), s)
}
Cat(Mux(needSign, Str('-'), Str(' ')), s)
}
}
private def digit(d: UInt): UInt = Mux(d < 10.U, Str('0')+d, Str(('a'-10).toChar)+d)(7,0)
private def validChar(x: Char) = x == (x & 0xFF)
}
object Split
{
def apply(x: UInt, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n0), x.extract(n0-1,0))
}
def apply(x: UInt, n1: Int, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0))
}
def apply(x: UInt, n2: Int, n1: Int, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n2), x.extract(n2-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0))
}
}
object Random
{
def apply(mod: Int, random: UInt): UInt = {
if (isPow2(mod)) random.extract(log2Ceil(mod)-1,0)
else PriorityEncoder(partition(apply(1 << log2Up(mod*8), random), mod))
}
def apply(mod: Int): UInt = apply(mod, randomizer)
def oneHot(mod: Int, random: UInt): UInt = {
if (isPow2(mod)) UIntToOH(random(log2Up(mod)-1,0))
else PriorityEncoderOH(partition(apply(1 << log2Up(mod*8), random), mod)).asUInt
}
def oneHot(mod: Int): UInt = oneHot(mod, randomizer)
private def randomizer = LFSR(16)
private def partition(value: UInt, slices: Int) =
Seq.tabulate(slices)(i => value < (((i + 1) << value.getWidth) / slices).U)
}
object Majority {
def apply(in: Set[Bool]): Bool = {
val n = (in.size >> 1) + 1
val clauses = in.subsets(n).map(_.reduce(_ && _))
clauses.reduce(_ || _)
}
def apply(in: Seq[Bool]): Bool = apply(in.toSet)
def apply(in: UInt): Bool = apply(in.asBools.toSet)
}
object PopCountAtLeast {
private def two(x: UInt): (Bool, Bool) = x.getWidth match {
case 1 => (x.asBool, false.B)
case n =>
val half = x.getWidth / 2
val (leftOne, leftTwo) = two(x(half - 1, 0))
val (rightOne, rightTwo) = two(x(x.getWidth - 1, half))
(leftOne || rightOne, leftTwo || rightTwo || (leftOne && rightOne))
}
def apply(x: UInt, n: Int): Bool = n match {
case 0 => true.B
case 1 => x.orR
case 2 => two(x)._2
case 3 => PopCount(x) >= n.U
}
}
// This gets used everywhere, so make the smallest circuit possible ...
// Given an address and size, create a mask of beatBytes size
// eg: (0x3, 0, 4) => 0001, (0x3, 1, 4) => 0011, (0x3, 2, 4) => 1111
// groupBy applies an interleaved OR reduction; groupBy=2 take 0010 => 01
object MaskGen {
def apply(addr_lo: UInt, lgSize: UInt, beatBytes: Int, groupBy: Int = 1): UInt = {
require (groupBy >= 1 && beatBytes >= groupBy)
require (isPow2(beatBytes) && isPow2(groupBy))
val lgBytes = log2Ceil(beatBytes)
val sizeOH = UIntToOH(lgSize | 0.U(log2Up(beatBytes).W), log2Up(beatBytes)) | (groupBy*2 - 1).U
def helper(i: Int): Seq[(Bool, Bool)] = {
if (i == 0) {
Seq((lgSize >= lgBytes.asUInt, true.B))
} else {
val sub = helper(i-1)
val size = sizeOH(lgBytes - i)
val bit = addr_lo(lgBytes - i)
val nbit = !bit
Seq.tabulate (1 << i) { j =>
val (sub_acc, sub_eq) = sub(j/2)
val eq = sub_eq && (if (j % 2 == 1) bit else nbit)
val acc = sub_acc || (size && eq)
(acc, eq)
}
}
}
if (groupBy == beatBytes) 1.U else
Cat(helper(lgBytes-log2Ceil(groupBy)).map(_._1).reverse)
}
}
File AtomicAutomata.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy.lazymodule._
import freechips.rocketchip.diplomacy.{AddressSet, TransferSizes}
import freechips.rocketchip.util.leftOR
import scala.math.{min,max}
// Ensures that all downstream RW managers support Atomic operations.
// If !passthrough, intercept all Atomics. Otherwise, only intercept those unsupported downstream.
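// Illustrative composition (not from the original file; names are placeholders): the adapter exposes a
// TLAdapterNode, so it is spliced between a client and a manager in the diplomatic graph, e.g.
//   val aa = LazyModule(new TLAtomicAutomata(passthrough = true))
//   managerSide.node := aa.node := clientSide.node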
class TLAtomicAutomata(logical: Boolean = true, arithmetic: Boolean = true, concurrency: Int = 1, passthrough: Boolean = true)(implicit p: Parameters) extends LazyModule
{
require (concurrency >= 1)
val node = TLAdapterNode(
managerFn = { case mp => mp.v1copy(managers = mp.managers.map { m =>
val ourSupport = TransferSizes(1, mp.beatBytes)
def widen(x: TransferSizes) = if (passthrough && x.min <= 2*mp.beatBytes) TransferSizes(1, max(mp.beatBytes, x.max)) else ourSupport
val canDoit = m.supportsPutFull.contains(ourSupport) && m.supportsGet.contains(ourSupport)
// Blow up if there are devices to which we cannot add Atomics, because their R|W are too inflexible
require (!m.supportsPutFull || !m.supportsGet || canDoit, s"${m.name} has $ourSupport, needed PutFull(${m.supportsPutFull}) or Get(${m.supportsGet})")
m.v1copy(
supportsArithmetic = if (!arithmetic || !canDoit) m.supportsArithmetic else widen(m.supportsArithmetic),
supportsLogical = if (!logical || !canDoit) m.supportsLogical else widen(m.supportsLogical),
mayDenyGet = m.mayDenyGet || m.mayDenyPut)
})})
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
(node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
val managers = edgeOut.manager.managers
val beatBytes = edgeOut.manager.beatBytes
// To which managers are we adding atomic support?
val ourSupport = TransferSizes(1, beatBytes)
val managersNeedingHelp = managers.filter { m =>
m.supportsPutFull.contains(ourSupport) &&
m.supportsGet.contains(ourSupport) &&
((logical && !m.supportsLogical .contains(ourSupport)) ||
(arithmetic && !m.supportsArithmetic.contains(ourSupport)) ||
!passthrough) // we will do atomics for everyone we can
}
// Managers that need help with atomics must necessarily have this node as the root of a tree in the node graph.
// (But they must also ensure no sideband operations can get between the read and write.)
val violations = managersNeedingHelp.flatMap(_.findTreeViolation()).map { node => (node.name, node.inputs.map(_._1.name)) }
require(violations.isEmpty,
s"AtomicAutomata can only help nodes for which it is at the root of a diplomatic node tree," +
"but the following violations were found:\n" +
violations.map(v => s"(${v._1} has parents ${v._2})").mkString("\n"))
// We cannot add atomics to a non-FIFO manager
managersNeedingHelp foreach { m => require (m.fifoId.isDefined) }
// We need to preserve FIFO semantics across FIFO domains, not managers
// Suppose you have Put(42) Atomic(+1) both inflight; valid results: 42 or 43
// If we allow Put(42) Get() Put(+1) concurrent; valid results: 42 43 OR undef
// Making non-FIFO work requires waiting for all Acks to come back (=> use FIFOFixer)
val domainsNeedingHelp = managersNeedingHelp.map(_.fifoId.get).distinct
// Don't overprovision the CAM
val camSize = min(domainsNeedingHelp.size, concurrency)
// Compact the fifoIds to only those we care about
def camFifoId(m: TLSlaveParameters) = m.fifoId.map(id => max(0, domainsNeedingHelp.indexOf(id))).getOrElse(0)
// CAM entry state machine
val FREE = 0.U // unused waiting on Atomic from A
val GET = 3.U // Get sent down A waiting on AccessDataAck from D
val AMO = 2.U // AccessDataAck sent up D waiting for A availability
val ACK = 1.U // Put sent down A waiting for PutAck from D
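// A CAM entry therefore walks FREE -> GET -> AMO -> ACK -> FREE for each intercepted atomic:
// fetch the old data with a Get, compute the result, write it back with a Put, then free on the ack.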
val params = TLAtomicAutomata.CAMParams(out.a.bits.params, domainsNeedingHelp.size)
// Do we need to do anything at all?
if (camSize > 0) {
val initval = Wire(new TLAtomicAutomata.CAM_S(params))
initval.state := FREE
val cam_s = RegInit(VecInit.fill(camSize)(initval))
val cam_a = Reg(Vec(camSize, new TLAtomicAutomata.CAM_A(params)))
val cam_d = Reg(Vec(camSize, new TLAtomicAutomata.CAM_D(params)))
val cam_free = cam_s.map(_.state === FREE)
val cam_amo = cam_s.map(_.state === AMO)
val cam_abusy = cam_s.map(e => e.state === GET || e.state === AMO) // A is blocked
val cam_dmatch = cam_s.map(e => e.state =/= FREE) // D should inspect these entries
// Can the manager already handle this message?
val a_address = edgeIn.address(in.a.bits)
val a_size = edgeIn.size(in.a.bits)
val a_canLogical = passthrough.B && edgeOut.manager.supportsLogicalFast (a_address, a_size)
val a_canArithmetic = passthrough.B && edgeOut.manager.supportsArithmeticFast(a_address, a_size)
val a_isLogical = in.a.bits.opcode === TLMessages.LogicalData
val a_isArithmetic = in.a.bits.opcode === TLMessages.ArithmeticData
val a_isSupported = Mux(a_isLogical, a_canLogical, Mux(a_isArithmetic, a_canArithmetic, true.B))
// Must we do a Put?
val a_cam_any_put = cam_amo.reduce(_ || _)
val a_cam_por_put = cam_amo.scanLeft(false.B)(_||_).init
val a_cam_sel_put = (cam_amo zip a_cam_por_put) map { case (a, b) => a && !b }
val a_cam_a = PriorityMux(cam_amo, cam_a)
val a_cam_d = PriorityMux(cam_amo, cam_d)
val a_a = a_cam_a.bits.data
val a_d = a_cam_d.data
// Does the A request conflict with an inflight AMO?
val a_fifoId = edgeOut.manager.fastProperty(a_address, camFifoId _, (i:Int) => i.U)
val a_cam_busy = (cam_abusy zip cam_a.map(_.fifoId === a_fifoId)) map { case (a,b) => a&&b } reduce (_||_)
        // (Where) are we allocating in the CAM?
val a_cam_any_free = cam_free.reduce(_ || _)
val a_cam_por_free = cam_free.scanLeft(false.B)(_||_).init
val a_cam_sel_free = (cam_free zip a_cam_por_free) map { case (a,b) => a && !b }
// Logical AMO
val indexes = Seq.tabulate(beatBytes*8) { i => Cat(a_a(i,i), a_d(i,i)) }
val logic_out = Cat(indexes.map(x => a_cam_a.lut(x).asUInt).reverse)
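        // For illustration: each per-bit index above is Cat(operand bit, old-data bit), so the 4-bit
        // LUT captured below encodes the boolean op: AND -> 0x8 (b1000) is 1 only for (1,1);
        // OR -> 0xe; XOR -> 0x6; SWAP -> 0xc, which simply returns the operand bit.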
// Arithmetic AMO
val unsigned = a_cam_a.bits.param(1)
val take_max = a_cam_a.bits.param(0)
val adder = a_cam_a.bits.param(2)
val mask = a_cam_a.bits.mask
val signSel = ~(~mask | (mask >> 1))
val signbits_a = Cat(Seq.tabulate(beatBytes) { i => a_a(8*i+7,8*i+7) } .reverse)
val signbits_d = Cat(Seq.tabulate(beatBytes) { i => a_d(8*i+7,8*i+7) } .reverse)
// Move the selected sign bit into the first byte position it will extend
val signbit_a = ((signbits_a & signSel) << 1)(beatBytes-1, 0)
val signbit_d = ((signbits_d & signSel) << 1)(beatBytes-1, 0)
val signext_a = FillInterleaved(8, leftOR(signbit_a))
val signext_d = FillInterleaved(8, leftOR(signbit_d))
// NOTE: sign-extension does not change the relative ordering in EITHER unsigned or signed arithmetic
val wide_mask = FillInterleaved(8, mask)
val a_a_ext = (a_a & wide_mask) | signext_a
val a_d_ext = (a_d & wide_mask) | signext_d
val a_d_inv = Mux(adder, a_d_ext, ~a_d_ext)
val adder_out = a_a_ext + a_d_inv
val h = 8*beatBytes-1 // now sign-extended; use biggest bit
val a_bigger_uneq = unsigned === a_a_ext(h) // result if high bits are unequal
val a_bigger = Mux(a_a_ext(h) === a_d_ext(h), !adder_out(h), a_bigger_uneq)
val pick_a = take_max === a_bigger
val arith_out = Mux(adder, adder_out, Mux(pick_a, a_a, a_d))
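        // Worked example (signed MAX, equal sign bits): operand a = 5, old data d = 9 gives
        // adder_out = a + ~d = 5 - 9 - 1 = -5, so adder_out(h) = 1 and a_bigger = false; with
        // take_max = 1, pick_a is false and arith_out selects d = 9, i.e. max(5, 9). For ADD the
        // same adder computes a + d directly, since a_d_inv is then the uninverted a_d_ext.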
// AMO result data
val amo_data =
if (!logical) arith_out else
if (!arithmetic) logic_out else
Mux(a_cam_a.bits.opcode(0), logic_out, arith_out)
// Potentially mutate the message from inner
val source_i = Wire(chiselTypeOf(in.a))
val a_allow = !a_cam_busy && (a_isSupported || a_cam_any_free)
in.a.ready := source_i.ready && a_allow
source_i.valid := in.a.valid && a_allow
source_i.bits := in.a.bits
when (!a_isSupported) { // minimal mux difference
source_i.bits.opcode := TLMessages.Get
source_i.bits.param := 0.U
}
// Potentially take the message from the CAM
val source_c = Wire(chiselTypeOf(in.a))
source_c.valid := a_cam_any_put
source_c.bits := edgeOut.Put(
fromSource = a_cam_a.bits.source,
toAddress = edgeIn.address(a_cam_a.bits),
lgSize = a_cam_a.bits.size,
data = amo_data,
corrupt = a_cam_a.bits.corrupt || a_cam_d.corrupt)._2
source_c.bits.user :<= a_cam_a.bits.user
source_c.bits.echo :<= a_cam_a.bits.echo
// Finishing an AMO from the CAM has highest priority
TLArbiter(TLArbiter.lowestIndexFirst)(out.a, (0.U, source_c), (edgeOut.numBeats1(in.a.bits), source_i))
// Capture the A state into the CAM
when (source_i.fire && !a_isSupported) {
(a_cam_sel_free zip cam_a) foreach { case (en, r) =>
when (en) {
r.fifoId := a_fifoId
r.bits := in.a.bits
r.lut := MuxLookup(in.a.bits.param(1, 0), 0.U(4.W))(Array(
TLAtomics.AND -> 0x8.U,
TLAtomics.OR -> 0xe.U,
TLAtomics.XOR -> 0x6.U,
TLAtomics.SWAP -> 0xc.U))
}
}
(a_cam_sel_free zip cam_s) foreach { case (en, r) =>
when (en) {
r.state := GET
}
}
}
// Advance the put state
when (source_c.fire) {
(a_cam_sel_put zip cam_s) foreach { case (en, r) =>
when (en) {
r.state := ACK
}
}
}
// We need to deal with a potential D response in the same cycle as the A request
val d_first = edgeOut.first(out.d)
val d_cam_sel_raw = cam_a.map(_.bits.source === in.d.bits.source)
val d_cam_sel_match = (d_cam_sel_raw zip cam_dmatch) map { case (a,b) => a&&b }
val d_cam_data = Mux1H(d_cam_sel_match, cam_d.map(_.data))
val d_cam_denied = Mux1H(d_cam_sel_match, cam_d.map(_.denied))
val d_cam_corrupt = Mux1H(d_cam_sel_match, cam_d.map(_.corrupt))
val d_cam_sel_bypass = if (edgeOut.manager.minLatency > 0) false.B else
out.d.bits.source === in.a.bits.source && in.a.valid && !a_isSupported
val d_cam_sel = (a_cam_sel_free zip d_cam_sel_match) map { case (a,d) => Mux(d_cam_sel_bypass, a, d) }
val d_cam_sel_any = d_cam_sel_bypass || d_cam_sel_match.reduce(_ || _)
val d_ackd = out.d.bits.opcode === TLMessages.AccessAckData
val d_ack = out.d.bits.opcode === TLMessages.AccessAck
when (out.d.fire && d_first) {
(d_cam_sel zip cam_d) foreach { case (en, r) =>
when (en && d_ackd) {
r.data := out.d.bits.data
r.denied := out.d.bits.denied
r.corrupt := out.d.bits.corrupt
}
}
(d_cam_sel zip cam_s) foreach { case (en, r) =>
when (en) {
// Note: it is important that this comes AFTER the := GET, so we can go FREE=>GET=>AMO in one cycle
r.state := Mux(d_ackd, AMO, FREE)
}
}
}
val d_drop = d_first && d_ackd && d_cam_sel_any
val d_replace = d_first && d_ack && d_cam_sel_match.reduce(_ || _)
in.d.valid := out.d.valid && !d_drop
out.d.ready := in.d.ready || d_drop
in.d.bits := out.d.bits
when (d_replace) { // minimal muxes
in.d.bits.opcode := TLMessages.AccessAckData
in.d.bits.data := d_cam_data
in.d.bits.corrupt := d_cam_corrupt || out.d.bits.denied
in.d.bits.denied := d_cam_denied || out.d.bits.denied
}
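        // End-to-end illustration: if the inner master issues an ArithmeticData ADD +1 to a location
        // holding 42, the automaton forwards a Get, drops the returning AccessAckData(42) while
        // capturing it in the CAM, emits Put(43) from the CAM, and finally rewrites that Put's
        // AccessAck (via d_replace above) into AccessAckData(42) for the inner master, just as a
        // native atomic would respond.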
} else {
out.a.valid := in.a.valid
in.a.ready := out.a.ready
out.a.bits := in.a.bits
in.d.valid := out.d.valid
out.d.ready := in.d.ready
in.d.bits := out.d.bits
}
if (edgeOut.manager.anySupportAcquireB && edgeIn.client.anySupportProbe) {
in.b.valid := out.b.valid
out.b.ready := in.b.ready
in.b.bits := out.b.bits
out.c.valid := in.c.valid
in.c.ready := out.c.ready
out.c.bits := in.c.bits
out.e.valid := in.e.valid
in.e.ready := out.e.ready
out.e.bits := in.e.bits
} else {
in.b.valid := false.B
in.c.ready := true.B
in.e.ready := true.B
out.b.ready := true.B
out.c.valid := false.B
out.e.valid := false.B
}
}
}
}
object TLAtomicAutomata
{
def apply(logical: Boolean = true, arithmetic: Boolean = true, concurrency: Int = 1, passthrough: Boolean = true, nameSuffix: Option[String] = None)(implicit p: Parameters): TLNode =
{
val atomics = LazyModule(new TLAtomicAutomata(logical, arithmetic, concurrency, passthrough) {
override lazy val desiredName = (Seq("TLAtomicAutomata") ++ nameSuffix).mkString("_")
})
atomics.node
}
case class CAMParams(a: TLBundleParameters, domainsNeedingHelp: Int)
class CAM_S(val params: CAMParams) extends Bundle {
val state = UInt(2.W)
}
class CAM_A(val params: CAMParams) extends Bundle {
val bits = new TLBundleA(params.a)
val fifoId = UInt(log2Up(params.domainsNeedingHelp).W)
val lut = UInt(4.W)
}
class CAM_D(val params: CAMParams) extends Bundle {
val data = UInt(params.a.dataBits.W)
val denied = Bool()
val corrupt = Bool()
}
}
// Synthesizable unit tests
import freechips.rocketchip.unittest._
class TLRAMAtomicAutomata(txns: Int)(implicit p: Parameters) extends LazyModule {
val fuzz = LazyModule(new TLFuzzer(txns))
val model = LazyModule(new TLRAMModel("AtomicAutomata"))
val ram = LazyModule(new TLRAM(AddressSet(0x0, 0x3ff)))
// Confirm that the AtomicAutomata combines read + write errors
import TLMessages._
val test = new RequestPattern({a: TLBundleA =>
val doesA = a.opcode === ArithmeticData || a.opcode === LogicalData
val doesR = a.opcode === Get || doesA
val doesW = a.opcode === PutFullData || a.opcode === PutPartialData || doesA
(doesR && RequestPattern.overlaps(Seq(AddressSet(0x08, ~0x08)))(a)) ||
(doesW && RequestPattern.overlaps(Seq(AddressSet(0x10, ~0x10)))(a))
})
(ram.node
:= TLErrorEvaluator(test)
:= TLFragmenter(4, 256)
:= TLDelayer(0.1)
:= TLAtomicAutomata()
:= TLDelayer(0.1)
:= TLErrorEvaluator(test, testOn=true, testOff=true)
:= model.node
:= fuzz.node)
lazy val module = new Impl
class Impl extends LazyModuleImp(this) with UnitTestModule {
io.finished := fuzz.module.io.finished
}
}
class TLRAMAtomicAutomataTest(txns: Int = 5000, timeout: Int = 500000)(implicit p: Parameters) extends UnitTest(timeout) {
val dut = Module(LazyModule(new TLRAMAtomicAutomata(txns)).module)
io.finished := dut.io.finished
dut.io.start := io.start
}
File Nodes.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import org.chipsalliance.diplomacy.nodes._
import freechips.rocketchip.util.{AsyncQueueParams,RationalDirection}
case object TLMonitorBuilder extends Field[TLMonitorArgs => TLMonitorBase](args => new TLMonitor(args))
object TLImp extends NodeImp[TLMasterPortParameters, TLSlavePortParameters, TLEdgeOut, TLEdgeIn, TLBundle]
{
def edgeO(pd: TLMasterPortParameters, pu: TLSlavePortParameters, p: Parameters, sourceInfo: SourceInfo) = new TLEdgeOut(pd, pu, p, sourceInfo)
def edgeI(pd: TLMasterPortParameters, pu: TLSlavePortParameters, p: Parameters, sourceInfo: SourceInfo) = new TLEdgeIn (pd, pu, p, sourceInfo)
def bundleO(eo: TLEdgeOut) = TLBundle(eo.bundle)
def bundleI(ei: TLEdgeIn) = TLBundle(ei.bundle)
def render(ei: TLEdgeIn) = RenderedEdge(colour = "#000000" /* black */, label = (ei.manager.beatBytes * 8).toString)
override def monitor(bundle: TLBundle, edge: TLEdgeIn): Unit = {
val monitor = Module(edge.params(TLMonitorBuilder)(TLMonitorArgs(edge)))
monitor.io.in := bundle
}
override def mixO(pd: TLMasterPortParameters, node: OutwardNode[TLMasterPortParameters, TLSlavePortParameters, TLBundle]): TLMasterPortParameters =
pd.v1copy(clients = pd.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) })
override def mixI(pu: TLSlavePortParameters, node: InwardNode[TLMasterPortParameters, TLSlavePortParameters, TLBundle]): TLSlavePortParameters =
pu.v1copy(managers = pu.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) })
}
trait TLFormatNode extends FormatNode[TLEdgeIn, TLEdgeOut]
case class TLClientNode(portParams: Seq[TLMasterPortParameters])(implicit valName: ValName) extends SourceNode(TLImp)(portParams) with TLFormatNode
case class TLManagerNode(portParams: Seq[TLSlavePortParameters])(implicit valName: ValName) extends SinkNode(TLImp)(portParams) with TLFormatNode
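// Illustrative sketch (example parameter values only): endpoint nodes are typically built from the
// port parameters defined in Parameters.scala, e.g.
//   val clientNode  = TLClientNode(Seq(TLMasterPortParameters.v1(Seq(TLMasterParameters.v1(name = "example", sourceId = IdRange(0, 4))))))
//   val managerNode = TLManagerNode(Seq(TLSlavePortParameters.v1(Seq(TLSlaveParameters.v1(address = Seq(AddressSet(0x0, 0xfff)), supportsGet = TransferSizes(1, 8))), beatBytes = 8)))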
case class TLAdapterNode(
clientFn: TLMasterPortParameters => TLMasterPortParameters = { s => s },
managerFn: TLSlavePortParameters => TLSlavePortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLImp)(clientFn, managerFn) with TLFormatNode
case class TLJunctionNode(
clientFn: Seq[TLMasterPortParameters] => Seq[TLMasterPortParameters],
managerFn: Seq[TLSlavePortParameters] => Seq[TLSlavePortParameters])(
implicit valName: ValName)
extends JunctionNode(TLImp)(clientFn, managerFn) with TLFormatNode
case class TLIdentityNode()(implicit valName: ValName) extends IdentityNode(TLImp)() with TLFormatNode
object TLNameNode {
def apply(name: ValName) = TLIdentityNode()(name)
def apply(name: Option[String]): TLIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLIdentityNode = apply(Some(name))
}
case class TLEphemeralNode()(implicit valName: ValName) extends EphemeralNode(TLImp)()
object TLTempNode {
def apply(): TLEphemeralNode = TLEphemeralNode()(ValName("temp"))
}
case class TLNexusNode(
clientFn: Seq[TLMasterPortParameters] => TLMasterPortParameters,
managerFn: Seq[TLSlavePortParameters] => TLSlavePortParameters)(
implicit valName: ValName)
extends NexusNode(TLImp)(clientFn, managerFn) with TLFormatNode
abstract class TLCustomNode(implicit valName: ValName)
extends CustomNode(TLImp) with TLFormatNode
// Asynchronous crossings
trait TLAsyncFormatNode extends FormatNode[TLAsyncEdgeParameters, TLAsyncEdgeParameters]
object TLAsyncImp extends SimpleNodeImp[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncEdgeParameters, TLAsyncBundle]
{
def edge(pd: TLAsyncClientPortParameters, pu: TLAsyncManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLAsyncEdgeParameters(pd, pu, p, sourceInfo)
def bundle(e: TLAsyncEdgeParameters) = new TLAsyncBundle(e.bundle)
def render(e: TLAsyncEdgeParameters) = RenderedEdge(colour = "#ff0000" /* red */, label = e.manager.async.depth.toString)
override def mixO(pd: TLAsyncClientPortParameters, node: OutwardNode[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncBundle]): TLAsyncClientPortParameters =
pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }))
override def mixI(pu: TLAsyncManagerPortParameters, node: InwardNode[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncBundle]): TLAsyncManagerPortParameters =
pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }))
}
case class TLAsyncAdapterNode(
clientFn: TLAsyncClientPortParameters => TLAsyncClientPortParameters = { s => s },
managerFn: TLAsyncManagerPortParameters => TLAsyncManagerPortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLAsyncImp)(clientFn, managerFn) with TLAsyncFormatNode
case class TLAsyncIdentityNode()(implicit valName: ValName) extends IdentityNode(TLAsyncImp)() with TLAsyncFormatNode
object TLAsyncNameNode {
def apply(name: ValName) = TLAsyncIdentityNode()(name)
def apply(name: Option[String]): TLAsyncIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLAsyncIdentityNode = apply(Some(name))
}
case class TLAsyncSourceNode(sync: Option[Int])(implicit valName: ValName)
extends MixedAdapterNode(TLImp, TLAsyncImp)(
dFn = { p => TLAsyncClientPortParameters(p) },
uFn = { p => p.base.v1copy(minLatency = p.base.minLatency + sync.getOrElse(p.async.sync)) }) with FormatNode[TLEdgeIn, TLAsyncEdgeParameters] // discard cycles in other clock domain
case class TLAsyncSinkNode(async: AsyncQueueParams)(implicit valName: ValName)
extends MixedAdapterNode(TLAsyncImp, TLImp)(
dFn = { p => p.base.v1copy(minLatency = p.base.minLatency + async.sync) },
uFn = { p => TLAsyncManagerPortParameters(async, p) }) with FormatNode[TLAsyncEdgeParameters, TLEdgeOut]
// Rationally related crossings
trait TLRationalFormatNode extends FormatNode[TLRationalEdgeParameters, TLRationalEdgeParameters]
object TLRationalImp extends SimpleNodeImp[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalEdgeParameters, TLRationalBundle]
{
def edge(pd: TLRationalClientPortParameters, pu: TLRationalManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLRationalEdgeParameters(pd, pu, p, sourceInfo)
def bundle(e: TLRationalEdgeParameters) = new TLRationalBundle(e.bundle)
def render(e: TLRationalEdgeParameters) = RenderedEdge(colour = "#00ff00" /* green */)
override def mixO(pd: TLRationalClientPortParameters, node: OutwardNode[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalBundle]): TLRationalClientPortParameters =
pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }))
override def mixI(pu: TLRationalManagerPortParameters, node: InwardNode[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalBundle]): TLRationalManagerPortParameters =
pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }))
}
case class TLRationalAdapterNode(
clientFn: TLRationalClientPortParameters => TLRationalClientPortParameters = { s => s },
managerFn: TLRationalManagerPortParameters => TLRationalManagerPortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLRationalImp)(clientFn, managerFn) with TLRationalFormatNode
case class TLRationalIdentityNode()(implicit valName: ValName) extends IdentityNode(TLRationalImp)() with TLRationalFormatNode
object TLRationalNameNode {
def apply(name: ValName) = TLRationalIdentityNode()(name)
def apply(name: Option[String]): TLRationalIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLRationalIdentityNode = apply(Some(name))
}
case class TLRationalSourceNode()(implicit valName: ValName)
extends MixedAdapterNode(TLImp, TLRationalImp)(
dFn = { p => TLRationalClientPortParameters(p) },
uFn = { p => p.base.v1copy(minLatency = 1) }) with FormatNode[TLEdgeIn, TLRationalEdgeParameters] // discard cycles from other clock domain
case class TLRationalSinkNode(direction: RationalDirection)(implicit valName: ValName)
extends MixedAdapterNode(TLRationalImp, TLImp)(
dFn = { p => p.base.v1copy(minLatency = 1) },
uFn = { p => TLRationalManagerPortParameters(direction, p) }) with FormatNode[TLRationalEdgeParameters, TLEdgeOut]
// Credited version of TileLink channels
trait TLCreditedFormatNode extends FormatNode[TLCreditedEdgeParameters, TLCreditedEdgeParameters]
object TLCreditedImp extends SimpleNodeImp[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedEdgeParameters, TLCreditedBundle]
{
def edge(pd: TLCreditedClientPortParameters, pu: TLCreditedManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLCreditedEdgeParameters(pd, pu, p, sourceInfo)
def bundle(e: TLCreditedEdgeParameters) = new TLCreditedBundle(e.bundle)
def render(e: TLCreditedEdgeParameters) = RenderedEdge(colour = "#ffff00" /* yellow */, e.delay.toString)
override def mixO(pd: TLCreditedClientPortParameters, node: OutwardNode[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedBundle]): TLCreditedClientPortParameters =
pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }))
override def mixI(pu: TLCreditedManagerPortParameters, node: InwardNode[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedBundle]): TLCreditedManagerPortParameters =
pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }))
}
case class TLCreditedAdapterNode(
clientFn: TLCreditedClientPortParameters => TLCreditedClientPortParameters = { s => s },
managerFn: TLCreditedManagerPortParameters => TLCreditedManagerPortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLCreditedImp)(clientFn, managerFn) with TLCreditedFormatNode
case class TLCreditedIdentityNode()(implicit valName: ValName) extends IdentityNode(TLCreditedImp)() with TLCreditedFormatNode
object TLCreditedNameNode {
def apply(name: ValName) = TLCreditedIdentityNode()(name)
def apply(name: Option[String]): TLCreditedIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLCreditedIdentityNode = apply(Some(name))
}
case class TLCreditedSourceNode(delay: TLCreditedDelay)(implicit valName: ValName)
extends MixedAdapterNode(TLImp, TLCreditedImp)(
dFn = { p => TLCreditedClientPortParameters(delay, p) },
uFn = { p => p.base.v1copy(minLatency = 1) }) with FormatNode[TLEdgeIn, TLCreditedEdgeParameters] // discard cycles from other clock domain
case class TLCreditedSinkNode(delay: TLCreditedDelay)(implicit valName: ValName)
extends MixedAdapterNode(TLCreditedImp, TLImp)(
dFn = { p => p.base.v1copy(minLatency = 1) },
uFn = { p => TLCreditedManagerPortParameters(delay, p) }) with FormatNode[TLCreditedEdgeParameters, TLEdgeOut]
File LazyModuleImp.scala:
package org.chipsalliance.diplomacy.lazymodule
import chisel3.{withClockAndReset, Module, RawModule, Reset, _}
import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo}
import firrtl.passes.InlineAnnotation
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.nodes.Dangle
import scala.collection.immutable.SortedMap
/** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]].
*
* This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy.
*/
sealed trait LazyModuleImpLike extends RawModule {
/** [[LazyModule]] that contains this instance. */
val wrapper: LazyModule
/** IOs that will be automatically "punched" for this instance. */
val auto: AutoBundle
/** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */
protected[diplomacy] val dangles: Seq[Dangle]
// [[wrapper.module]] had better not be accessed while LazyModules are still being built!
require(
LazyModule.scope.isEmpty,
s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}"
)
/** Set module name. Defaults to the containing LazyModule's desiredName. */
override def desiredName: String = wrapper.desiredName
suggestName(wrapper.suggestedName)
/** [[Parameters]] for chisel [[Module]]s. */
implicit val p: Parameters = wrapper.p
  /** Instantiate this [[LazyModule]], returning its [[AutoBundle]] and the unconnected [[Dangle]]s from this module and
* submodules.
*/
protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = {
// 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]],
// 2. return [[Dangle]]s from each module.
val childDangles = wrapper.children.reverse.flatMap { c =>
implicit val sourceInfo: SourceInfo = c.info
c.cloneProto.map { cp =>
// If the child is a clone, then recursively set cloneProto of its children as well
def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = {
require(bases.size == clones.size)
(bases.zip(clones)).map { case (l, r) =>
require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}")
l.cloneProto = Some(r)
assignCloneProtos(l.children, r.children)
}
}
assignCloneProtos(c.children, cp.children)
// Clone the child module as a record, and get its [[AutoBundle]]
val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName)
val clonedAuto = clone("auto").asInstanceOf[AutoBundle]
        // Get the empty [[Dangle]]s of the cloned child
val rawDangles = c.cloneDangles()
require(rawDangles.size == clonedAuto.elements.size)
        // Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]s
val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) }
dangles
}.getOrElse {
// For non-clones, instantiate the child module
val mod = try {
Module(c.module)
} catch {
case e: ChiselException => {
println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}")
throw e
}
}
mod.dangles
}
}
// Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]].
// This will result in a sequence of [[Dangle]] from these [[BaseNode]]s.
val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate())
// Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]]
val allDangles = nodeDangles ++ childDangles
// Group [[allDangles]] by their [[source]].
val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*)
// For each [[source]] set of [[Dangle]]s of size 2, ensure that these
// can be connected as a source-sink pair (have opposite flipped value).
// Make the connection and mark them as [[done]].
val done = Set() ++ pairing.values.filter(_.size == 2).map {
case Seq(a, b) =>
require(a.flipped != b.flipped)
// @todo <> in chisel3 makes directionless connection.
if (a.flipped) {
a.data <> b.data
} else {
b.data <> a.data
}
a.source
case _ => None
}
// Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module.
val forward = allDangles.filter(d => !done(d.source))
// Generate [[AutoBundle]] IO from [[forward]].
val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*))
// Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]]
val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) =>
if (d.flipped) {
d.data <> io
} else {
io <> d.data
}
d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name)
}
// Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]].
wrapper.inModuleBody.reverse.foreach {
_()
}
if (wrapper.shouldBeInlined) {
chisel3.experimental.annotate(new ChiselAnnotation {
def toFirrtl = InlineAnnotation(toNamed)
})
}
// Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]].
(auto, dangles)
}
}
/** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike {
/** Instantiate hardware of this `Module`. */
val (auto, dangles) = instantiate()
}
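// Typical pattern (illustrative): a LazyModule declares its diplomatic nodes eagerly and defers
// hardware generation to a LazyModuleImp, which is only elaborated once negotiation has completed, e.g.
//   class MyAdapter(implicit p: Parameters) extends LazyModule {
//     val node = TLIdentityNode()
//     lazy val module = new LazyModuleImp(this) {
//       // connect node.in to node.out here
//     }
//   }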
/** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike {
// These wires are the default clock+reset for all LazyModule children.
// It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the
// [[LazyRawModuleImp]] children.
// Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly.
/** drive clock explicitly. */
val childClock: Clock = Wire(Clock())
/** drive reset explicitly. */
val childReset: Reset = Wire(Reset())
// the default is that these are disabled
childClock := false.B.asClock
childReset := chisel3.DontCare
def provideImplicitClockToLazyChildren: Boolean = false
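  // Subclasses that want their lazy children elaborated under childClock/childReset can override
  // this to true, e.g. `override def provideImplicitClockToLazyChildren = true`.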
val (auto, dangles) =
if (provideImplicitClockToLazyChildren) {
withClockAndReset(childClock, childReset) { instantiate() }
} else {
instantiate()
}
}
File Parameters.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy.nodes._
import freechips.rocketchip.diplomacy.{
AddressDecoder, AddressSet, BufferParams, DirectedBuffers, IdMap, IdMapEntry,
IdRange, RegionType, TransferSizes
}
import freechips.rocketchip.resources.{Resource, ResourceAddress, ResourcePermissions}
import freechips.rocketchip.util.{
AsyncQueueParams, BundleField, BundleFieldBase, BundleKeyBase,
CreditedDelay, groupByIntoSeq, RationalDirection, SimpleProduct
}
import scala.math.max
// These transfer sizes describe requests issued from masters on the A channel that will be responded to by slaves on the D channel
case class TLMasterToSlaveTransferSizes(
// Supports both Acquire+Release of the following two sizes:
acquireT: TransferSizes = TransferSizes.none,
acquireB: TransferSizes = TransferSizes.none,
arithmetic: TransferSizes = TransferSizes.none,
logical: TransferSizes = TransferSizes.none,
get: TransferSizes = TransferSizes.none,
putFull: TransferSizes = TransferSizes.none,
putPartial: TransferSizes = TransferSizes.none,
hint: TransferSizes = TransferSizes.none)
extends TLCommonTransferSizes {
def intersect(rhs: TLMasterToSlaveTransferSizes) = TLMasterToSlaveTransferSizes(
acquireT = acquireT .intersect(rhs.acquireT),
acquireB = acquireB .intersect(rhs.acquireB),
arithmetic = arithmetic.intersect(rhs.arithmetic),
logical = logical .intersect(rhs.logical),
get = get .intersect(rhs.get),
putFull = putFull .intersect(rhs.putFull),
putPartial = putPartial.intersect(rhs.putPartial),
hint = hint .intersect(rhs.hint))
def mincover(rhs: TLMasterToSlaveTransferSizes) = TLMasterToSlaveTransferSizes(
acquireT = acquireT .mincover(rhs.acquireT),
acquireB = acquireB .mincover(rhs.acquireB),
arithmetic = arithmetic.mincover(rhs.arithmetic),
logical = logical .mincover(rhs.logical),
get = get .mincover(rhs.get),
putFull = putFull .mincover(rhs.putFull),
putPartial = putPartial.mincover(rhs.putPartial),
hint = hint .mincover(rhs.hint))
// Reduce rendering to a simple yes/no per field
override def toString = {
def str(x: TransferSizes, flag: String) = if (x.none) "" else flag
def flags = Vector(
str(acquireT, "T"),
str(acquireB, "B"),
str(arithmetic, "A"),
str(logical, "L"),
str(get, "G"),
str(putFull, "F"),
str(putPartial, "P"),
str(hint, "H"))
flags.mkString
}
// Prints out the actual information in a user readable way
def infoString = {
s"""acquireT = ${acquireT}
|acquireB = ${acquireB}
|arithmetic = ${arithmetic}
|logical = ${logical}
|get = ${get}
|putFull = ${putFull}
|putPartial = ${putPartial}
|hint = ${hint}
|
|""".stripMargin
}
}
object TLMasterToSlaveTransferSizes {
def unknownEmits = TLMasterToSlaveTransferSizes(
acquireT = TransferSizes(1, 4096),
acquireB = TransferSizes(1, 4096),
arithmetic = TransferSizes(1, 4096),
logical = TransferSizes(1, 4096),
get = TransferSizes(1, 4096),
putFull = TransferSizes(1, 4096),
putPartial = TransferSizes(1, 4096),
hint = TransferSizes(1, 4096))
def unknownSupports = TLMasterToSlaveTransferSizes()
}
// These transfer sizes describe requests issued from slaves on the B channel that will be responded to by masters on the C channel
case class TLSlaveToMasterTransferSizes(
probe: TransferSizes = TransferSizes.none,
arithmetic: TransferSizes = TransferSizes.none,
logical: TransferSizes = TransferSizes.none,
get: TransferSizes = TransferSizes.none,
putFull: TransferSizes = TransferSizes.none,
putPartial: TransferSizes = TransferSizes.none,
hint: TransferSizes = TransferSizes.none
) extends TLCommonTransferSizes {
def intersect(rhs: TLSlaveToMasterTransferSizes) = TLSlaveToMasterTransferSizes(
probe = probe .intersect(rhs.probe),
arithmetic = arithmetic.intersect(rhs.arithmetic),
logical = logical .intersect(rhs.logical),
get = get .intersect(rhs.get),
putFull = putFull .intersect(rhs.putFull),
putPartial = putPartial.intersect(rhs.putPartial),
hint = hint .intersect(rhs.hint)
)
def mincover(rhs: TLSlaveToMasterTransferSizes) = TLSlaveToMasterTransferSizes(
probe = probe .mincover(rhs.probe),
arithmetic = arithmetic.mincover(rhs.arithmetic),
logical = logical .mincover(rhs.logical),
get = get .mincover(rhs.get),
putFull = putFull .mincover(rhs.putFull),
putPartial = putPartial.mincover(rhs.putPartial),
hint = hint .mincover(rhs.hint)
)
// Reduce rendering to a simple yes/no per field
override def toString = {
def str(x: TransferSizes, flag: String) = if (x.none) "" else flag
def flags = Vector(
str(probe, "P"),
str(arithmetic, "A"),
str(logical, "L"),
str(get, "G"),
str(putFull, "F"),
str(putPartial, "P"),
str(hint, "H"))
flags.mkString
}
// Prints out the actual information in a user readable way
def infoString = {
s"""probe = ${probe}
|arithmetic = ${arithmetic}
|logical = ${logical}
|get = ${get}
|putFull = ${putFull}
|putPartial = ${putPartial}
|hint = ${hint}
|
|""".stripMargin
}
}
object TLSlaveToMasterTransferSizes {
def unknownEmits = TLSlaveToMasterTransferSizes(
arithmetic = TransferSizes(1, 4096),
logical = TransferSizes(1, 4096),
get = TransferSizes(1, 4096),
putFull = TransferSizes(1, 4096),
putPartial = TransferSizes(1, 4096),
hint = TransferSizes(1, 4096),
probe = TransferSizes(1, 4096))
def unknownSupports = TLSlaveToMasterTransferSizes()
}
trait TLCommonTransferSizes {
def arithmetic: TransferSizes
def logical: TransferSizes
def get: TransferSizes
def putFull: TransferSizes
def putPartial: TransferSizes
def hint: TransferSizes
}
class TLSlaveParameters private(
val nodePath: Seq[BaseNode],
val resources: Seq[Resource],
setName: Option[String],
val address: Seq[AddressSet],
val regionType: RegionType.T,
val executable: Boolean,
val fifoId: Option[Int],
val supports: TLMasterToSlaveTransferSizes,
val emits: TLSlaveToMasterTransferSizes,
// By default, slaves are forbidden from issuing 'denied' responses (it prevents Fragmentation)
  val alwaysGrantsT: Boolean, // typically only true for CacheCork'd read-write devices; dual: neverReleasesData
// If fifoId=Some, all accesses sent to the same fifoId are executed and ACK'd in FIFO order
// Note: you can only rely on this FIFO behaviour if your TLMasterParameters include requestFifo
val mayDenyGet: Boolean, // applies to: AccessAckData, GrantData
val mayDenyPut: Boolean) // applies to: AccessAck, Grant, HintAck
// ReleaseAck may NEVER be denied
extends SimpleProduct
{
def sortedAddress = address.sorted
override def canEqual(that: Any): Boolean = that.isInstanceOf[TLSlaveParameters]
override def productPrefix = "TLSlaveParameters"
// We intentionally omit nodePath for equality testing / formatting
def productArity: Int = 11
def productElement(n: Int): Any = n match {
case 0 => name
case 1 => address
case 2 => resources
case 3 => regionType
case 4 => executable
case 5 => fifoId
case 6 => supports
case 7 => emits
case 8 => alwaysGrantsT
case 9 => mayDenyGet
case 10 => mayDenyPut
case _ => throw new IndexOutOfBoundsException(n.toString)
}
def supportsAcquireT: TransferSizes = supports.acquireT
def supportsAcquireB: TransferSizes = supports.acquireB
def supportsArithmetic: TransferSizes = supports.arithmetic
def supportsLogical: TransferSizes = supports.logical
def supportsGet: TransferSizes = supports.get
def supportsPutFull: TransferSizes = supports.putFull
def supportsPutPartial: TransferSizes = supports.putPartial
def supportsHint: TransferSizes = supports.hint
require (!address.isEmpty, "Address cannot be empty")
address.foreach { a => require (a.finite, "Address must be finite") }
address.combinations(2).foreach { case Seq(x,y) => require (!x.overlaps(y), s"$x and $y overlap.") }
require (supportsPutFull.contains(supportsPutPartial), s"PutFull($supportsPutFull) < PutPartial($supportsPutPartial)")
require (supportsPutFull.contains(supportsArithmetic), s"PutFull($supportsPutFull) < Arithmetic($supportsArithmetic)")
require (supportsPutFull.contains(supportsLogical), s"PutFull($supportsPutFull) < Logical($supportsLogical)")
require (supportsGet.contains(supportsArithmetic), s"Get($supportsGet) < Arithmetic($supportsArithmetic)")
require (supportsGet.contains(supportsLogical), s"Get($supportsGet) < Logical($supportsLogical)")
require (supportsAcquireB.contains(supportsAcquireT), s"AcquireB($supportsAcquireB) < AcquireT($supportsAcquireT)")
  require (!alwaysGrantsT || supportsAcquireT, s"Must support AcquireT if promising to always grant T")
// Make sure that the regionType agrees with the capabilities
require (!supportsAcquireB || regionType >= RegionType.UNCACHED) // acquire -> uncached, tracked, cached
require (regionType <= RegionType.UNCACHED || supportsAcquireB) // tracked, cached -> acquire
require (regionType != RegionType.UNCACHED || supportsGet) // uncached -> supportsGet
val name = setName.orElse(nodePath.lastOption.map(_.lazyModule.name)).getOrElse("disconnected")
val maxTransfer = List( // Largest supported transfer of all types
supportsAcquireT.max,
supportsAcquireB.max,
supportsArithmetic.max,
supportsLogical.max,
supportsGet.max,
supportsPutFull.max,
supportsPutPartial.max).max
val maxAddress = address.map(_.max).max
val minAlignment = address.map(_.alignment).min
// The device had better not support a transfer larger than its alignment
require (minAlignment >= maxTransfer, s"Bad $address: minAlignment ($minAlignment) must be >= maxTransfer ($maxTransfer)")
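  // For example, a slave covering AddressSet(0x0, 0x3ff) has minAlignment 0x400, so it may
  // advertise transfers of up to 1024 bytes but no larger.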
def toResource: ResourceAddress = {
ResourceAddress(address, ResourcePermissions(
r = supportsAcquireB || supportsGet,
w = supportsAcquireT || supportsPutFull,
x = executable,
c = supportsAcquireB,
a = supportsArithmetic && supportsLogical))
}
def findTreeViolation() = nodePath.find {
case _: MixedAdapterNode[_, _, _, _, _, _, _, _] => false
case _: SinkNode[_, _, _, _, _] => false
case node => node.inputs.size != 1
}
def isTree = findTreeViolation() == None
def infoString = {
s"""Slave Name = ${name}
|Slave Address = ${address}
|supports = ${supports.infoString}
|
|""".stripMargin
}
def v1copy(
address: Seq[AddressSet] = address,
resources: Seq[Resource] = resources,
regionType: RegionType.T = regionType,
executable: Boolean = executable,
nodePath: Seq[BaseNode] = nodePath,
supportsAcquireT: TransferSizes = supports.acquireT,
supportsAcquireB: TransferSizes = supports.acquireB,
supportsArithmetic: TransferSizes = supports.arithmetic,
supportsLogical: TransferSizes = supports.logical,
supportsGet: TransferSizes = supports.get,
supportsPutFull: TransferSizes = supports.putFull,
supportsPutPartial: TransferSizes = supports.putPartial,
supportsHint: TransferSizes = supports.hint,
mayDenyGet: Boolean = mayDenyGet,
mayDenyPut: Boolean = mayDenyPut,
alwaysGrantsT: Boolean = alwaysGrantsT,
fifoId: Option[Int] = fifoId) =
{
new TLSlaveParameters(
setName = setName,
address = address,
resources = resources,
regionType = regionType,
executable = executable,
nodePath = nodePath,
supports = TLMasterToSlaveTransferSizes(
acquireT = supportsAcquireT,
acquireB = supportsAcquireB,
arithmetic = supportsArithmetic,
logical = supportsLogical,
get = supportsGet,
putFull = supportsPutFull,
putPartial = supportsPutPartial,
hint = supportsHint),
emits = emits,
mayDenyGet = mayDenyGet,
mayDenyPut = mayDenyPut,
alwaysGrantsT = alwaysGrantsT,
fifoId = fifoId)
}
def v2copy(
nodePath: Seq[BaseNode] = nodePath,
resources: Seq[Resource] = resources,
name: Option[String] = setName,
address: Seq[AddressSet] = address,
regionType: RegionType.T = regionType,
executable: Boolean = executable,
fifoId: Option[Int] = fifoId,
supports: TLMasterToSlaveTransferSizes = supports,
emits: TLSlaveToMasterTransferSizes = emits,
alwaysGrantsT: Boolean = alwaysGrantsT,
mayDenyGet: Boolean = mayDenyGet,
mayDenyPut: Boolean = mayDenyPut) =
{
new TLSlaveParameters(
nodePath = nodePath,
resources = resources,
setName = name,
address = address,
regionType = regionType,
executable = executable,
fifoId = fifoId,
supports = supports,
emits = emits,
alwaysGrantsT = alwaysGrantsT,
mayDenyGet = mayDenyGet,
mayDenyPut = mayDenyPut)
}
@deprecated("Use v1copy instead of copy","")
def copy(
address: Seq[AddressSet] = address,
resources: Seq[Resource] = resources,
regionType: RegionType.T = regionType,
executable: Boolean = executable,
nodePath: Seq[BaseNode] = nodePath,
supportsAcquireT: TransferSizes = supports.acquireT,
supportsAcquireB: TransferSizes = supports.acquireB,
supportsArithmetic: TransferSizes = supports.arithmetic,
supportsLogical: TransferSizes = supports.logical,
supportsGet: TransferSizes = supports.get,
supportsPutFull: TransferSizes = supports.putFull,
supportsPutPartial: TransferSizes = supports.putPartial,
supportsHint: TransferSizes = supports.hint,
mayDenyGet: Boolean = mayDenyGet,
mayDenyPut: Boolean = mayDenyPut,
alwaysGrantsT: Boolean = alwaysGrantsT,
fifoId: Option[Int] = fifoId) =
{
v1copy(
address = address,
resources = resources,
regionType = regionType,
executable = executable,
nodePath = nodePath,
supportsAcquireT = supportsAcquireT,
supportsAcquireB = supportsAcquireB,
supportsArithmetic = supportsArithmetic,
supportsLogical = supportsLogical,
supportsGet = supportsGet,
supportsPutFull = supportsPutFull,
supportsPutPartial = supportsPutPartial,
supportsHint = supportsHint,
mayDenyGet = mayDenyGet,
mayDenyPut = mayDenyPut,
alwaysGrantsT = alwaysGrantsT,
fifoId = fifoId)
}
}
object TLSlaveParameters {
def v1(
address: Seq[AddressSet],
resources: Seq[Resource] = Seq(),
regionType: RegionType.T = RegionType.GET_EFFECTS,
executable: Boolean = false,
nodePath: Seq[BaseNode] = Seq(),
supportsAcquireT: TransferSizes = TransferSizes.none,
supportsAcquireB: TransferSizes = TransferSizes.none,
supportsArithmetic: TransferSizes = TransferSizes.none,
supportsLogical: TransferSizes = TransferSizes.none,
supportsGet: TransferSizes = TransferSizes.none,
supportsPutFull: TransferSizes = TransferSizes.none,
supportsPutPartial: TransferSizes = TransferSizes.none,
supportsHint: TransferSizes = TransferSizes.none,
mayDenyGet: Boolean = false,
mayDenyPut: Boolean = false,
alwaysGrantsT: Boolean = false,
fifoId: Option[Int] = None) =
{
new TLSlaveParameters(
setName = None,
address = address,
resources = resources,
regionType = regionType,
executable = executable,
nodePath = nodePath,
supports = TLMasterToSlaveTransferSizes(
acquireT = supportsAcquireT,
acquireB = supportsAcquireB,
arithmetic = supportsArithmetic,
logical = supportsLogical,
get = supportsGet,
putFull = supportsPutFull,
putPartial = supportsPutPartial,
hint = supportsHint),
emits = TLSlaveToMasterTransferSizes.unknownEmits,
mayDenyGet = mayDenyGet,
mayDenyPut = mayDenyPut,
alwaysGrantsT = alwaysGrantsT,
fifoId = fifoId)
}
def v2(
address: Seq[AddressSet],
nodePath: Seq[BaseNode] = Seq(),
resources: Seq[Resource] = Seq(),
name: Option[String] = None,
regionType: RegionType.T = RegionType.GET_EFFECTS,
executable: Boolean = false,
fifoId: Option[Int] = None,
supports: TLMasterToSlaveTransferSizes = TLMasterToSlaveTransferSizes.unknownSupports,
emits: TLSlaveToMasterTransferSizes = TLSlaveToMasterTransferSizes.unknownEmits,
alwaysGrantsT: Boolean = false,
mayDenyGet: Boolean = false,
mayDenyPut: Boolean = false) =
{
new TLSlaveParameters(
nodePath = nodePath,
resources = resources,
setName = name,
address = address,
regionType = regionType,
executable = executable,
fifoId = fifoId,
supports = supports,
emits = emits,
alwaysGrantsT = alwaysGrantsT,
mayDenyGet = mayDenyGet,
mayDenyPut = mayDenyPut)
}
}
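// Illustrative v2-style declaration (example values only): a simple read/write device might use
//   TLSlaveParameters.v2(
//     name     = Some("exampleRAM"),
//     address  = Seq(AddressSet(0x80000000L, 0xfff)),
//     supports = TLMasterToSlaveTransferSizes(
//       get     = TransferSizes(1, 8),
//       putFull = TransferSizes(1, 8)))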
object TLManagerParameters {
@deprecated("Use TLSlaveParameters.v1 instead of TLManagerParameters","")
def apply(
address: Seq[AddressSet],
resources: Seq[Resource] = Seq(),
regionType: RegionType.T = RegionType.GET_EFFECTS,
executable: Boolean = false,
nodePath: Seq[BaseNode] = Seq(),
supportsAcquireT: TransferSizes = TransferSizes.none,
supportsAcquireB: TransferSizes = TransferSizes.none,
supportsArithmetic: TransferSizes = TransferSizes.none,
supportsLogical: TransferSizes = TransferSizes.none,
supportsGet: TransferSizes = TransferSizes.none,
supportsPutFull: TransferSizes = TransferSizes.none,
supportsPutPartial: TransferSizes = TransferSizes.none,
supportsHint: TransferSizes = TransferSizes.none,
mayDenyGet: Boolean = false,
mayDenyPut: Boolean = false,
alwaysGrantsT: Boolean = false,
fifoId: Option[Int] = None) =
TLSlaveParameters.v1(
address,
resources,
regionType,
executable,
nodePath,
supportsAcquireT,
supportsAcquireB,
supportsArithmetic,
supportsLogical,
supportsGet,
supportsPutFull,
supportsPutPartial,
supportsHint,
mayDenyGet,
mayDenyPut,
alwaysGrantsT,
fifoId,
)
}
case class TLChannelBeatBytes(a: Option[Int], b: Option[Int], c: Option[Int], d: Option[Int])
{
def members = Seq(a, b, c, d)
members.collect { case Some(beatBytes) =>
require (isPow2(beatBytes), "Data channel width must be a power of 2")
}
}
object TLChannelBeatBytes{
def apply(beatBytes: Int): TLChannelBeatBytes = TLChannelBeatBytes(
Some(beatBytes),
Some(beatBytes),
Some(beatBytes),
Some(beatBytes))
def apply(): TLChannelBeatBytes = TLChannelBeatBytes(
None,
None,
None,
None)
}
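// For example, TLChannelBeatBytes(8) gives all four data channels (A, B, C, D) an 8-byte beat,
// while TLChannelBeatBytes() leaves every channel width unspecified.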
class TLSlavePortParameters private(
val slaves: Seq[TLSlaveParameters],
val channelBytes: TLChannelBeatBytes,
val endSinkId: Int,
val minLatency: Int,
val responseFields: Seq[BundleFieldBase],
val requestKeys: Seq[BundleKeyBase]) extends SimpleProduct
{
def sortedSlaves = slaves.sortBy(_.sortedAddress.head)
override def canEqual(that: Any): Boolean = that.isInstanceOf[TLSlavePortParameters]
override def productPrefix = "TLSlavePortParameters"
def productArity: Int = 6
def productElement(n: Int): Any = n match {
case 0 => slaves
case 1 => channelBytes
case 2 => endSinkId
case 3 => minLatency
case 4 => responseFields
case 5 => requestKeys
case _ => throw new IndexOutOfBoundsException(n.toString)
}
require (!slaves.isEmpty, "Slave ports must have slaves")
require (endSinkId >= 0, "Sink ids cannot be negative")
require (minLatency >= 0, "Minimum required latency cannot be negative")
// Using this API implies you cannot handle mixed-width busses
def beatBytes = {
channelBytes.members.foreach { width =>
require (width.isDefined && width == channelBytes.a)
}
channelBytes.a.get
}
// TODO this should be deprecated
def managers = slaves
def requireFifo(policy: TLFIFOFixer.Policy = TLFIFOFixer.allFIFO) = {
val relevant = slaves.filter(m => policy(m))
relevant.foreach { m =>
require(m.fifoId == relevant.head.fifoId, s"${m.name} had fifoId ${m.fifoId}, which was not homogeneous (${slaves.map(s => (s.name, s.fifoId))}) ")
}
}
// Bounds on required sizes
def maxAddress = slaves.map(_.maxAddress).max
def maxTransfer = slaves.map(_.maxTransfer).max
def mayDenyGet = slaves.exists(_.mayDenyGet)
def mayDenyPut = slaves.exists(_.mayDenyPut)
// Diplomatically determined operation sizes emitted by all outward Slaves
  // (as opposed to the emits*Safe methods, which generate circuitry to check specific addresses)
val allEmitClaims = slaves.map(_.emits).reduce( _ intersect _)
  // Operations emitted by at least one outward Slave
  // (as opposed to the emits*Safe methods, which generate circuitry to check specific addresses)
val anyEmitClaims = slaves.map(_.emits).reduce(_ mincover _)
// Diplomatically determined operation sizes supported by all outward Slaves
  // (as opposed to the supports*Safe/Fast methods, which generate circuitry to check specific addresses)
val allSupportClaims = slaves.map(_.supports).reduce( _ intersect _)
val allSupportAcquireT = allSupportClaims.acquireT
val allSupportAcquireB = allSupportClaims.acquireB
val allSupportArithmetic = allSupportClaims.arithmetic
val allSupportLogical = allSupportClaims.logical
val allSupportGet = allSupportClaims.get
val allSupportPutFull = allSupportClaims.putFull
val allSupportPutPartial = allSupportClaims.putPartial
val allSupportHint = allSupportClaims.hint
  // Operations supported by at least one outward Slave
  // (as opposed to the supports*Safe/Fast methods, which generate circuitry to check specific addresses)
val anySupportClaims = slaves.map(_.supports).reduce(_ mincover _)
val anySupportAcquireT = !anySupportClaims.acquireT.none
val anySupportAcquireB = !anySupportClaims.acquireB.none
val anySupportArithmetic = !anySupportClaims.arithmetic.none
val anySupportLogical = !anySupportClaims.logical.none
val anySupportGet = !anySupportClaims.get.none
val anySupportPutFull = !anySupportClaims.putFull.none
val anySupportPutPartial = !anySupportClaims.putPartial.none
val anySupportHint = !anySupportClaims.hint.none
// Supporting Acquire means being routable for GrantAck
require ((endSinkId == 0) == !anySupportAcquireB)
// These return Option[TLSlaveParameters] for your convenience
def find(address: BigInt) = slaves.find(_.address.exists(_.contains(address)))
// The safe version will check the entire address
def findSafe(address: UInt) = VecInit(sortedSlaves.map(_.address.map(_.contains(address)).reduce(_ || _)))
// The fast version assumes the address is valid (you probably want fastProperty instead of this function)
def findFast(address: UInt) = {
val routingMask = AddressDecoder(slaves.map(_.address))
VecInit(sortedSlaves.map(_.address.map(_.widen(~routingMask)).distinct.map(_.contains(address)).reduce(_ || _)))
}
// Compute the simplest AddressSets that decide a key
def fastPropertyGroup[K](p: TLSlaveParameters => K): Seq[(K, Seq[AddressSet])] = {
val groups = groupByIntoSeq(sortedSlaves.map(m => (p(m), m.address)))( _._1).map { case (k, vs) =>
k -> vs.flatMap(_._2)
}
val reductionMask = AddressDecoder(groups.map(_._2))
groups.map { case (k, seq) => k -> AddressSet.unify(seq.map(_.widen(~reductionMask)).distinct) }
}
// Select a property
def fastProperty[K, D <: Data](address: UInt, p: TLSlaveParameters => K, d: K => D): D =
Mux1H(fastPropertyGroup(p).map { case (v, a) => (a.map(_.contains(address)).reduce(_||_), d(v)) })
// Note: returns the actual fifoId + 1 or 0 if None
def findFifoIdFast(address: UInt) = fastProperty(address, _.fifoId.map(_+1).getOrElse(0), (i:Int) => i.U)
def hasFifoIdFast(address: UInt) = fastProperty(address, _.fifoId.isDefined, (b:Boolean) => b.B)
// Does this Port manage this ID/address?
def containsSafe(address: UInt) = findSafe(address).reduce(_ || _)
private def addressHelper(
// setting safe to false indicates that all addresses are expected to be legal, which might reduce circuit complexity
safe: Boolean,
// member filters out the sizes being checked based on the opcode being emitted or supported
member: TLSlaveParameters => TransferSizes,
address: UInt,
lgSize: UInt,
// range provides a limit on the sizes that are expected to be evaluated, which might reduce circuit complexity
range: Option[TransferSizes]): Bool = {
// trim reduces circuit complexity by intersecting checked sizes with the range argument
def trim(x: TransferSizes) = range.map(_.intersect(x)).getOrElse(x)
    // groupBy returns an unordered map; convert back to a Seq and sort the result for determinism.
    // groupByIntoSeq deterministically groups the slaves by the trimmed transfer sizes they support
    // (or emit) for the requested `member` opcode: each slave's sizes are intersected with `range`,
    // and the addresses of all slaves sharing the same trimmed size are collected together.
    // The result is a sequence of cases mapping each transfer size to every address set that
    // supports (or emits) that size; a single size may cover several address ranges.
    // `safe` trades circuit cost for robustness: the safe version compares against all possible
    // addresses and is correct even for an illegal address, whereas the fast version presumes the
    // address is legal and uses a cheaper decoder, so an illegal address may yield a wrong answer.
val supportCases = groupByIntoSeq(slaves)(m => trim(member(m))).map { case (k: TransferSizes, vs: Seq[TLSlaveParameters]) =>
k -> vs.flatMap(_.address)
}
// safe produces a circuit that compares against all possible addresses,
// whereas fast presumes that the address is legal but uses an efficient address decoder
val mask = if (safe) ~BigInt(0) else AddressDecoder(supportCases.map(_._2))
    // Simplified creates the most concise possible representation of each case's address sets based on the mask.
val simplified = supportCases.map { case (k, seq) => k -> AddressSet.unify(seq.map(_.widen(~mask)).distinct) }
simplified.map { case (s, a) =>
      // For each case (size s -> address sets a): pass when the dynamic size lies in s (or s is
      // exactly the requested range) and the dynamic address lies in one of a's sets; the final
      // result is the or-reduction over all cases.
((Some(s) == range).B || s.containsLg(lgSize)) &&
a.map(_.contains(address)).reduce(_||_)
}.foldLeft(false.B)(_||_)
}
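  // For illustration: supportsGetSafe(address, lgSize) below is true only when some slave whose
  // address sets contain `address` supports a Get of size 2^lgSize; the *Fast variants compute the
  // same answer with a cheaper decoder by presuming the address is legal.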
def supportsAcquireTSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.supports.acquireT, address, lgSize, range)
def supportsAcquireBSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.supports.acquireB, address, lgSize, range)
def supportsArithmeticSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.supports.arithmetic, address, lgSize, range)
def supportsLogicalSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.supports.logical, address, lgSize, range)
def supportsGetSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.supports.get, address, lgSize, range)
def supportsPutFullSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.supports.putFull, address, lgSize, range)
def supportsPutPartialSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.supports.putPartial, address, lgSize, range)
def supportsHintSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.supports.hint, address, lgSize, range)
def supportsAcquireTFast (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(false, _.supports.acquireT, address, lgSize, range)
def supportsAcquireBFast (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(false, _.supports.acquireB, address, lgSize, range)
def supportsArithmeticFast (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(false, _.supports.arithmetic, address, lgSize, range)
def supportsLogicalFast (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(false, _.supports.logical, address, lgSize, range)
def supportsGetFast (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(false, _.supports.get, address, lgSize, range)
def supportsPutFullFast (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(false, _.supports.putFull, address, lgSize, range)
def supportsPutPartialFast (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(false, _.supports.putPartial, address, lgSize, range)
def supportsHintFast (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(false, _.supports.hint, address, lgSize, range)
def emitsProbeSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.emits.probe, address, lgSize, range)
def emitsArithmeticSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.emits.arithmetic, address, lgSize, range)
def emitsLogicalSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.emits.logical, address, lgSize, range)
def emitsGetSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.emits.get, address, lgSize, range)
def emitsPutFullSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.emits.putFull, address, lgSize, range)
def emitsPutPartialSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.emits.putPartial, address, lgSize, range)
def emitsHintSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.emits.hint, address, lgSize, range)
def findTreeViolation() = slaves.flatMap(_.findTreeViolation()).headOption
def isTree = !slaves.exists(!_.isTree)
def infoString = "Slave Port Beatbytes = " + beatBytes + "\n" + "Slave Port MinLatency = " + minLatency + "\n\n" + slaves.map(_.infoString).mkString
def v1copy(
managers: Seq[TLSlaveParameters] = slaves,
beatBytes: Int = -1,
endSinkId: Int = endSinkId,
minLatency: Int = minLatency,
responseFields: Seq[BundleFieldBase] = responseFields,
requestKeys: Seq[BundleKeyBase] = requestKeys) =
{
new TLSlavePortParameters(
slaves = managers,
channelBytes = if (beatBytes != -1) TLChannelBeatBytes(beatBytes) else channelBytes,
endSinkId = endSinkId,
minLatency = minLatency,
responseFields = responseFields,
requestKeys = requestKeys)
}
def v2copy(
slaves: Seq[TLSlaveParameters] = slaves,
channelBytes: TLChannelBeatBytes = channelBytes,
endSinkId: Int = endSinkId,
minLatency: Int = minLatency,
responseFields: Seq[BundleFieldBase] = responseFields,
requestKeys: Seq[BundleKeyBase] = requestKeys) =
{
new TLSlavePortParameters(
slaves = slaves,
channelBytes = channelBytes,
endSinkId = endSinkId,
minLatency = minLatency,
responseFields = responseFields,
requestKeys = requestKeys)
}
@deprecated("Use v1copy instead of copy","")
def copy(
managers: Seq[TLSlaveParameters] = slaves,
beatBytes: Int = -1,
endSinkId: Int = endSinkId,
minLatency: Int = minLatency,
responseFields: Seq[BundleFieldBase] = responseFields,
requestKeys: Seq[BundleKeyBase] = requestKeys) =
{
v1copy(
managers,
beatBytes,
endSinkId,
minLatency,
responseFields,
requestKeys)
}
}
object TLSlavePortParameters {
def v1(
managers: Seq[TLSlaveParameters],
beatBytes: Int,
endSinkId: Int = 0,
minLatency: Int = 0,
responseFields: Seq[BundleFieldBase] = Nil,
requestKeys: Seq[BundleKeyBase] = Nil) =
{
new TLSlavePortParameters(
slaves = managers,
channelBytes = TLChannelBeatBytes(beatBytes),
endSinkId = endSinkId,
minLatency = minLatency,
responseFields = responseFields,
requestKeys = requestKeys)
}
}
object TLManagerPortParameters {
@deprecated("Use TLSlavePortParameters.v1 instead of TLManagerPortParameters","")
def apply(
managers: Seq[TLSlaveParameters],
beatBytes: Int,
endSinkId: Int = 0,
minLatency: Int = 0,
responseFields: Seq[BundleFieldBase] = Nil,
requestKeys: Seq[BundleKeyBase] = Nil) =
{
TLSlavePortParameters.v1(
managers,
beatBytes,
endSinkId,
minLatency,
responseFields,
requestKeys)
}
}
class TLMasterParameters private(
val nodePath: Seq[BaseNode],
val resources: Seq[Resource],
val name: String,
val visibility: Seq[AddressSet],
val unusedRegionTypes: Set[RegionType.T],
val executesOnly: Boolean,
val requestFifo: Boolean, // only a request, not a requirement. applies to A, not C.
val supports: TLSlaveToMasterTransferSizes,
val emits: TLMasterToSlaveTransferSizes,
val neverReleasesData: Boolean,
val sourceId: IdRange) extends SimpleProduct
{
override def canEqual(that: Any): Boolean = that.isInstanceOf[TLMasterParameters]
override def productPrefix = "TLMasterParameters"
// We intentionally omit nodePath for equality testing / formatting
def productArity: Int = 10
def productElement(n: Int): Any = n match {
case 0 => name
case 1 => sourceId
case 2 => resources
case 3 => visibility
case 4 => unusedRegionTypes
case 5 => executesOnly
case 6 => requestFifo
case 7 => supports
case 8 => emits
case 9 => neverReleasesData
case _ => throw new IndexOutOfBoundsException(n.toString)
}
require (!sourceId.isEmpty)
require (!visibility.isEmpty)
require (supports.putFull.contains(supports.putPartial))
// We only support these operations if we support Probe (ie: we're a cache)
require (supports.probe.contains(supports.arithmetic))
require (supports.probe.contains(supports.logical))
require (supports.probe.contains(supports.get))
require (supports.probe.contains(supports.putFull))
require (supports.probe.contains(supports.putPartial))
require (supports.probe.contains(supports.hint))
visibility.combinations(2).foreach { case Seq(x,y) => require (!x.overlaps(y), s"$x and $y overlap.") }
val maxTransfer = List(
supports.probe.max,
supports.arithmetic.max,
supports.logical.max,
supports.get.max,
supports.putFull.max,
supports.putPartial.max).max
def infoString = {
s"""Master Name = ${name}
|visibility = ${visibility}
|emits = ${emits.infoString}
|sourceId = ${sourceId}
|
|""".stripMargin
}
def v1copy(
name: String = name,
sourceId: IdRange = sourceId,
nodePath: Seq[BaseNode] = nodePath,
requestFifo: Boolean = requestFifo,
visibility: Seq[AddressSet] = visibility,
supportsProbe: TransferSizes = supports.probe,
supportsArithmetic: TransferSizes = supports.arithmetic,
supportsLogical: TransferSizes = supports.logical,
supportsGet: TransferSizes = supports.get,
supportsPutFull: TransferSizes = supports.putFull,
supportsPutPartial: TransferSizes = supports.putPartial,
supportsHint: TransferSizes = supports.hint) =
{
new TLMasterParameters(
nodePath = nodePath,
resources = this.resources,
name = name,
visibility = visibility,
unusedRegionTypes = this.unusedRegionTypes,
executesOnly = this.executesOnly,
requestFifo = requestFifo,
supports = TLSlaveToMasterTransferSizes(
probe = supportsProbe,
arithmetic = supportsArithmetic,
logical = supportsLogical,
get = supportsGet,
putFull = supportsPutFull,
putPartial = supportsPutPartial,
hint = supportsHint),
emits = this.emits,
neverReleasesData = this.neverReleasesData,
sourceId = sourceId)
}
def v2copy(
nodePath: Seq[BaseNode] = nodePath,
resources: Seq[Resource] = resources,
name: String = name,
visibility: Seq[AddressSet] = visibility,
unusedRegionTypes: Set[RegionType.T] = unusedRegionTypes,
executesOnly: Boolean = executesOnly,
requestFifo: Boolean = requestFifo,
supports: TLSlaveToMasterTransferSizes = supports,
emits: TLMasterToSlaveTransferSizes = emits,
neverReleasesData: Boolean = neverReleasesData,
sourceId: IdRange = sourceId) =
{
new TLMasterParameters(
nodePath = nodePath,
resources = resources,
name = name,
visibility = visibility,
unusedRegionTypes = unusedRegionTypes,
executesOnly = executesOnly,
requestFifo = requestFifo,
supports = supports,
emits = emits,
neverReleasesData = neverReleasesData,
sourceId = sourceId)
}
@deprecated("Use v1copy instead of copy","")
def copy(
name: String = name,
sourceId: IdRange = sourceId,
nodePath: Seq[BaseNode] = nodePath,
requestFifo: Boolean = requestFifo,
visibility: Seq[AddressSet] = visibility,
supportsProbe: TransferSizes = supports.probe,
supportsArithmetic: TransferSizes = supports.arithmetic,
supportsLogical: TransferSizes = supports.logical,
supportsGet: TransferSizes = supports.get,
supportsPutFull: TransferSizes = supports.putFull,
supportsPutPartial: TransferSizes = supports.putPartial,
supportsHint: TransferSizes = supports.hint) =
{
v1copy(
name = name,
sourceId = sourceId,
nodePath = nodePath,
requestFifo = requestFifo,
visibility = visibility,
supportsProbe = supportsProbe,
supportsArithmetic = supportsArithmetic,
supportsLogical = supportsLogical,
supportsGet = supportsGet,
supportsPutFull = supportsPutFull,
supportsPutPartial = supportsPutPartial,
supportsHint = supportsHint)
}
}
object TLMasterParameters {
def v1(
name: String,
sourceId: IdRange = IdRange(0,1),
nodePath: Seq[BaseNode] = Seq(),
requestFifo: Boolean = false,
visibility: Seq[AddressSet] = Seq(AddressSet(0, ~0)),
supportsProbe: TransferSizes = TransferSizes.none,
supportsArithmetic: TransferSizes = TransferSizes.none,
supportsLogical: TransferSizes = TransferSizes.none,
supportsGet: TransferSizes = TransferSizes.none,
supportsPutFull: TransferSizes = TransferSizes.none,
supportsPutPartial: TransferSizes = TransferSizes.none,
supportsHint: TransferSizes = TransferSizes.none) =
{
new TLMasterParameters(
nodePath = nodePath,
resources = Nil,
name = name,
visibility = visibility,
unusedRegionTypes = Set(),
executesOnly = false,
requestFifo = requestFifo,
supports = TLSlaveToMasterTransferSizes(
probe = supportsProbe,
arithmetic = supportsArithmetic,
logical = supportsLogical,
get = supportsGet,
putFull = supportsPutFull,
putPartial = supportsPutPartial,
hint = supportsHint),
emits = TLMasterToSlaveTransferSizes.unknownEmits,
neverReleasesData = false,
sourceId = sourceId)
}
def v2(
nodePath: Seq[BaseNode] = Seq(),
resources: Seq[Resource] = Nil,
name: String,
visibility: Seq[AddressSet] = Seq(AddressSet(0, ~0)),
unusedRegionTypes: Set[RegionType.T] = Set(),
executesOnly: Boolean = false,
requestFifo: Boolean = false,
supports: TLSlaveToMasterTransferSizes = TLSlaveToMasterTransferSizes.unknownSupports,
emits: TLMasterToSlaveTransferSizes = TLMasterToSlaveTransferSizes.unknownEmits,
neverReleasesData: Boolean = false,
sourceId: IdRange = IdRange(0,1)) =
{
new TLMasterParameters(
nodePath = nodePath,
resources = resources,
name = name,
visibility = visibility,
unusedRegionTypes = unusedRegionTypes,
executesOnly = executesOnly,
requestFifo = requestFifo,
supports = supports,
emits = emits,
neverReleasesData = neverReleasesData,
sourceId = sourceId)
}
}
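// Example (not part of the original file; a minimal sketch for illustration). The names "dma" and
// "l1d", the id ranges, and the 64-byte block size are assumptions, not values from this repository.
//
//   val dmaMaster = TLMasterParameters.v1(
//     name     = "dma",
//     sourceId = IdRange(0, 4))               // four outstanding source ids, no Probe support
//
//   val cacheMaster = TLMasterParameters.v1(
//     name          = "l1d",
//     sourceId      = IdRange(4, 8),
//     supportsProbe = TransferSizes(64, 64))  // accepting Probes is what marks a master as a cache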
object TLClientParameters {
@deprecated("Use TLMasterParameters.v1 instead of TLClientParameters","")
def apply(
name: String,
sourceId: IdRange = IdRange(0,1),
nodePath: Seq[BaseNode] = Seq(),
requestFifo: Boolean = false,
visibility: Seq[AddressSet] = Seq(AddressSet.everything),
supportsProbe: TransferSizes = TransferSizes.none,
supportsArithmetic: TransferSizes = TransferSizes.none,
supportsLogical: TransferSizes = TransferSizes.none,
supportsGet: TransferSizes = TransferSizes.none,
supportsPutFull: TransferSizes = TransferSizes.none,
supportsPutPartial: TransferSizes = TransferSizes.none,
supportsHint: TransferSizes = TransferSizes.none) =
{
TLMasterParameters.v1(
name = name,
sourceId = sourceId,
nodePath = nodePath,
requestFifo = requestFifo,
visibility = visibility,
supportsProbe = supportsProbe,
supportsArithmetic = supportsArithmetic,
supportsLogical = supportsLogical,
supportsGet = supportsGet,
supportsPutFull = supportsPutFull,
supportsPutPartial = supportsPutPartial,
supportsHint = supportsHint)
}
}
class TLMasterPortParameters private(
val masters: Seq[TLMasterParameters],
val channelBytes: TLChannelBeatBytes,
val minLatency: Int,
val echoFields: Seq[BundleFieldBase],
val requestFields: Seq[BundleFieldBase],
val responseKeys: Seq[BundleKeyBase]) extends SimpleProduct
{
override def canEqual(that: Any): Boolean = that.isInstanceOf[TLMasterPortParameters]
override def productPrefix = "TLMasterPortParameters"
def productArity: Int = 6
def productElement(n: Int): Any = n match {
case 0 => masters
case 1 => channelBytes
case 2 => minLatency
case 3 => echoFields
case 4 => requestFields
case 5 => responseKeys
case _ => throw new IndexOutOfBoundsException(n.toString)
}
require (!masters.isEmpty)
require (minLatency >= 0)
def clients = masters
// Require disjoint ranges for Ids
IdRange.overlaps(masters.map(_.sourceId)).foreach { case (x, y) =>
require (!x.overlaps(y), s"TLClientParameters.sourceId ${x} overlaps ${y}")
}
// Bounds on required sizes
def endSourceId = masters.map(_.sourceId.end).max
def maxTransfer = masters.map(_.maxTransfer).max
// The unused sources < endSourceId
def unusedSources: Seq[Int] = {
val usedSources = masters.map(_.sourceId).sortBy(_.start)
((Seq(0) ++ usedSources.map(_.end)) zip usedSources.map(_.start)) flatMap { case (end, start) =>
end until start
}
}
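  // Worked example (illustrative, not in the original source): with masters owning sourceId
  // ranges [0,2) and [4,6), usedSources = Seq(IdRange(0,2), IdRange(4,6)), the zipped pairs are
  // (0,0) and (2,4), and unusedSources = Seq(2, 3) -- the ids below endSourceId (6) that no
  // master claims.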
// Diplomatically determined operation sizes emitted by all inward Masters
// as opposed to emits* which generate circuitry to check which specific addresses
val allEmitClaims = masters.map(_.emits).reduce( _ intersect _)
  // Diplomatically determined operation sizes emitted by at least one inward Master
// as opposed to emits* which generate circuitry to check which specific addresses
val anyEmitClaims = masters.map(_.emits).reduce(_ mincover _)
// Diplomatically determined operation sizes supported by all inward Masters
// as opposed to supports* which generate circuitry to check which specific addresses
val allSupportProbe = masters.map(_.supports.probe) .reduce(_ intersect _)
val allSupportArithmetic = masters.map(_.supports.arithmetic).reduce(_ intersect _)
val allSupportLogical = masters.map(_.supports.logical) .reduce(_ intersect _)
val allSupportGet = masters.map(_.supports.get) .reduce(_ intersect _)
val allSupportPutFull = masters.map(_.supports.putFull) .reduce(_ intersect _)
val allSupportPutPartial = masters.map(_.supports.putPartial).reduce(_ intersect _)
val allSupportHint = masters.map(_.supports.hint) .reduce(_ intersect _)
// Diplomatically determined operation sizes supported by at least one master
// as opposed to supports* which generate circuitry to check which specific addresses
val anySupportProbe = masters.map(!_.supports.probe.none) .reduce(_ || _)
val anySupportArithmetic = masters.map(!_.supports.arithmetic.none).reduce(_ || _)
val anySupportLogical = masters.map(!_.supports.logical.none) .reduce(_ || _)
val anySupportGet = masters.map(!_.supports.get.none) .reduce(_ || _)
val anySupportPutFull = masters.map(!_.supports.putFull.none) .reduce(_ || _)
val anySupportPutPartial = masters.map(!_.supports.putPartial.none).reduce(_ || _)
val anySupportHint = masters.map(!_.supports.hint.none) .reduce(_ || _)
// These return Option[TLMasterParameters] for your convenience
def find(id: Int) = masters.find(_.sourceId.contains(id))
// Synthesizable lookup methods
def find(id: UInt) = VecInit(masters.map(_.sourceId.contains(id)))
def contains(id: UInt) = find(id).reduce(_ || _)
def requestFifo(id: UInt) = Mux1H(find(id), masters.map(c => c.requestFifo.B))
// Available during RTL runtime, checks to see if (id, size) is supported by the master's (client's) diplomatic parameters
private def sourceIdHelper(member: TLMasterParameters => TransferSizes)(id: UInt, lgSize: UInt) = {
val allSame = masters.map(member(_) == member(masters(0))).reduce(_ && _)
    // this if statement is a coarse generalization of the groupBy in the sourceIdHelper2 version:
    // it handles the case where there is only one group.
if (allSame) member(masters(0)).containsLg(lgSize) else {
// Find the master associated with ID and returns whether that particular master is able to receive transaction of lgSize
Mux1H(find(id), masters.map(member(_).containsLg(lgSize)))
}
}
// Check for support of a given operation at a specific id
val supportsProbe = sourceIdHelper(_.supports.probe) _
val supportsArithmetic = sourceIdHelper(_.supports.arithmetic) _
val supportsLogical = sourceIdHelper(_.supports.logical) _
val supportsGet = sourceIdHelper(_.supports.get) _
val supportsPutFull = sourceIdHelper(_.supports.putFull) _
val supportsPutPartial = sourceIdHelper(_.supports.putPartial) _
val supportsHint = sourceIdHelper(_.supports.hint) _
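  // Usage sketch (hypothetical, not in the original source): in RTL these curried checks take a
  // source id and a log2 size and return a Bool, e.g. checking a B-channel Get against the owning
  // master's declared support:
  //   val legal = masterPortParams.supportsGet(b.source, b.size)
  // When every master declares the same sizes, the Mux1H collapses to a single size comparison.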
// TODO: Merge sourceIdHelper2 with sourceIdHelper
private def sourceIdHelper2(
member: TLMasterParameters => TransferSizes,
sourceId: UInt,
lgSize: UInt): Bool = {
// Because sourceIds are uniquely owned by each master, we use them to group the
// cases that have to be checked.
val emitCases = groupByIntoSeq(masters)(m => member(m)).map { case (k, vs) =>
k -> vs.map(_.sourceId)
}
emitCases.map { case (s, a) =>
(s.containsLg(lgSize)) &&
a.map(_.contains(sourceId)).reduce(_||_)
}.foldLeft(false.B)(_||_)
}
// Check for emit of a given operation at a specific id
def emitsAcquireT (sourceId: UInt, lgSize: UInt) = sourceIdHelper2(_.emits.acquireT, sourceId, lgSize)
def emitsAcquireB (sourceId: UInt, lgSize: UInt) = sourceIdHelper2(_.emits.acquireB, sourceId, lgSize)
def emitsArithmetic(sourceId: UInt, lgSize: UInt) = sourceIdHelper2(_.emits.arithmetic, sourceId, lgSize)
def emitsLogical (sourceId: UInt, lgSize: UInt) = sourceIdHelper2(_.emits.logical, sourceId, lgSize)
def emitsGet (sourceId: UInt, lgSize: UInt) = sourceIdHelper2(_.emits.get, sourceId, lgSize)
def emitsPutFull (sourceId: UInt, lgSize: UInt) = sourceIdHelper2(_.emits.putFull, sourceId, lgSize)
def emitsPutPartial(sourceId: UInt, lgSize: UInt) = sourceIdHelper2(_.emits.putPartial, sourceId, lgSize)
def emitsHint (sourceId: UInt, lgSize: UInt) = sourceIdHelper2(_.emits.hint, sourceId, lgSize)
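  // Illustrative note (not in the original source): because each sourceId range is owned by exactly
  // one master, grouping by emit size lets masters with identical parameters share one size check.
  // E.g. if only masters with ids [0,4) emit Get (up to 64 bytes), emitsGet(source, lgSize)
  // reduces to (lgSize legal for a 64-byte Get) && (source < 4.U).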
def infoString = masters.map(_.infoString).mkString
def v1copy(
clients: Seq[TLMasterParameters] = masters,
minLatency: Int = minLatency,
echoFields: Seq[BundleFieldBase] = echoFields,
requestFields: Seq[BundleFieldBase] = requestFields,
responseKeys: Seq[BundleKeyBase] = responseKeys) =
{
new TLMasterPortParameters(
masters = clients,
channelBytes = channelBytes,
minLatency = minLatency,
echoFields = echoFields,
requestFields = requestFields,
responseKeys = responseKeys)
}
def v2copy(
masters: Seq[TLMasterParameters] = masters,
channelBytes: TLChannelBeatBytes = channelBytes,
minLatency: Int = minLatency,
echoFields: Seq[BundleFieldBase] = echoFields,
requestFields: Seq[BundleFieldBase] = requestFields,
responseKeys: Seq[BundleKeyBase] = responseKeys) =
{
new TLMasterPortParameters(
masters = masters,
channelBytes = channelBytes,
minLatency = minLatency,
echoFields = echoFields,
requestFields = requestFields,
responseKeys = responseKeys)
}
@deprecated("Use v1copy instead of copy","")
def copy(
clients: Seq[TLMasterParameters] = masters,
minLatency: Int = minLatency,
echoFields: Seq[BundleFieldBase] = echoFields,
requestFields: Seq[BundleFieldBase] = requestFields,
responseKeys: Seq[BundleKeyBase] = responseKeys) =
{
v1copy(
clients,
minLatency,
echoFields,
requestFields,
responseKeys)
}
}
object TLClientPortParameters {
@deprecated("Use TLMasterPortParameters.v1 instead of TLClientPortParameters","")
def apply(
clients: Seq[TLMasterParameters],
minLatency: Int = 0,
echoFields: Seq[BundleFieldBase] = Nil,
requestFields: Seq[BundleFieldBase] = Nil,
responseKeys: Seq[BundleKeyBase] = Nil) =
{
TLMasterPortParameters.v1(
clients,
minLatency,
echoFields,
requestFields,
responseKeys)
}
}
object TLMasterPortParameters {
def v1(
clients: Seq[TLMasterParameters],
minLatency: Int = 0,
echoFields: Seq[BundleFieldBase] = Nil,
requestFields: Seq[BundleFieldBase] = Nil,
responseKeys: Seq[BundleKeyBase] = Nil) =
{
new TLMasterPortParameters(
masters = clients,
channelBytes = TLChannelBeatBytes(),
minLatency = minLatency,
echoFields = echoFields,
requestFields = requestFields,
responseKeys = responseKeys)
}
def v2(
masters: Seq[TLMasterParameters],
channelBytes: TLChannelBeatBytes = TLChannelBeatBytes(),
minLatency: Int = 0,
echoFields: Seq[BundleFieldBase] = Nil,
requestFields: Seq[BundleFieldBase] = Nil,
responseKeys: Seq[BundleKeyBase] = Nil) =
{
new TLMasterPortParameters(
masters = masters,
channelBytes = channelBytes,
minLatency = minLatency,
echoFields = echoFields,
requestFields = requestFields,
responseKeys = responseKeys)
}
}
case class TLBundleParameters(
addressBits: Int,
dataBits: Int,
sourceBits: Int,
sinkBits: Int,
sizeBits: Int,
echoFields: Seq[BundleFieldBase],
requestFields: Seq[BundleFieldBase],
responseFields: Seq[BundleFieldBase],
hasBCE: Boolean)
{
// Chisel has issues with 0-width wires
require (addressBits >= 1)
require (dataBits >= 8)
require (sourceBits >= 1)
require (sinkBits >= 1)
require (sizeBits >= 1)
require (isPow2(dataBits))
echoFields.foreach { f => require (f.key.isControl, s"${f} is not a legal echo field") }
val addrLoBits = log2Up(dataBits/8)
// Used to uniquify bus IP names
def shortName = s"a${addressBits}d${dataBits}s${sourceBits}k${sinkBits}z${sizeBits}" + (if (hasBCE) "c" else "u")
def union(x: TLBundleParameters) =
TLBundleParameters(
max(addressBits, x.addressBits),
max(dataBits, x.dataBits),
max(sourceBits, x.sourceBits),
max(sinkBits, x.sinkBits),
max(sizeBits, x.sizeBits),
echoFields = BundleField.union(echoFields ++ x.echoFields),
requestFields = BundleField.union(requestFields ++ x.requestFields),
responseFields = BundleField.union(responseFields ++ x.responseFields),
hasBCE || x.hasBCE)
}
object TLBundleParameters
{
val emptyBundleParams = TLBundleParameters(
addressBits = 1,
dataBits = 8,
sourceBits = 1,
sinkBits = 1,
sizeBits = 1,
echoFields = Nil,
requestFields = Nil,
responseFields = Nil,
hasBCE = false)
def union(x: Seq[TLBundleParameters]) = x.foldLeft(emptyBundleParams)((x,y) => x.union(y))
def apply(master: TLMasterPortParameters, slave: TLSlavePortParameters) =
new TLBundleParameters(
addressBits = log2Up(slave.maxAddress + 1),
dataBits = slave.beatBytes * 8,
sourceBits = log2Up(master.endSourceId),
sinkBits = log2Up(slave.endSinkId),
sizeBits = log2Up(log2Ceil(max(master.maxTransfer, slave.maxTransfer))+1),
echoFields = master.echoFields,
requestFields = BundleField.accept(master.requestFields, slave.requestKeys),
responseFields = BundleField.accept(slave.responseFields, master.responseKeys),
hasBCE = master.anySupportProbe && slave.anySupportAcquireB)
}
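// Worked example (illustrative, not in the original source): for a link whose largest transfer is
// 64 bytes over an 8-byte data bus, the apply above yields dataBits = 64 and
// sizeBits = log2Up(log2Ceil(64) + 1) = log2Up(7) = 3, just enough to encode sizes 2^0 .. 2^6 bytes.
// addressBits, sourceBits and sinkBits are likewise sized from maxAddress, endSourceId and endSinkId.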
case class TLEdgeParameters(
master: TLMasterPortParameters,
slave: TLSlavePortParameters,
params: Parameters,
sourceInfo: SourceInfo) extends FormatEdge
{
// legacy names:
def manager = slave
def client = master
val maxTransfer = max(master.maxTransfer, slave.maxTransfer)
val maxLgSize = log2Ceil(maxTransfer)
// Sanity check the link...
require (maxTransfer >= slave.beatBytes, s"Link's max transfer (${maxTransfer}) < ${slave.slaves.map(_.name)}'s beatBytes (${slave.beatBytes})")
def diplomaticClaimsMasterToSlave = master.anyEmitClaims.intersect(slave.anySupportClaims)
val bundle = TLBundleParameters(master, slave)
def formatEdge = master.infoString + "\n" + slave.infoString
}
case class TLCreditedDelay(
a: CreditedDelay,
b: CreditedDelay,
c: CreditedDelay,
d: CreditedDelay,
e: CreditedDelay)
{
def + (that: TLCreditedDelay): TLCreditedDelay = TLCreditedDelay(
a = a + that.a,
b = b + that.b,
c = c + that.c,
d = d + that.d,
e = e + that.e)
override def toString = s"(${a}, ${b}, ${c}, ${d}, ${e})"
}
object TLCreditedDelay {
def apply(delay: CreditedDelay): TLCreditedDelay = apply(delay, delay.flip, delay, delay.flip, delay)
}
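// Illustrative note (not in the original source): the single-argument apply assigns the given delay
// to the A, C and E channels and its flip to B and D, since those two channels flow in the opposite
// direction across a credited crossing (assuming CreditedDelay.flip swaps its debit/credit fields).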
case class TLCreditedManagerPortParameters(delay: TLCreditedDelay, base: TLSlavePortParameters) {def infoString = base.infoString}
case class TLCreditedClientPortParameters(delay: TLCreditedDelay, base: TLMasterPortParameters) {def infoString = base.infoString}
case class TLCreditedEdgeParameters(client: TLCreditedClientPortParameters, manager: TLCreditedManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends FormatEdge
{
val delay = client.delay + manager.delay
val bundle = TLBundleParameters(client.base, manager.base)
def formatEdge = client.infoString + "\n" + manager.infoString
}
case class TLAsyncManagerPortParameters(async: AsyncQueueParams, base: TLSlavePortParameters) {def infoString = base.infoString}
case class TLAsyncClientPortParameters(base: TLMasterPortParameters) {def infoString = base.infoString}
case class TLAsyncBundleParameters(async: AsyncQueueParams, base: TLBundleParameters)
case class TLAsyncEdgeParameters(client: TLAsyncClientPortParameters, manager: TLAsyncManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends FormatEdge
{
val bundle = TLAsyncBundleParameters(manager.async, TLBundleParameters(client.base, manager.base))
def formatEdge = client.infoString + "\n" + manager.infoString
}
case class TLRationalManagerPortParameters(direction: RationalDirection, base: TLSlavePortParameters) {def infoString = base.infoString}
case class TLRationalClientPortParameters(base: TLMasterPortParameters) {def infoString = base.infoString}
case class TLRationalEdgeParameters(client: TLRationalClientPortParameters, manager: TLRationalManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends FormatEdge
{
val bundle = TLBundleParameters(client.base, manager.base)
def formatEdge = client.infoString + "\n" + manager.infoString
}
// To be unified, devices must agree on all of these terms
case class ManagerUnificationKey(
resources: Seq[Resource],
regionType: RegionType.T,
executable: Boolean,
supportsAcquireT: TransferSizes,
supportsAcquireB: TransferSizes,
supportsArithmetic: TransferSizes,
supportsLogical: TransferSizes,
supportsGet: TransferSizes,
supportsPutFull: TransferSizes,
supportsPutPartial: TransferSizes,
supportsHint: TransferSizes)
object ManagerUnificationKey
{
def apply(x: TLSlaveParameters): ManagerUnificationKey = ManagerUnificationKey(
resources = x.resources,
regionType = x.regionType,
executable = x.executable,
supportsAcquireT = x.supportsAcquireT,
supportsAcquireB = x.supportsAcquireB,
supportsArithmetic = x.supportsArithmetic,
supportsLogical = x.supportsLogical,
supportsGet = x.supportsGet,
supportsPutFull = x.supportsPutFull,
supportsPutPartial = x.supportsPutPartial,
supportsHint = x.supportsHint)
}
object ManagerUnification
{
def apply(slaves: Seq[TLSlaveParameters]): List[TLSlaveParameters] = {
slaves.groupBy(ManagerUnificationKey.apply).values.map { seq =>
val agree = seq.forall(_.fifoId == seq.head.fifoId)
seq(0).v1copy(
address = AddressSet.unify(seq.flatMap(_.address)),
fifoId = if (agree) seq(0).fifoId else None)
}.toList
}
}
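// Worked example (illustrative, not in the original source): two TLSlaveParameters entries that
// differ only in their address sets share a ManagerUnificationKey, so they are merged into a single
// entry whose address is AddressSet.unify of both ranges; the fifoId survives only if every merged
// entry agreed on it, otherwise it is dropped (None).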
case class TLBufferParams(
a: BufferParams = BufferParams.none,
b: BufferParams = BufferParams.none,
c: BufferParams = BufferParams.none,
d: BufferParams = BufferParams.none,
e: BufferParams = BufferParams.none
) extends DirectedBuffers[TLBufferParams] {
def copyIn(x: BufferParams) = this.copy(b = x, d = x)
def copyOut(x: BufferParams) = this.copy(a = x, c = x, e = x)
def copyInOut(x: BufferParams) = this.copyIn(x).copyOut(x)
}
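// Usage sketch (hypothetical, not in the original source): copyIn applies a buffer to the B and D
// channels and copyOut to A, C and E, so e.g.
//   TLBufferParams().copyInOut(BufferParams.default)
// would request the default buffer on all five channels (assuming BufferParams.default exists, as
// in upstream rocket-chip).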
/** Pretty printing of TL source id maps */
class TLSourceIdMap(tl: TLMasterPortParameters) extends IdMap[TLSourceIdMapEntry] {
private val tlDigits = String.valueOf(tl.endSourceId-1).length()
protected val fmt = s"\t[%${tlDigits}d, %${tlDigits}d) %s%s%s"
private val sorted = tl.masters.sortBy(_.sourceId)
val mapping: Seq[TLSourceIdMapEntry] = sorted.map { case c =>
TLSourceIdMapEntry(c.sourceId, c.name, c.supports.probe, c.requestFifo)
}
}
case class TLSourceIdMapEntry(tlId: IdRange, name: String, isCache: Boolean, requestFifo: Boolean)
extends IdMapEntry
{
val from = tlId
val to = tlId
val maxTransactionsInFlight = Some(tlId.size)
}
File MixedNode.scala:
package org.chipsalliance.diplomacy.nodes
import chisel3.{Data, DontCare, Wire}
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.{Field, Parameters}
import org.chipsalliance.diplomacy.ValName
import org.chipsalliance.diplomacy.sourceLine
/** One side metadata of a [[Dangle]].
*
* Describes one side of an edge going into or out of a [[BaseNode]].
*
* @param serial
* the global [[BaseNode.serial]] number of the [[BaseNode]] that this [[HalfEdge]] connects to.
* @param index
* the `index` in the [[BaseNode]]'s input or output port list that this [[HalfEdge]] belongs to.
*/
case class HalfEdge(serial: Int, index: Int) extends Ordered[HalfEdge] {
import scala.math.Ordered.orderingToOrdered
def compare(that: HalfEdge): Int = HalfEdge.unapply(this).compare(HalfEdge.unapply(that))
}
/** [[Dangle]] captures the `IO` information of a [[LazyModule]] and which two [[BaseNode]]s the [[Edges]]/[[Bundle]]
* connects.
*
* [[Dangle]]s are generated by [[BaseNode.instantiate]] using [[MixedNode.danglesOut]] and [[MixedNode.danglesIn]] ,
* [[LazyModuleImp.instantiate]] connects those that go to internal or explicit IO connections in a [[LazyModule]].
*
* @param source
* the source [[HalfEdge]] of this [[Dangle]], which captures the source [[BaseNode]] and the port `index` within
* that [[BaseNode]].
* @param sink
* sink [[HalfEdge]] of this [[Dangle]], which captures the sink [[BaseNode]] and the port `index` within that
* [[BaseNode]].
* @param flipped
* flip or not in [[AutoBundle.makeElements]]. If true this corresponds to `danglesOut`, if false it corresponds to
* `danglesIn`.
* @param dataOpt
* actual [[Data]] for the hardware connection. Can be empty if this belongs to a cloned module
*/
case class Dangle(source: HalfEdge, sink: HalfEdge, flipped: Boolean, name: String, dataOpt: Option[Data]) {
def data = dataOpt.get
}
/** [[Edges]] is a collection of parameters describing the functionality and connection for an interface, which is often
* derived from the interconnection protocol and can inform the parameterization of the hardware bundles that actually
* implement the protocol.
*/
case class Edges[EI, EO](in: Seq[EI], out: Seq[EO])
/** A field available in [[Parameters]] used to determine whether [[InwardNodeImp.monitor]] will be called. */
case object MonitorsEnabled extends Field[Boolean](true)
/** When rendering the edge in a graphical format, flip the order in which the edges' source and sink are presented.
*
* For example, when rendering graphML, yEd by default tries to put the source node vertically above the sink node, but
* [[RenderFlipped]] inverts this relationship. When a particular [[LazyModule]] contains both source nodes and sink
 * nodes, flipping the rendering of one node's edge will usually produce a more concise visual layout for the
* [[LazyModule]].
*/
case object RenderFlipped extends Field[Boolean](false)
/** The sealed node class in the package; all nodes are derived from it.
*
* @param inner
* Sink interface implementation.
* @param outer
* Source interface implementation.
* @param valName
* val name of this node.
* @tparam DI
 *   Downward-flowing parameters received on the inner side of the node. It is usually a bunch of parameters
* describing the protocol parameters from a source. For an [[InwardNode]], it is determined by the connected
* [[OutwardNode]]. Since it can be connected to multiple sources, this parameter is always a Seq of source port
* parameters.
* @tparam UI
 *   Upward-flowing parameters generated by the inner side of the node. It is usually a bunch of parameters describing
 *   the protocol parameters of a sink. For an [[InwardNode]], it is determined by the node itself.
* @tparam EI
 *   Edge Parameters describing a connection on the inner side of the node. It is usually a bunch of transfers
* specified for a sink according to protocol.
* @tparam BI
* Bundle type used when connecting to the inner side of the node. It is a hardware interface of this sink interface.
 *   It should extend [[chisel3.Data]], which represents the real hardware.
* @tparam DO
 *   Downward-flowing parameters generated on the outer side of the node. It is usually a bunch of parameters
 *   describing the protocol parameters of a source. For an [[OutwardNode]], it is determined by the node itself.
* @tparam UO
 *   Upward-flowing parameters received by the outer side of the node. It is usually a bunch of parameters describing
* the protocol parameters from a sink. For an [[OutwardNode]], it is determined by the connected [[InwardNode]].
* Since it can be connected to multiple sinks, this parameter is always a Seq of sink port parameters.
* @tparam EO
 *   Edge Parameters describing a connection on the outer side of the node. It is usually a bunch of transfers
* specified for a source according to protocol.
* @tparam BO
* Bundle type used when connecting to the outer side of the node. It is a hardware interface of this source
 *   interface. It should extend [[chisel3.Data]], which represents the real hardware.
*
* @note
* Call Graph of [[MixedNode]]
 *   - line `─`: source is processed by a function and the result is passed to others
* - Arrow `→`: target of arrow is generated by source
*
* {{{
* (from the other node)
* ┌─────────────────────────────────────────────────────────[[InwardNode.uiParams]]─────────────┐
* ↓ │
* (binding node when elaboration) [[OutwardNode.uoParams]]────────────────────────[[MixedNode.mapParamsU]]→──────────┐ │
* [[InwardNode.accPI]] │ │ │
* │ │ (based on protocol) │
* │ │ [[MixedNode.inner.edgeI]] │
* │ │ ↓ │
* ↓ │ │ │
* (immobilize after elaboration) (inward port from [[OutwardNode]]) │ ↓ │
* [[InwardNode.iBindings]]──┐ [[MixedNode.iDirectPorts]]────────────────────→[[MixedNode.iPorts]] [[InwardNode.uiParams]] │
* │ │ ↑ │ │ │
* │ │ │ [[OutwardNode.doParams]] │ │
* │ │ │ (from the other node) │ │
* │ │ │ │ │ │
* │ │ │ │ │ │
* │ │ │ └────────┬──────────────┤ │
* │ │ │ │ │ │
* │ │ │ │ (based on protocol) │
* │ │ │ │ [[MixedNode.inner.edgeI]] │
* │ │ │ │ │ │
* │ │ (from the other node) │ ↓ │
* │ └───[[OutwardNode.oPortMapping]] [[OutwardNode.oStar]] │ [[MixedNode.edgesIn]]───┐ │
* │ ↑ ↑ │ │ ↓ │
* │ │ │ │ │ [[MixedNode.in]] │
* │ │ │ │ ↓ ↑ │
* │ (solve star connection) │ │ │ [[MixedNode.bundleIn]]──┘ │
* ├───[[MixedNode.resolveStar]]→─┼─────────────────────────────┤ └────────────────────────────────────┐ │
* │ │ │ [[MixedNode.bundleOut]]─┐ │ │
* │ │ │ ↑ ↓ │ │
* │ │ │ │ [[MixedNode.out]] │ │
* │ ↓ ↓ │ ↑ │ │
* │ ┌─────[[InwardNode.iPortMapping]] [[InwardNode.iStar]] [[MixedNode.edgesOut]]──┘ │ │
* │ │ (from the other node) ↑ │ │
* │ │ │ │ │ │
* │ │ │ [[MixedNode.outer.edgeO]] │ │
* │ │ │ (based on protocol) │ │
* │ │ │ │ │ │
* │ │ │ ┌────────────────────────────────────────┤ │ │
* │ │ │ │ │ │ │
* │ │ │ │ │ │ │
* │ │ │ │ │ │ │
* (immobilize after elaboration)│ ↓ │ │ │ │
* [[OutwardNode.oBindings]]─┘ [[MixedNode.oDirectPorts]]───→[[MixedNode.oPorts]] [[OutwardNode.doParams]] │ │
* ↑ (inward port from [[OutwardNode]]) │ │ │ │
* │ ┌─────────────────────────────────────────┤ │ │ │
* │ │ │ │ │ │
* │ │ │ │ │ │
* [[OutwardNode.accPO]] │ ↓ │ │ │
* (binding node when elaboration) │ [[InwardNode.diParams]]─────→[[MixedNode.mapParamsD]]────────────────────────────┘ │ │
* │ ↑ │ │
* │ └──────────────────────────────────────────────────────────────────────────────────────────┘ │
* └──────────────────────────────────────────────────────────────────────────────────────────────────────────┘
* }}}
*/
abstract class MixedNode[DI, UI, EI, BI <: Data, DO, UO, EO, BO <: Data](
val inner: InwardNodeImp[DI, UI, EI, BI],
val outer: OutwardNodeImp[DO, UO, EO, BO]
)(
implicit valName: ValName)
extends BaseNode
with NodeHandle[DI, UI, EI, BI, DO, UO, EO, BO]
with InwardNode[DI, UI, BI]
with OutwardNode[DO, UO, BO] {
// Generate a [[NodeHandle]] with inward and outward node are both this node.
val inward = this
val outward = this
/** Debug info of nodes binding. */
def bindingInfo: String = s"""$iBindingInfo
|$oBindingInfo
|""".stripMargin
/** Debug info of ports connecting. */
def connectedPortsInfo: String = s"""${oPorts.size} outward ports connected: [${oPorts.map(_._2.name).mkString(",")}]
|${iPorts.size} inward ports connected: [${iPorts.map(_._2.name).mkString(",")}]
|""".stripMargin
/** Debug info of parameters propagations. */
def parametersInfo: String = s"""${doParams.size} downstream outward parameters: [${doParams.mkString(",")}]
|${uoParams.size} upstream outward parameters: [${uoParams.mkString(",")}]
|${diParams.size} downstream inward parameters: [${diParams.mkString(",")}]
|${uiParams.size} upstream inward parameters: [${uiParams.mkString(",")}]
|""".stripMargin
/** For a given node, converts [[OutwardNode.accPO]] and [[InwardNode.accPI]] to [[MixedNode.oPortMapping]] and
* [[MixedNode.iPortMapping]].
*
* Given counts of known inward and outward binding and inward and outward star bindings, return the resolved inward
* stars and outward stars.
*
* This method will also validate the arguments and throw a runtime error if the values are unsuitable for this type
* of node.
*
* @param iKnown
* Number of known-size ([[BIND_ONCE]]) input bindings.
* @param oKnown
* Number of known-size ([[BIND_ONCE]]) output bindings.
* @param iStar
* Number of unknown size ([[BIND_STAR]]) input bindings.
* @param oStar
* Number of unknown size ([[BIND_STAR]]) output bindings.
* @return
* A Tuple of the resolved number of input and output connections.
*/
protected[diplomacy] def resolveStar(iKnown: Int, oKnown: Int, iStar: Int, oStar: Int): (Int, Int)
/** Function to generate downward-flowing outward params from the downward-flowing input params and the current output
* ports.
*
* @param n
* The size of the output sequence to generate.
* @param p
* Sequence of downward-flowing input parameters of this node.
* @return
* A `n`-sized sequence of downward-flowing output edge parameters.
*/
protected[diplomacy] def mapParamsD(n: Int, p: Seq[DI]): Seq[DO]
/** Function to generate upward-flowing input parameters from the upward-flowing output parameters [[uiParams]].
*
* @param n
* Size of the output sequence.
* @param p
* Upward-flowing output edge parameters.
* @return
* A n-sized sequence of upward-flowing input edge parameters.
*/
protected[diplomacy] def mapParamsU(n: Int, p: Seq[UO]): Seq[UI]
/** @return
* The sink cardinality of the node, the number of outputs bound with [[BIND_QUERY]] summed with inputs bound with
* [[BIND_STAR]].
*/
protected[diplomacy] lazy val sinkCard: Int = oBindings.count(_._3 == BIND_QUERY) + iBindings.count(_._3 == BIND_STAR)
/** @return
* The source cardinality of this node, the number of inputs bound with [[BIND_QUERY]] summed with the number of
* output bindings bound with [[BIND_STAR]].
*/
protected[diplomacy] lazy val sourceCard: Int =
iBindings.count(_._3 == BIND_QUERY) + oBindings.count(_._3 == BIND_STAR)
/** @return list of nodes involved in flex bindings with this node. */
protected[diplomacy] lazy val flexes: Seq[BaseNode] =
oBindings.filter(_._3 == BIND_FLEX).map(_._2) ++ iBindings.filter(_._3 == BIND_FLEX).map(_._2)
/** Resolves the flex to be either source or sink and returns the offset where the [[BIND_STAR]] operators begin
* greedily taking up the remaining connections.
*
* @return
* A value >= 0 if it is sink cardinality, a negative value for source cardinality. The magnitude of the return
* value is not relevant.
*/
protected[diplomacy] lazy val flexOffset: Int = {
/** Recursively performs a depth-first search of the [[flexes]], [[BaseNode]]s connected to this node with flex
* operators. The algorithm bottoms out when we either get to a node we have already visited or when we get to a
* connection that is not a flex and can set the direction for us. Otherwise, recurse by visiting the `flexes` of
* each node in the current set and decide whether they should be added to the set or not.
*
* @return
* the mapping of [[BaseNode]] indexed by their serial numbers.
*/
def DFS(v: BaseNode, visited: Map[Int, BaseNode]): Map[Int, BaseNode] = {
if (visited.contains(v.serial) || !v.flexibleArityDirection) {
visited
} else {
v.flexes.foldLeft(visited + (v.serial -> v))((sum, n) => DFS(n, sum))
}
}
    /** Determine which [[BaseNode]]s are involved in resolving the flex connections to/from this node.
*
* @example
* {{{
* a :*=* b :*=* c
* d :*=* b
* e :*=* f
* }}}
*
* `flexSet` for `a`, `b`, `c`, or `d` will be `Set(a, b, c, d)` `flexSet` for `e` or `f` will be `Set(e,f)`
*/
val flexSet = DFS(this, Map()).values
/** The total number of :*= operators where we're on the left. */
val allSink = flexSet.map(_.sinkCard).sum
/** The total number of :=* operators used when we're on the right. */
val allSource = flexSet.map(_.sourceCard).sum
require(
allSink == 0 || allSource == 0,
s"The nodes ${flexSet.map(_.name)} which are inter-connected by :*=* have ${allSink} :*= operators and ${allSource} :=* operators connected to them, making it impossible to determine cardinality inference direction."
)
allSink - allSource
}
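  // Illustrative note (not in the original source): if the nodes reachable through :*=* collectively
  // see two :*= operators and no :=* operators, then allSink = 2 and allSource = 0, so
  // flexOffset = 2 (>= 0) and every flex edge in that set resolves in the sink direction.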
/** @return A value >= 0 if it is sink cardinality, a negative value for source cardinality. */
protected[diplomacy] def edgeArityDirection(n: BaseNode): Int = {
if (flexibleArityDirection) flexOffset
else if (n.flexibleArityDirection) n.flexOffset
else 0
}
/** For a node which is connected between two nodes, select the one that will influence the direction of the flex
* resolution.
*/
protected[diplomacy] def edgeAritySelect(n: BaseNode, l: => Int, r: => Int): Int = {
val dir = edgeArityDirection(n)
if (dir < 0) l
else if (dir > 0) r
else 1
}
/** Ensure that the same node is not visited twice in resolving `:*=`, etc operators. */
private var starCycleGuard = false
  /** Resolve all the star operators into concrete indices. As connections are being made, some may be "star"
    * connections which need to be resolved in some way to determine how many actual edges they correspond to. We also
    * need to build up the ranges of edges which correspond to each binding operator, so that we can apply the correct
    * edge parameters and later build up correct bundle connections.
*
* [[oPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that oPort (binding
* operator). [[iPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that iPort
* (binding operator). [[oStar]]: `Int` the value to return for this node `N` for any `N :*= foo` or `N :*=* foo :*=
* bar` [[iStar]]: `Int` the value to return for this node `N` for any `foo :=* N` or `bar :=* foo :*=* N`
*/
protected[diplomacy] lazy val (
oPortMapping: Seq[(Int, Int)],
iPortMapping: Seq[(Int, Int)],
oStar: Int,
iStar: Int
) = {
try {
if (starCycleGuard) throw StarCycleException()
starCycleGuard = true
// For a given node N...
// Number of foo :=* N
// + Number of bar :=* foo :*=* N
val oStars = oBindings.count { case (_, n, b, _, _) =>
b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) < 0)
}
// Number of N :*= foo
// + Number of N :*=* foo :*= bar
val iStars = iBindings.count { case (_, n, b, _, _) =>
b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) > 0)
}
// 1 for foo := N
// + bar.iStar for bar :*= foo :*=* N
// + foo.iStar for foo :*= N
// + 0 for foo :=* N
val oKnown = oBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, 0, n.iStar)
case BIND_QUERY => n.iStar
case BIND_STAR => 0
}
}.sum
// 1 for N := foo
// + bar.oStar for N :*=* foo :=* bar
// + foo.oStar for N :=* foo
// + 0 for N :*= foo
val iKnown = iBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, n.oStar, 0)
case BIND_QUERY => n.oStar
case BIND_STAR => 0
}
}.sum
// Resolve star depends on the node subclass to implement the algorithm for this.
val (iStar, oStar) = resolveStar(iKnown, oKnown, iStars, oStars)
// Cumulative list of resolved outward binding range starting points
val oSum = oBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, oStar, n.iStar)
case BIND_QUERY => n.iStar
case BIND_STAR => oStar
}
}.scanLeft(0)(_ + _)
// Cumulative list of resolved inward binding range starting points
val iSum = iBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, n.oStar, iStar)
case BIND_QUERY => n.oStar
case BIND_STAR => iStar
}
}.scanLeft(0)(_ + _)
// Create ranges for each binding based on the running sums and return
// those along with resolved values for the star operations.
(oSum.init.zip(oSum.tail), iSum.init.zip(iSum.tail), oStar, iStar)
} catch {
case c: StarCycleException => throw c.copy(loop = context +: c.loop)
}
}
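  // Worked example (illustrative, not in the original source): suppose this node has two outward
  // bindings, a BIND_ONCE followed by a BIND_STAR, and resolveStar returns oStar = 3. The
  // per-binding edge counts are then Seq(1, 3), oSum = Seq(0, 1, 4), and
  // oPortMapping = Seq((0,1), (1,4)): the first binding owns edge 0 and the star binding owns
  // edges 1 through 3.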
/** Sequence of inward ports.
*
* This should be called after all star bindings are resolved.
*
* Each element is: `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding.
* `n` Instance of inward node. `p` View of [[Parameters]] where this connection was made. `s` Source info where this
* connection was made in the source code.
*/
protected[diplomacy] lazy val oDirectPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] =
oBindings.flatMap { case (i, n, _, p, s) =>
// for each binding operator in this node, look at what it connects to
val (start, end) = n.iPortMapping(i)
(start until end).map { j => (j, n, p, s) }
}
/** Sequence of outward ports.
*
* This should be called after all star bindings are resolved.
*
* `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding. `n` Instance of
* outward node. `p` View of [[Parameters]] where this connection was made. `s` [[SourceInfo]] where this connection
* was made in the source code.
*/
protected[diplomacy] lazy val iDirectPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] =
iBindings.flatMap { case (i, n, _, p, s) =>
// query this port index range of this node in the other side of node.
val (start, end) = n.oPortMapping(i)
(start until end).map { j => (j, n, p, s) }
}
  // Ephemeral nodes (which have non-None iForward/oForward) have in_degree = out_degree
// Thus, there must exist an Eulerian path and the below algorithms terminate
@scala.annotation.tailrec
private def oTrace(
tuple: (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)
): (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo) = tuple match {
case (i, n, p, s) => n.iForward(i) match {
case None => (i, n, p, s)
case Some((j, m)) => oTrace((j, m, p, s))
}
}
@scala.annotation.tailrec
private def iTrace(
tuple: (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)
): (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo) = tuple match {
case (i, n, p, s) => n.oForward(i) match {
case None => (i, n, p, s)
case Some((j, m)) => iTrace((j, m, p, s))
}
}
/** Final output ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved.
*
* Each Port is a tuple of:
* - Numeric index of this binding in the [[InwardNode]] on the other end.
* - [[InwardNode]] on the other end of this binding.
* - A view of [[Parameters]] where the binding occurred.
* - [[SourceInfo]] for source-level error reporting.
*/
lazy val oPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] = oDirectPorts.map(oTrace)
/** Final input ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved.
*
* Each Port is a tuple of:
* - numeric index of this binding in [[OutwardNode]] on the other end.
* - [[OutwardNode]] on the other end of this binding.
* - a view of [[Parameters]] where the binding occurred.
* - [[SourceInfo]] for source-level error reporting.
*/
lazy val iPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] = iDirectPorts.map(iTrace)
private var oParamsCycleGuard = false
protected[diplomacy] lazy val diParams: Seq[DI] = iPorts.map { case (i, n, _, _) => n.doParams(i) }
protected[diplomacy] lazy val doParams: Seq[DO] = {
try {
if (oParamsCycleGuard) throw DownwardCycleException()
oParamsCycleGuard = true
val o = mapParamsD(oPorts.size, diParams)
require(
o.size == oPorts.size,
s"""Diplomacy has detected a problem with your graph:
|At the following node, the number of outward ports should equal the number of produced outward parameters.
|$context
|$connectedPortsInfo
|Downstreamed inward parameters: [${diParams.mkString(",")}]
|Produced outward parameters: [${o.mkString(",")}]
|""".stripMargin
)
o.map(outer.mixO(_, this))
} catch {
case c: DownwardCycleException => throw c.copy(loop = context +: c.loop)
}
}
private var iParamsCycleGuard = false
protected[diplomacy] lazy val uoParams: Seq[UO] = oPorts.map { case (o, n, _, _) => n.uiParams(o) }
protected[diplomacy] lazy val uiParams: Seq[UI] = {
try {
if (iParamsCycleGuard) throw UpwardCycleException()
iParamsCycleGuard = true
val i = mapParamsU(iPorts.size, uoParams)
require(
i.size == iPorts.size,
s"""Diplomacy has detected a problem with your graph:
|At the following node, the number of inward ports should equal the number of produced inward parameters.
|$context
|$connectedPortsInfo
|Upstreamed outward parameters: [${uoParams.mkString(",")}]
|Produced inward parameters: [${i.mkString(",")}]
|""".stripMargin
)
i.map(inner.mixI(_, this))
} catch {
case c: UpwardCycleException => throw c.copy(loop = context +: c.loop)
}
}
/** Outward edge parameters. */
protected[diplomacy] lazy val edgesOut: Seq[EO] =
(oPorts.zip(doParams)).map { case ((i, n, p, s), o) => outer.edgeO(o, n.uiParams(i), p, s) }
/** Inward edge parameters. */
protected[diplomacy] lazy val edgesIn: Seq[EI] =
(iPorts.zip(uiParams)).map { case ((o, n, p, s), i) => inner.edgeI(n.doParams(o), i, p, s) }
/** A tuple of the input edge parameters and output edge parameters for the edges bound to this node.
*
    * If you need access to the edges of a foreign Node, use this method (in/out create bundles).
*/
lazy val edges: Edges[EI, EO] = Edges(edgesIn, edgesOut)
/** Create actual Wires corresponding to the Bundles parameterized by the outward edges of this node. */
protected[diplomacy] lazy val bundleOut: Seq[BO] = edgesOut.map { e =>
val x = Wire(outer.bundleO(e)).suggestName(s"${valName.value}Out")
    // TODO: DontCare unconnected forwarded diplomatic signals for compatibility reasons.
    // In the future, we should add an option to decide whether to allow unconnected signals in the LazyModule.
x := DontCare
x
}
/** Create actual Wires corresponding to the Bundles parameterized by the inward edges of this node. */
protected[diplomacy] lazy val bundleIn: Seq[BI] = edgesIn.map { e =>
val x = Wire(inner.bundleI(e)).suggestName(s"${valName.value}In")
    // TODO: DontCare unconnected forwarded diplomatic signals for compatibility reasons.
    // In the future, we should add an option to decide whether to allow unconnected signals in the LazyModule.
x := DontCare
x
}
private def emptyDanglesOut: Seq[Dangle] = oPorts.zipWithIndex.map { case ((j, n, _, _), i) =>
Dangle(
source = HalfEdge(serial, i),
sink = HalfEdge(n.serial, j),
flipped = false,
name = wirePrefix + "out",
dataOpt = None
)
}
private def emptyDanglesIn: Seq[Dangle] = iPorts.zipWithIndex.map { case ((j, n, _, _), i) =>
Dangle(
source = HalfEdge(n.serial, j),
sink = HalfEdge(serial, i),
flipped = true,
name = wirePrefix + "in",
dataOpt = None
)
}
/** Create the [[Dangle]]s which describe the connections from this node output to other nodes inputs. */
protected[diplomacy] def danglesOut: Seq[Dangle] = emptyDanglesOut.zipWithIndex.map { case (d, i) =>
d.copy(dataOpt = Some(bundleOut(i)))
}
/** Create the [[Dangle]]s which describe the connections from this node input from other nodes outputs. */
protected[diplomacy] def danglesIn: Seq[Dangle] = emptyDanglesIn.zipWithIndex.map { case (d, i) =>
d.copy(dataOpt = Some(bundleIn(i)))
}
private[diplomacy] var instantiated = false
/** Gather Bundle and edge parameters of outward ports.
*
    * Accessors to the result of negotiation to be used within [[LazyModuleImp]] code. Should only be used within
* [[LazyModuleImp]] code or after its instantiation has completed.
*/
def out: Seq[(BO, EO)] = {
require(
instantiated,
s"$name.out should not be called until after instantiation of its parent LazyModule.module has begun"
)
bundleOut.zip(edgesOut)
}
/** Gather Bundle and edge parameters of inward ports.
*
    * Accessors to the result of negotiation to be used within [[LazyModuleImp]] code. Should only be used within
* [[LazyModuleImp]] code or after its instantiation has completed.
*/
def in: Seq[(BI, EI)] = {
require(
instantiated,
s"$name.in should not be called until after instantiation of its parent LazyModule.module has begun"
)
bundleIn.zip(edgesIn)
}
/** Actually instantiate this node during [[LazyModuleImp]] evaluation. Mark that it's safe to use the Bundle wires,
* instantiate monitors on all input ports if appropriate, and return all the dangles of this node.
*/
protected[diplomacy] def instantiate(): Seq[Dangle] = {
instantiated = true
if (!circuitIdentity) {
(iPorts.zip(in)).foreach { case ((_, _, p, _), (b, e)) => if (p(MonitorsEnabled)) inner.monitor(b, e) }
}
danglesOut ++ danglesIn
}
protected[diplomacy] def cloneDangles(): Seq[Dangle] = emptyDanglesOut ++ emptyDanglesIn
/** Connects the outward part of a node with the inward part of this node. */
protected[diplomacy] def bind(
h: OutwardNode[DI, UI, BI],
binding: NodeBinding
)(
implicit p: Parameters,
sourceInfo: SourceInfo
): Unit = {
val x = this // x := y
val y = h
sourceLine(sourceInfo, " at ", "")
val i = x.iPushed
val o = y.oPushed
y.oPush(
i,
x,
binding match {
case BIND_ONCE => BIND_ONCE
case BIND_FLEX => BIND_FLEX
case BIND_STAR => BIND_QUERY
case BIND_QUERY => BIND_STAR
}
)
x.iPush(o, y, binding)
}
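  // Illustrative note (not in the original source): note the inversion above -- the side written
  // with a star records BIND_STAR on its own list while the other side records BIND_QUERY, so that
  // during resolution the query side can read the star side's resolved iStar/oStar to learn how
  // many edges that single binding expanded into.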
/* Metadata for printing the node graph. */
def inputs: Seq[(OutwardNode[DI, UI, BI], RenderedEdge)] = (iPorts.zip(edgesIn)).map { case ((_, n, p, _), e) =>
val re = inner.render(e)
(n, re.copy(flipped = re.flipped != p(RenderFlipped)))
}
/** Metadata for printing the node graph */
def outputs: Seq[(InwardNode[DO, UO, BO], RenderedEdge)] = oPorts.map { case (i, n, _, _) => (n, n.inputs(i)._2) }
}
File Edges.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.util._
class TLEdge(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdgeParameters(client, manager, params, sourceInfo)
{
def isAligned(address: UInt, lgSize: UInt): Bool = {
if (maxLgSize == 0) true.B else {
val mask = UIntToOH1(lgSize, maxLgSize)
(address & mask) === 0.U
}
}
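  // Worked example (illustrative, not in the original source): for lgSize = 3 (an 8-byte access)
  // UIntToOH1 yields the mask 0b111, so address 0x18 is aligned (0x18 & 0x7 == 0) while 0x1c is
  // not (0x1c & 0x7 == 4).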
def mask(address: UInt, lgSize: UInt): UInt =
MaskGen(address, lgSize, manager.beatBytes)
def staticHasData(bundle: TLChannel): Option[Boolean] = {
bundle match {
case _:TLBundleA => {
// Do there exist A messages with Data?
val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial
// Do there exist A messages without Data?
val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint
// Statically optimize the case where hasData is a constant
if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None
}
case _:TLBundleB => {
// Do there exist B messages with Data?
val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial
// Do there exist B messages without Data?
val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint
// Statically optimize the case where hasData is a constant
if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None
}
case _:TLBundleC => {
        // Do there exist C messages with Data?
val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe
// Do there exist C messages without Data?
val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe
if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None
}
case _:TLBundleD => {
        // Do there exist D messages with Data?
val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB
// Do there exist D messages without Data?
val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT
if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None
}
case _:TLBundleE => Some(false)
}
}
def isRequest(x: TLChannel): Bool = {
x match {
case a: TLBundleA => true.B
case b: TLBundleB => true.B
case c: TLBundleC => c.opcode(2) && c.opcode(1)
// opcode === TLMessages.Release ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(2) && !d.opcode(1)
// opcode === TLMessages.Grant ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
}
def isResponse(x: TLChannel): Bool = {
x match {
case a: TLBundleA => false.B
case b: TLBundleB => false.B
case c: TLBundleC => !c.opcode(2) || !c.opcode(1)
// opcode =/= TLMessages.Release &&
// opcode =/= TLMessages.ReleaseData
case d: TLBundleD => true.B // Grant isResponse + isRequest
case e: TLBundleE => true.B
}
}
def hasData(x: TLChannel): Bool = {
val opdata = x match {
case a: TLBundleA => !a.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case b: TLBundleB => !b.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case c: TLBundleC => c.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.ProbeAckData ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
staticHasData(x).map(_.B).getOrElse(opdata)
}
def opcode(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.opcode
case b: TLBundleB => b.opcode
case c: TLBundleC => c.opcode
case d: TLBundleD => d.opcode
}
}
def param(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.param
case b: TLBundleB => b.param
case c: TLBundleC => c.param
case d: TLBundleD => d.param
}
}
def size(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.size
case b: TLBundleB => b.size
case c: TLBundleC => c.size
case d: TLBundleD => d.size
}
}
def data(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.data
case b: TLBundleB => b.data
case c: TLBundleC => c.data
case d: TLBundleD => d.data
}
}
def corrupt(x: TLDataChannel): Bool = {
x match {
case a: TLBundleA => a.corrupt
case b: TLBundleB => b.corrupt
case c: TLBundleC => c.corrupt
case d: TLBundleD => d.corrupt
}
}
def mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.mask
case b: TLBundleB => b.mask
case c: TLBundleC => mask(c.address, c.size)
}
}
def full_mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => mask(a.address, a.size)
case b: TLBundleB => mask(b.address, b.size)
case c: TLBundleC => mask(c.address, c.size)
}
}
def address(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.address
case b: TLBundleB => b.address
case c: TLBundleC => c.address
}
}
def source(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.source
case b: TLBundleB => b.source
case c: TLBundleC => c.source
case d: TLBundleD => d.source
}
}
def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes)
def addr_lo(x: UInt): UInt =
if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0)
def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x))
def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x))
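  // Worked example (illustrative, not in the original source): with manager.beatBytes = 8,
  // addr_hi(0x1c.U) is 0x3 (the beat index) and addr_lo(0x1c.U) is 0x4 (the byte offset within
  // the beat).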
def numBeats(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 1.U
case bundle: TLDataChannel => {
val hasData = this.hasData(bundle)
val size = this.size(bundle)
val cutoff = log2Ceil(manager.beatBytes)
val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U
val decode = UIntToOH(size, maxLgSize+1) >> cutoff
Mux(hasData, decode | small.asUInt, 1.U)
}
}
}
def numBeats1(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 0.U
case bundle: TLDataChannel => {
if (maxLgSize == 0) {
0.U
} else {
val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes)
Mux(hasData(bundle), decode, 0.U)
}
}
}
}
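  // Worked example (illustrative, not in the original source): with beatBytes = 8 and a
  // data-carrying message of size = 5 (32 bytes), UIntToOH1(5, maxLgSize) = 0b11111 and
  // 0b11111 >> 3 = 3, so numBeats1 returns 3 (four beats minus one); messages without data
  // always return 0.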
def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val beats1 = numBeats1(bits)
val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W))
val counter1 = counter - 1.U
val first = counter === 0.U
val last = counter === 1.U || beats1 === 0.U
val done = last && fire
val count = (beats1 & ~counter1)
when (fire) {
counter := Mux(first, beats1, counter1)
}
(first, last, done, count)
}
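  // Worked example (illustrative, not in the original source): for a data message with beats1 = 3
  // (four beats), the counter steps 0 -> 3 -> 2 -> 1 on successive fires, so `first` is true on
  // the first beat, `last` on the fourth (counter === 1), and count = beats1 & ~counter1 yields
  // 0, 1, 2, 3 as the running beat number.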
def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1
def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire)
def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid)
def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2
def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire)
def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid)
def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3
def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire)
def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid)
def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3)
}
def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire)
def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid)
def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4)
}
def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire)
def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid)
def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes))
}
def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire)
def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid)
// Does the request need T permissions to be executed?
def needT(a: TLBundleA): Bool = {
val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLPermissions.NtoB -> false.B,
TLPermissions.NtoT -> true.B,
TLPermissions.BtoT -> true.B))
MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array(
TLMessages.PutFullData -> true.B,
TLMessages.PutPartialData -> true.B,
TLMessages.ArithmeticData -> true.B,
TLMessages.LogicalData -> true.B,
TLMessages.Get -> false.B,
TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLHints.PREFETCH_READ -> false.B,
TLHints.PREFETCH_WRITE -> true.B)),
TLMessages.AcquireBlock -> acq_needT,
TLMessages.AcquirePerm -> acq_needT))
}
// This is a very expensive circuit; use only if you really mean it!
def inFlight(x: TLBundle): (UInt, UInt) = {
val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W))
val bce = manager.anySupportAcquireB && client.anySupportProbe
val (a_first, a_last, _) = firstlast(x.a)
val (b_first, b_last, _) = firstlast(x.b)
val (c_first, c_last, _) = firstlast(x.c)
val (d_first, d_last, _) = firstlast(x.d)
val (e_first, e_last, _) = firstlast(x.e)
val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits))
val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits))
val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits))
val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits))
val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits))
val a_inc = x.a.fire && a_first && a_request
val b_inc = x.b.fire && b_first && b_request
val c_inc = x.c.fire && c_first && c_request
val d_inc = x.d.fire && d_first && d_request
val e_inc = x.e.fire && e_first && e_request
val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil))
val a_dec = x.a.fire && a_last && a_response
val b_dec = x.b.fire && b_last && b_response
val c_dec = x.c.fire && c_last && c_response
val d_dec = x.d.fire && d_last && d_response
val e_dec = x.e.fire && e_last && e_response
val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil))
val next_flight = flight + PopCount(inc) - PopCount(dec)
flight := next_flight
(flight, next_flight)
}
def prettySourceMapping(context: String): String = {
s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n"
}
}
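// Illustrative sketch only (not part of the original source): inside a module that
// owns a TLEdge, the beat helpers above are typically consumed like this. The names
// `edge` and `in` are assumptions for the example.
//
//   val (a_first, a_last, a_done) = edge.firstlast(in.a)
//   val beatsM1 = edge.numBeats1(in.a.bits) // beats-minus-one for this message
//   when (in.a.fire && a_last) { /* full burst accepted this cycle */ }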
class TLEdgeOut(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
// Transfers
def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquireBlock
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquirePerm
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.Release
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ReleaseData
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) =
Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B)
def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAck
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions, data)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAckData
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B)
def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink)
def GrantAck(toSink: UInt): TLBundleE = {
val e = Wire(new TLBundleE(bundle))
e.sink := toSink
e
}
// Accesses
def Get(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsGetFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Get
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutFullFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutFullData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, mask, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutPartialFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutPartialData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = {
require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsArithmeticFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.ArithmeticData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsLogicalFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.LogicalData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = {
require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsHintFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Hint
a.param := param
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data)
def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAckData
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size)
def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.HintAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
}
class TLEdgeIn(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = {
val todo = x.filter(!_.isEmpty)
val heads = todo.map(_.head)
val tails = todo.map(_.tail)
if (todo.isEmpty) Nil else { heads +: myTranspose(tails) }
}
// Transfers
def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = {
require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}")
val legal = client.supportsProbe(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Probe
b.param := capPermissions
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.Grant
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.GrantData
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B)
def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.ReleaseAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
// Accesses
def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = {
require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsGet(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Get
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsPutFull(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutFullData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, mask, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsPutPartial(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutPartialData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsArithmetic(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.ArithmeticData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsLogical(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.LogicalData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = {
require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsHint(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Hint
b.param := param
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size)
def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied)
def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data)
def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAckData
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B)
def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied)
def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B)
def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.HintAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
}
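// Illustrative sketch only (assumed names `edgeOut`, `edgeIn`, `out`, `in`, `addr`,
// `wantRead`, `readData`): a client builds an A-channel Get with the TLEdgeOut
// helpers, while a manager answers with the TLEdgeIn AccessAck helper.
//
//   val (legal, get) = edgeOut.Get(fromSource = 0.U, toAddress = addr, lgSize = 3.U)
//   out.a.valid := legal && wantRead
//   out.a.bits  := get
//
//   in.d.valid := in.a.valid                       // manager side, single-beat reply
//   in.d.bits  := edgeIn.AccessAck(in.a.bits, readData)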
File Arbiter.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.util.random.LFSR
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.util._
object TLArbiter
{
// (valids, select) => readys
type Policy = (Integer, UInt, Bool) => UInt
val lowestIndexFirst: Policy = (width, valids, select) => ~(leftOR(valids) << 1)(width-1, 0)
val highestIndexFirst: Policy = (width, valids, select) => ~((rightOR(valids) >> 1).pad(width))
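  // roundRobin: `mask` records the most recent winner and all indices above it; the
  // lowest-numbered valid requester above the previous winner is granted first
  // (wrapping around when none exists), and the mask is refreshed whenever a grant
  // is latched (`select`), so no persistently valid source is starved.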
val roundRobin: Policy = (width, valids, select) => if (width == 1) 1.U(1.W) else {
val valid = valids(width-1, 0)
assert (valid === valids)
val mask = RegInit(((BigInt(1) << width)-1).U(width-1,0))
val filter = Cat(valid & ~mask, valid)
val unready = (rightOR(filter, width*2, width) >> 1) | (mask << width)
val readys = ~((unready >> width) & unready(width-1, 0))
when (select && valid.orR) {
mask := leftOR(readys & valid, width)
}
readys(width-1, 0)
}
def lowestFromSeq[T <: TLChannel](edge: TLEdge, sink: DecoupledIO[T], sources: Seq[DecoupledIO[T]]): Unit = {
apply(lowestIndexFirst)(sink, sources.map(s => (edge.numBeats1(s.bits), s)):_*)
}
def lowest[T <: TLChannel](edge: TLEdge, sink: DecoupledIO[T], sources: DecoupledIO[T]*): Unit = {
apply(lowestIndexFirst)(sink, sources.toList.map(s => (edge.numBeats1(s.bits), s)):_*)
}
def highest[T <: TLChannel](edge: TLEdge, sink: DecoupledIO[T], sources: DecoupledIO[T]*): Unit = {
apply(highestIndexFirst)(sink, sources.toList.map(s => (edge.numBeats1(s.bits), s)):_*)
}
def robin[T <: TLChannel](edge: TLEdge, sink: DecoupledIO[T], sources: DecoupledIO[T]*): Unit = {
apply(roundRobin)(sink, sources.toList.map(s => (edge.numBeats1(s.bits), s)):_*)
}
def apply[T <: Data](policy: Policy)(sink: DecoupledIO[T], sources: (UInt, DecoupledIO[T])*): Unit = {
if (sources.isEmpty) {
sink.bits := DontCare
} else if (sources.size == 1) {
sink :<>= sources.head._2
} else {
val pairs = sources.toList
val beatsIn = pairs.map(_._1)
val sourcesIn = pairs.map(_._2)
// The number of beats which remain to be sent
val beatsLeft = RegInit(0.U)
val idle = beatsLeft === 0.U
val latch = idle && sink.ready // winner (if any) claims sink
// Who wants access to the sink?
val valids = sourcesIn.map(_.valid)
// Arbitrate amongst the requests
val readys = VecInit(policy(valids.size, Cat(valids.reverse), latch).asBools)
// Which request wins arbitration?
val winner = VecInit((readys zip valids) map { case (r,v) => r&&v })
// Confirm the policy works properly
require (readys.size == valids.size)
// Never two winners
val prefixOR = winner.scanLeft(false.B)(_||_).init
assert((prefixOR zip winner) map { case (p,w) => !p || !w } reduce {_ && _})
// If there was any request, there is a winner
assert (!valids.reduce(_||_) || winner.reduce(_||_))
// Track remaining beats
val maskedBeats = (winner zip beatsIn) map { case (w,b) => Mux(w, b, 0.U) }
val initBeats = maskedBeats.reduce(_ | _) // no winner => 0 beats
beatsLeft := Mux(latch, initBeats, beatsLeft - sink.fire)
// The one-hot source granted access in the previous cycle
val state = RegInit(VecInit(Seq.fill(sources.size)(false.B)))
val muxState = Mux(idle, winner, state)
state := muxState
val allowed = Mux(idle, readys, state)
(sourcesIn zip allowed) foreach { case (s, r) =>
s.ready := sink.ready && r
}
sink.valid := Mux(idle, valids.reduce(_||_), Mux1H(state, valids))
sink.bits :<= Mux1H(muxState, sourcesIn.map(_.bits))
}
}
}
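// Minimal usage sketch (assumed names `edge`, `out`, `dFromA`, `dFromC`): merge two
// D-channel sources into one sink, preferring the lower index when both are valid.
//
//   TLArbiter.lowest(edge, out.d, dFromA, dFromC)
//
// The generic form takes an explicit policy and per-source beat counts:
//
//   TLArbiter(TLArbiter.roundRobin)(out.d, (edge.numBeats1(dFromA.bits), dFromA),
//                                          (edge.numBeats1(dFromC.bits), dFromC))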
// Synthesizable unit tests
import freechips.rocketchip.unittest._
abstract class DecoupledArbiterTest(
policy: TLArbiter.Policy,
txns: Int,
timeout: Int,
val numSources: Int,
beatsLeftFromIdx: Int => UInt)
(implicit p: Parameters) extends UnitTest(timeout)
{
val sources = Wire(Vec(numSources, DecoupledIO(UInt(log2Ceil(numSources).W))))
dontTouch(sources.suggestName("sources"))
val sink = Wire(DecoupledIO(UInt(log2Ceil(numSources).W)))
dontTouch(sink.suggestName("sink"))
val count = RegInit(0.U(log2Ceil(txns).W))
val lfsr = LFSR(16, true.B)
sources.zipWithIndex.map { case (z, i) => z.bits := i.U }
TLArbiter(policy)(sink, sources.zipWithIndex.map {
case (z, i) => (beatsLeftFromIdx(i), z)
}:_*)
count := count + 1.U
io.finished := count >= txns.U
}
/** This tests that when a specific pattern of source valids are driven,
* a new index from amongst that pattern is always selected,
* unless one of those sources takes multiple beats,
* in which case the same index should be selected until the arbiter goes idle.
*/
class TLDecoupledArbiterRobinTest(txns: Int = 128, timeout: Int = 500000, print: Boolean = false)
(implicit p: Parameters)
extends DecoupledArbiterTest(TLArbiter.roundRobin, txns, timeout, 6, i => i.U)
{
val lastWinner = RegInit((numSources+1).U)
val beatsLeft = RegInit(0.U(log2Ceil(numSources).W))
val first = lastWinner > numSources.U
val valid = lfsr(0)
val ready = lfsr(15)
sink.ready := ready
sources.zipWithIndex.map { // pattern: every even-indexed valid is driven the same random way
case (s, i) => s.valid := (if (i % 2 == 1) false.B else valid)
}
when (sink.fire) {
if (print) { printf("TestRobin: %d\n", sink.bits) }
when (beatsLeft === 0.U) {
assert(lastWinner =/= sink.bits, "Round robin did not pick a new idx despite one being valid.")
lastWinner := sink.bits
beatsLeft := sink.bits
} .otherwise {
assert(lastWinner === sink.bits, "Round robin did not pick the same index over multiple beats")
beatsLeft := beatsLeft - 1.U
}
}
if (print) {
when (!sink.fire) { printf("TestRobin: idle (%d %d)\n", valid, ready) }
}
}
/** This tests that the lowest index is always selected across random single cycle transactions. */
class TLDecoupledArbiterLowestTest(txns: Int = 128, timeout: Int = 500000)(implicit p: Parameters)
extends DecoupledArbiterTest(TLArbiter.lowestIndexFirst, txns, timeout, 15, _ => 0.U)
{
def assertLowest(id: Int): Unit = {
when (sources(id).valid) {
assert((numSources-1 until id by -1).map(!sources(_).fire).foldLeft(true.B)(_&&_), s"$id was valid but a higher valid source was granted ready.")
}
}
sources.zipWithIndex.map { case (s, i) => s.valid := lfsr(i) }
sink.ready := lfsr(15)
when (sink.fire) { (0 until numSources).foreach(assertLowest(_)) }
}
/** This tests that the highest index is always selected across random single cycle transactions. */
class TLDecoupledArbiterHighestTest(txns: Int = 128, timeout: Int = 500000)(implicit p: Parameters)
extends DecoupledArbiterTest(TLArbiter.highestIndexFirst, txns, timeout, 15, _ => 0.U)
{
def assertHighest(id: Int): Unit = {
when (sources(id).valid) {
assert((0 until id).map(!sources(_).fire).foldLeft(true.B)(_&&_), s"$id was valid but a lower valid source was granted ready.")
}
}
sources.zipWithIndex.map { case (s, i) => s.valid := lfsr(i) }
sink.ready := lfsr(15)
when (sink.fire) { (0 until numSources).foreach(assertHighest(_)) }
}
module TLAtomicAutomata_cbus( // @[AtomicAutomata.scala:36:9]
input clock, // @[AtomicAutomata.scala:36:9]
input reset, // @[AtomicAutomata.scala:36:9]
output auto_in_a_ready, // @[LazyModuleImp.scala:107:25]
input auto_in_a_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_a_bits_param, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_in_a_bits_size, // @[LazyModuleImp.scala:107:25]
input [7:0] auto_in_a_bits_source, // @[LazyModuleImp.scala:107:25]
input [28:0] auto_in_a_bits_address, // @[LazyModuleImp.scala:107:25]
input [7:0] auto_in_a_bits_mask, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_in_a_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_in_a_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input auto_in_d_ready, // @[LazyModuleImp.scala:107:25]
output auto_in_d_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_in_d_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_in_d_bits_param, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_in_d_bits_size, // @[LazyModuleImp.scala:107:25]
output [7:0] auto_in_d_bits_source, // @[LazyModuleImp.scala:107:25]
output auto_in_d_bits_sink, // @[LazyModuleImp.scala:107:25]
output auto_in_d_bits_denied, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_in_d_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_in_d_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input auto_out_a_ready, // @[LazyModuleImp.scala:107:25]
output auto_out_a_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_a_bits_param, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_out_a_bits_size, // @[LazyModuleImp.scala:107:25]
output [7:0] auto_out_a_bits_source, // @[LazyModuleImp.scala:107:25]
output [28:0] auto_out_a_bits_address, // @[LazyModuleImp.scala:107:25]
output [7:0] auto_out_a_bits_mask, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_out_a_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_out_a_bits_corrupt, // @[LazyModuleImp.scala:107:25]
output auto_out_d_ready, // @[LazyModuleImp.scala:107:25]
input auto_out_d_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_out_d_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_out_d_bits_param, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_out_d_bits_size, // @[LazyModuleImp.scala:107:25]
input [7:0] auto_out_d_bits_source, // @[LazyModuleImp.scala:107:25]
input auto_out_d_bits_sink, // @[LazyModuleImp.scala:107:25]
input auto_out_d_bits_denied, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_out_d_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_out_d_bits_corrupt // @[LazyModuleImp.scala:107:25]
);
wire auto_in_a_valid_0 = auto_in_a_valid; // @[AtomicAutomata.scala:36:9]
wire [2:0] auto_in_a_bits_opcode_0 = auto_in_a_bits_opcode; // @[AtomicAutomata.scala:36:9]
wire [2:0] auto_in_a_bits_param_0 = auto_in_a_bits_param; // @[AtomicAutomata.scala:36:9]
wire [3:0] auto_in_a_bits_size_0 = auto_in_a_bits_size; // @[AtomicAutomata.scala:36:9]
wire [7:0] auto_in_a_bits_source_0 = auto_in_a_bits_source; // @[AtomicAutomata.scala:36:9]
wire [28:0] auto_in_a_bits_address_0 = auto_in_a_bits_address; // @[AtomicAutomata.scala:36:9]
wire [7:0] auto_in_a_bits_mask_0 = auto_in_a_bits_mask; // @[AtomicAutomata.scala:36:9]
wire [63:0] auto_in_a_bits_data_0 = auto_in_a_bits_data; // @[AtomicAutomata.scala:36:9]
wire auto_in_a_bits_corrupt_0 = auto_in_a_bits_corrupt; // @[AtomicAutomata.scala:36:9]
wire auto_in_d_ready_0 = auto_in_d_ready; // @[AtomicAutomata.scala:36:9]
wire auto_out_a_ready_0 = auto_out_a_ready; // @[AtomicAutomata.scala:36:9]
wire auto_out_d_valid_0 = auto_out_d_valid; // @[AtomicAutomata.scala:36:9]
wire [2:0] auto_out_d_bits_opcode_0 = auto_out_d_bits_opcode; // @[AtomicAutomata.scala:36:9]
wire [1:0] auto_out_d_bits_param_0 = auto_out_d_bits_param; // @[AtomicAutomata.scala:36:9]
wire [3:0] auto_out_d_bits_size_0 = auto_out_d_bits_size; // @[AtomicAutomata.scala:36:9]
wire [7:0] auto_out_d_bits_source_0 = auto_out_d_bits_source; // @[AtomicAutomata.scala:36:9]
wire auto_out_d_bits_sink_0 = auto_out_d_bits_sink; // @[AtomicAutomata.scala:36:9]
wire auto_out_d_bits_denied_0 = auto_out_d_bits_denied; // @[AtomicAutomata.scala:36:9]
wire [63:0] auto_out_d_bits_data_0 = auto_out_d_bits_data; // @[AtomicAutomata.scala:36:9]
wire auto_out_d_bits_corrupt_0 = auto_out_d_bits_corrupt; // @[AtomicAutomata.scala:36:9]
wire _a_canLogical_T = 1'h1; // @[Parameters.scala:92:28]
wire _a_canArithmetic_T = 1'h1; // @[Parameters.scala:92:28]
wire _a_cam_sel_put_T = 1'h1; // @[AtomicAutomata.scala:103:83]
wire _a_fifoId_T_4 = 1'h1; // @[Parameters.scala:137:59]
wire _a_cam_busy_T = 1'h1; // @[AtomicAutomata.scala:111:60]
wire _a_cam_sel_free_T = 1'h1; // @[AtomicAutomata.scala:116:85]
wire _source_c_bits_legal_T = 1'h1; // @[Parameters.scala:92:28]
wire _source_c_bits_legal_T_10 = 1'h1; // @[Parameters.scala:92:28]
wire _a_canLogical_T_16 = 1'h0; // @[Parameters.scala:684:29]
wire _a_canLogical_T_46 = 1'h0; // @[Parameters.scala:684:54]
wire _a_canArithmetic_T_16 = 1'h0; // @[Parameters.scala:684:29]
wire _a_canArithmetic_T_46 = 1'h0; // @[Parameters.scala:684:54]
wire _source_c_bits_legal_T_56 = 1'h0; // @[Parameters.scala:684:29]
wire _source_c_bits_legal_T_62 = 1'h0; // @[Parameters.scala:684:54]
wire maskedBeats_0 = 1'h0; // @[Arbiter.scala:82:69]
wire _state_WIRE_0 = 1'h0; // @[Arbiter.scala:88:34]
wire _state_WIRE_1 = 1'h0; // @[Arbiter.scala:88:34]
wire [2:0] source_c_bits_opcode = 3'h0; // @[AtomicAutomata.scala:165:28]
wire [2:0] source_c_bits_param = 3'h0; // @[AtomicAutomata.scala:165:28]
wire [2:0] source_c_bits_a_opcode = 3'h0; // @[Edges.scala:480:17]
wire [2:0] source_c_bits_a_param = 3'h0; // @[Edges.scala:480:17]
wire [2:0] _nodeOut_a_bits_T_18 = 3'h0; // @[Mux.scala:30:73]
wire [2:0] _nodeOut_a_bits_T_21 = 3'h0; // @[Mux.scala:30:73]
wire [29:0] _a_fifoId_T_2 = 30'h0; // @[Parameters.scala:137:46]
wire [29:0] _a_fifoId_T_3 = 30'h0; // @[Parameters.scala:137:46]
wire [1:0] initval_state = 2'h0; // @[AtomicAutomata.scala:80:27]
wire [1:0] _cam_s_WIRE_0_state = 2'h0; // @[AtomicAutomata.scala:82:50]
wire nodeIn_a_ready; // @[MixedNode.scala:551:17]
wire nodeIn_a_valid = auto_in_a_valid_0; // @[AtomicAutomata.scala:36:9]
wire [2:0] nodeIn_a_bits_opcode = auto_in_a_bits_opcode_0; // @[AtomicAutomata.scala:36:9]
wire [2:0] nodeIn_a_bits_param = auto_in_a_bits_param_0; // @[AtomicAutomata.scala:36:9]
wire [3:0] nodeIn_a_bits_size = auto_in_a_bits_size_0; // @[AtomicAutomata.scala:36:9]
wire [7:0] nodeIn_a_bits_source = auto_in_a_bits_source_0; // @[AtomicAutomata.scala:36:9]
wire [28:0] nodeIn_a_bits_address = auto_in_a_bits_address_0; // @[AtomicAutomata.scala:36:9]
wire [7:0] nodeIn_a_bits_mask = auto_in_a_bits_mask_0; // @[AtomicAutomata.scala:36:9]
wire [63:0] nodeIn_a_bits_data = auto_in_a_bits_data_0; // @[AtomicAutomata.scala:36:9]
wire nodeIn_a_bits_corrupt = auto_in_a_bits_corrupt_0; // @[AtomicAutomata.scala:36:9]
wire nodeIn_d_ready = auto_in_d_ready_0; // @[AtomicAutomata.scala:36:9]
wire nodeIn_d_valid; // @[MixedNode.scala:551:17]
wire [2:0] nodeIn_d_bits_opcode; // @[MixedNode.scala:551:17]
wire [1:0] nodeIn_d_bits_param; // @[MixedNode.scala:551:17]
wire [3:0] nodeIn_d_bits_size; // @[MixedNode.scala:551:17]
wire [7:0] nodeIn_d_bits_source; // @[MixedNode.scala:551:17]
wire nodeIn_d_bits_sink; // @[MixedNode.scala:551:17]
wire nodeIn_d_bits_denied; // @[MixedNode.scala:551:17]
wire [63:0] nodeIn_d_bits_data; // @[MixedNode.scala:551:17]
wire nodeIn_d_bits_corrupt; // @[MixedNode.scala:551:17]
wire nodeOut_a_ready = auto_out_a_ready_0; // @[AtomicAutomata.scala:36:9]
wire nodeOut_a_valid; // @[MixedNode.scala:542:17]
wire [2:0] nodeOut_a_bits_opcode; // @[MixedNode.scala:542:17]
wire [2:0] nodeOut_a_bits_param; // @[MixedNode.scala:542:17]
wire [3:0] nodeOut_a_bits_size; // @[MixedNode.scala:542:17]
wire [7:0] nodeOut_a_bits_source; // @[MixedNode.scala:542:17]
wire [28:0] nodeOut_a_bits_address; // @[MixedNode.scala:542:17]
wire [7:0] nodeOut_a_bits_mask; // @[MixedNode.scala:542:17]
wire [63:0] nodeOut_a_bits_data; // @[MixedNode.scala:542:17]
wire nodeOut_a_bits_corrupt; // @[MixedNode.scala:542:17]
wire nodeOut_d_ready; // @[MixedNode.scala:542:17]
wire nodeOut_d_valid = auto_out_d_valid_0; // @[AtomicAutomata.scala:36:9]
wire [2:0] nodeOut_d_bits_opcode = auto_out_d_bits_opcode_0; // @[AtomicAutomata.scala:36:9]
wire [1:0] nodeOut_d_bits_param = auto_out_d_bits_param_0; // @[AtomicAutomata.scala:36:9]
wire [3:0] nodeOut_d_bits_size = auto_out_d_bits_size_0; // @[AtomicAutomata.scala:36:9]
wire [7:0] nodeOut_d_bits_source = auto_out_d_bits_source_0; // @[AtomicAutomata.scala:36:9]
wire nodeOut_d_bits_sink = auto_out_d_bits_sink_0; // @[AtomicAutomata.scala:36:9]
wire nodeOut_d_bits_denied = auto_out_d_bits_denied_0; // @[AtomicAutomata.scala:36:9]
wire [63:0] nodeOut_d_bits_data = auto_out_d_bits_data_0; // @[AtomicAutomata.scala:36:9]
wire nodeOut_d_bits_corrupt = auto_out_d_bits_corrupt_0; // @[AtomicAutomata.scala:36:9]
wire auto_in_a_ready_0; // @[AtomicAutomata.scala:36:9]
wire [2:0] auto_in_d_bits_opcode_0; // @[AtomicAutomata.scala:36:9]
wire [1:0] auto_in_d_bits_param_0; // @[AtomicAutomata.scala:36:9]
wire [3:0] auto_in_d_bits_size_0; // @[AtomicAutomata.scala:36:9]
wire [7:0] auto_in_d_bits_source_0; // @[AtomicAutomata.scala:36:9]
wire auto_in_d_bits_sink_0; // @[AtomicAutomata.scala:36:9]
wire auto_in_d_bits_denied_0; // @[AtomicAutomata.scala:36:9]
wire [63:0] auto_in_d_bits_data_0; // @[AtomicAutomata.scala:36:9]
wire auto_in_d_bits_corrupt_0; // @[AtomicAutomata.scala:36:9]
wire auto_in_d_valid_0; // @[AtomicAutomata.scala:36:9]
wire [2:0] auto_out_a_bits_opcode_0; // @[AtomicAutomata.scala:36:9]
wire [2:0] auto_out_a_bits_param_0; // @[AtomicAutomata.scala:36:9]
wire [3:0] auto_out_a_bits_size_0; // @[AtomicAutomata.scala:36:9]
wire [7:0] auto_out_a_bits_source_0; // @[AtomicAutomata.scala:36:9]
wire [28:0] auto_out_a_bits_address_0; // @[AtomicAutomata.scala:36:9]
wire [7:0] auto_out_a_bits_mask_0; // @[AtomicAutomata.scala:36:9]
wire [63:0] auto_out_a_bits_data_0; // @[AtomicAutomata.scala:36:9]
wire auto_out_a_bits_corrupt_0; // @[AtomicAutomata.scala:36:9]
wire auto_out_a_valid_0; // @[AtomicAutomata.scala:36:9]
wire auto_out_d_ready_0; // @[AtomicAutomata.scala:36:9]
wire _nodeIn_a_ready_T; // @[AtomicAutomata.scala:156:38]
assign auto_in_a_ready_0 = nodeIn_a_ready; // @[AtomicAutomata.scala:36:9]
wire [3:0] source_i_bits_size = nodeIn_a_bits_size; // @[AtomicAutomata.scala:154:28]
wire [7:0] source_i_bits_source = nodeIn_a_bits_source; // @[AtomicAutomata.scala:154:28]
wire [28:0] _a_canLogical_T_17 = nodeIn_a_bits_address; // @[Parameters.scala:137:31]
wire [28:0] _a_canArithmetic_T_17 = nodeIn_a_bits_address; // @[Parameters.scala:137:31]
wire [28:0] _a_fifoId_T = nodeIn_a_bits_address; // @[Parameters.scala:137:31]
wire [28:0] source_i_bits_address = nodeIn_a_bits_address; // @[AtomicAutomata.scala:154:28]
wire [7:0] source_i_bits_mask = nodeIn_a_bits_mask; // @[AtomicAutomata.scala:154:28]
wire [63:0] source_i_bits_data = nodeIn_a_bits_data; // @[AtomicAutomata.scala:154:28]
wire source_i_bits_corrupt = nodeIn_a_bits_corrupt; // @[AtomicAutomata.scala:154:28]
wire _nodeIn_d_valid_T_1; // @[AtomicAutomata.scala:241:35]
assign auto_in_d_valid_0 = nodeIn_d_valid; // @[AtomicAutomata.scala:36:9]
assign auto_in_d_bits_opcode_0 = nodeIn_d_bits_opcode; // @[AtomicAutomata.scala:36:9]
assign auto_in_d_bits_param_0 = nodeIn_d_bits_param; // @[AtomicAutomata.scala:36:9]
assign auto_in_d_bits_size_0 = nodeIn_d_bits_size; // @[AtomicAutomata.scala:36:9]
assign auto_in_d_bits_source_0 = nodeIn_d_bits_source; // @[AtomicAutomata.scala:36:9]
assign auto_in_d_bits_sink_0 = nodeIn_d_bits_sink; // @[AtomicAutomata.scala:36:9]
assign auto_in_d_bits_denied_0 = nodeIn_d_bits_denied; // @[AtomicAutomata.scala:36:9]
assign auto_in_d_bits_data_0 = nodeIn_d_bits_data; // @[AtomicAutomata.scala:36:9]
assign auto_in_d_bits_corrupt_0 = nodeIn_d_bits_corrupt; // @[AtomicAutomata.scala:36:9]
wire _nodeOut_a_valid_T_4; // @[Arbiter.scala:96:24]
assign auto_out_a_valid_0 = nodeOut_a_valid; // @[AtomicAutomata.scala:36:9]
wire [2:0] _nodeOut_a_bits_WIRE_opcode; // @[Mux.scala:30:73]
assign auto_out_a_bits_opcode_0 = nodeOut_a_bits_opcode; // @[AtomicAutomata.scala:36:9]
wire [2:0] _nodeOut_a_bits_WIRE_param; // @[Mux.scala:30:73]
assign auto_out_a_bits_param_0 = nodeOut_a_bits_param; // @[AtomicAutomata.scala:36:9]
wire [3:0] _nodeOut_a_bits_WIRE_size; // @[Mux.scala:30:73]
assign auto_out_a_bits_size_0 = nodeOut_a_bits_size; // @[AtomicAutomata.scala:36:9]
wire [7:0] _nodeOut_a_bits_WIRE_source; // @[Mux.scala:30:73]
assign auto_out_a_bits_source_0 = nodeOut_a_bits_source; // @[AtomicAutomata.scala:36:9]
wire [28:0] _nodeOut_a_bits_WIRE_address; // @[Mux.scala:30:73]
assign auto_out_a_bits_address_0 = nodeOut_a_bits_address; // @[AtomicAutomata.scala:36:9]
wire [7:0] _nodeOut_a_bits_WIRE_mask; // @[Mux.scala:30:73]
assign auto_out_a_bits_mask_0 = nodeOut_a_bits_mask; // @[AtomicAutomata.scala:36:9]
wire [63:0] _nodeOut_a_bits_WIRE_data; // @[Mux.scala:30:73]
assign auto_out_a_bits_data_0 = nodeOut_a_bits_data; // @[AtomicAutomata.scala:36:9]
wire _nodeOut_a_bits_WIRE_corrupt; // @[Mux.scala:30:73]
assign auto_out_a_bits_corrupt_0 = nodeOut_a_bits_corrupt; // @[AtomicAutomata.scala:36:9]
wire _nodeOut_d_ready_T; // @[AtomicAutomata.scala:242:35]
assign auto_out_d_ready_0 = nodeOut_d_ready; // @[AtomicAutomata.scala:36:9]
assign nodeIn_d_bits_param = nodeOut_d_bits_param; // @[MixedNode.scala:542:17, :551:17]
assign nodeIn_d_bits_size = nodeOut_d_bits_size; // @[MixedNode.scala:542:17, :551:17]
assign nodeIn_d_bits_source = nodeOut_d_bits_source; // @[MixedNode.scala:542:17, :551:17]
assign nodeIn_d_bits_sink = nodeOut_d_bits_sink; // @[MixedNode.scala:542:17, :551:17]
reg [1:0] cam_s_0_state; // @[AtomicAutomata.scala:82:28]
reg [2:0] cam_a_0_bits_opcode; // @[AtomicAutomata.scala:83:24]
reg [2:0] cam_a_0_bits_param; // @[AtomicAutomata.scala:83:24]
reg [3:0] cam_a_0_bits_size; // @[AtomicAutomata.scala:83:24]
wire [3:0] source_c_bits_a_size = cam_a_0_bits_size; // @[Edges.scala:480:17]
wire [3:0] _source_c_bits_a_mask_sizeOH_T = cam_a_0_bits_size; // @[Misc.scala:202:34]
reg [7:0] cam_a_0_bits_source; // @[AtomicAutomata.scala:83:24]
wire [7:0] source_c_bits_a_source = cam_a_0_bits_source; // @[Edges.scala:480:17]
reg [28:0] cam_a_0_bits_address; // @[AtomicAutomata.scala:83:24]
wire [28:0] _source_c_bits_legal_T_14 = cam_a_0_bits_address; // @[AtomicAutomata.scala:83:24]
wire [28:0] source_c_bits_a_address = cam_a_0_bits_address; // @[Edges.scala:480:17]
reg [7:0] cam_a_0_bits_mask; // @[AtomicAutomata.scala:83:24]
reg [63:0] cam_a_0_bits_data; // @[AtomicAutomata.scala:83:24]
reg cam_a_0_bits_corrupt; // @[AtomicAutomata.scala:83:24]
reg [3:0] cam_a_0_lut; // @[AtomicAutomata.scala:83:24]
reg [63:0] cam_d_0_data; // @[AtomicAutomata.scala:84:24]
reg cam_d_0_denied; // @[AtomicAutomata.scala:84:24]
reg cam_d_0_corrupt; // @[AtomicAutomata.scala:84:24]
wire cam_free_0 = ~(|cam_s_0_state); // @[AtomicAutomata.scala:82:28, :86:44]
wire _a_cam_por_free_T = cam_free_0; // @[AtomicAutomata.scala:86:44, :115:58]
wire a_cam_sel_free_0 = cam_free_0; // @[AtomicAutomata.scala:86:44, :116:82]
wire _GEN = cam_s_0_state == 2'h2; // @[AtomicAutomata.scala:82:28, :87:44]
wire cam_amo_0; // @[AtomicAutomata.scala:87:44]
assign cam_amo_0 = _GEN; // @[AtomicAutomata.scala:87:44]
wire _cam_abusy_T_1; // @[AtomicAutomata.scala:88:68]
assign _cam_abusy_T_1 = _GEN; // @[AtomicAutomata.scala:87:44, :88:68]
wire _a_cam_por_put_T = cam_amo_0; // @[AtomicAutomata.scala:87:44, :102:56]
wire a_cam_sel_put_0 = cam_amo_0; // @[AtomicAutomata.scala:87:44, :103:80]
wire source_c_valid = cam_amo_0; // @[AtomicAutomata.scala:87:44, :165:28]
wire _cam_abusy_T = &cam_s_0_state; // @[AtomicAutomata.scala:82:28, :88:49]
wire cam_abusy_0 = _cam_abusy_T | _cam_abusy_T_1; // @[AtomicAutomata.scala:88:{49,57,68}]
wire a_cam_busy = cam_abusy_0; // @[AtomicAutomata.scala:88:57, :111:96]
wire cam_dmatch_0 = |cam_s_0_state; // @[AtomicAutomata.scala:82:28, :86:44, :89:49]
wire _GEN_0 = nodeIn_a_bits_size < 4'h4; // @[Parameters.scala:92:38]
wire _a_canLogical_T_1; // @[Parameters.scala:92:38]
assign _a_canLogical_T_1 = _GEN_0; // @[Parameters.scala:92:38]
wire _a_canArithmetic_T_1; // @[Parameters.scala:92:38]
assign _a_canArithmetic_T_1 = _GEN_0; // @[Parameters.scala:92:38]
wire _a_canLogical_T_2 = _a_canLogical_T_1; // @[Parameters.scala:92:{33,38}]
wire _a_canLogical_T_3 = _a_canLogical_T_2; // @[Parameters.scala:684:29]
wire [28:0] _GEN_1 = {nodeIn_a_bits_address[28:13], nodeIn_a_bits_address[12:0] ^ 13'h1000}; // @[Parameters.scala:137:31]
wire [28:0] _a_canLogical_T_4; // @[Parameters.scala:137:31]
assign _a_canLogical_T_4 = _GEN_1; // @[Parameters.scala:137:31]
wire [28:0] _a_canArithmetic_T_4; // @[Parameters.scala:137:31]
assign _a_canArithmetic_T_4 = _GEN_1; // @[Parameters.scala:137:31]
wire [29:0] _a_canLogical_T_5 = {1'h0, _a_canLogical_T_4}; // @[Parameters.scala:137:{31,41}]
wire [29:0] _a_canLogical_T_6 = _a_canLogical_T_5 & 30'h1A011000; // @[Parameters.scala:137:{41,46}]
wire [29:0] _a_canLogical_T_7 = _a_canLogical_T_6; // @[Parameters.scala:137:46]
wire _a_canLogical_T_8 = _a_canLogical_T_7 == 30'h0; // @[Parameters.scala:137:{46,59}]
wire [28:0] _GEN_2 = nodeIn_a_bits_address ^ 29'h10000000; // @[Parameters.scala:137:31]
wire [28:0] _a_canLogical_T_9; // @[Parameters.scala:137:31]
assign _a_canLogical_T_9 = _GEN_2; // @[Parameters.scala:137:31]
wire [28:0] _a_canArithmetic_T_9; // @[Parameters.scala:137:31]
assign _a_canArithmetic_T_9 = _GEN_2; // @[Parameters.scala:137:31]
wire [29:0] _a_canLogical_T_10 = {1'h0, _a_canLogical_T_9}; // @[Parameters.scala:137:{31,41}]
wire [29:0] _a_canLogical_T_11 = _a_canLogical_T_10 & 30'h1A001000; // @[Parameters.scala:137:{41,46}]
wire [29:0] _a_canLogical_T_12 = _a_canLogical_T_11; // @[Parameters.scala:137:46]
wire _a_canLogical_T_13 = _a_canLogical_T_12 == 30'h0; // @[Parameters.scala:137:{46,59}]
wire _a_canLogical_T_14 = _a_canLogical_T_8 | _a_canLogical_T_13; // @[Parameters.scala:685:42]
wire _a_canLogical_T_15 = _a_canLogical_T_3 & _a_canLogical_T_14; // @[Parameters.scala:684:{29,54}, :685:42]
wire _a_canLogical_T_47 = _a_canLogical_T_15; // @[Parameters.scala:684:54, :686:26]
wire [29:0] _a_canLogical_T_18 = {1'h0, _a_canLogical_T_17}; // @[Parameters.scala:137:{31,41}]
wire [29:0] _a_canLogical_T_19 = _a_canLogical_T_18 & 30'h1A001000; // @[Parameters.scala:137:{41,46}]
wire [29:0] _a_canLogical_T_20 = _a_canLogical_T_19; // @[Parameters.scala:137:46]
wire _a_canLogical_T_21 = _a_canLogical_T_20 == 30'h0; // @[Parameters.scala:137:{46,59}]
wire [28:0] _GEN_3 = {nodeIn_a_bits_address[28:17], nodeIn_a_bits_address[16:0] ^ 17'h10000}; // @[Parameters.scala:137:31]
wire [28:0] _a_canLogical_T_22; // @[Parameters.scala:137:31]
assign _a_canLogical_T_22 = _GEN_3; // @[Parameters.scala:137:31]
wire [28:0] _a_canArithmetic_T_22; // @[Parameters.scala:137:31]
assign _a_canArithmetic_T_22 = _GEN_3; // @[Parameters.scala:137:31]
wire [29:0] _a_canLogical_T_23 = {1'h0, _a_canLogical_T_22}; // @[Parameters.scala:137:{31,41}]
wire [29:0] _a_canLogical_T_24 = _a_canLogical_T_23 & 30'h1A010000; // @[Parameters.scala:137:{41,46}]
wire [29:0] _a_canLogical_T_25 = _a_canLogical_T_24; // @[Parameters.scala:137:46]
wire _a_canLogical_T_26 = _a_canLogical_T_25 == 30'h0; // @[Parameters.scala:137:{46,59}]
wire [28:0] _GEN_4 = {nodeIn_a_bits_address[28:26], nodeIn_a_bits_address[25:0] ^ 26'h2000000}; // @[Parameters.scala:137:31]
wire [28:0] _a_canLogical_T_27; // @[Parameters.scala:137:31]
assign _a_canLogical_T_27 = _GEN_4; // @[Parameters.scala:137:31]
wire [28:0] _a_canArithmetic_T_27; // @[Parameters.scala:137:31]
assign _a_canArithmetic_T_27 = _GEN_4; // @[Parameters.scala:137:31]
wire [29:0] _a_canLogical_T_28 = {1'h0, _a_canLogical_T_27}; // @[Parameters.scala:137:{31,41}]
wire [29:0] _a_canLogical_T_29 = _a_canLogical_T_28 & 30'h1A010000; // @[Parameters.scala:137:{41,46}]
wire [29:0] _a_canLogical_T_30 = _a_canLogical_T_29; // @[Parameters.scala:137:46]
wire _a_canLogical_T_31 = _a_canLogical_T_30 == 30'h0; // @[Parameters.scala:137:{46,59}]
wire [28:0] _GEN_5 = {nodeIn_a_bits_address[28:26], nodeIn_a_bits_address[25:0] ^ 26'h2010000}; // @[Parameters.scala:137:31]
wire [28:0] _a_canLogical_T_32; // @[Parameters.scala:137:31]
assign _a_canLogical_T_32 = _GEN_5; // @[Parameters.scala:137:31]
wire [28:0] _a_canArithmetic_T_32; // @[Parameters.scala:137:31]
assign _a_canArithmetic_T_32 = _GEN_5; // @[Parameters.scala:137:31]
wire [29:0] _a_canLogical_T_33 = {1'h0, _a_canLogical_T_32}; // @[Parameters.scala:137:{31,41}]
wire [29:0] _a_canLogical_T_34 = _a_canLogical_T_33 & 30'h1A011000; // @[Parameters.scala:137:{41,46}]
wire [29:0] _a_canLogical_T_35 = _a_canLogical_T_34; // @[Parameters.scala:137:46]
wire _a_canLogical_T_36 = _a_canLogical_T_35 == 30'h0; // @[Parameters.scala:137:{46,59}]
wire [28:0] _GEN_6 = {nodeIn_a_bits_address[28], nodeIn_a_bits_address[27:0] ^ 28'h8000000}; // @[Parameters.scala:137:31]
wire [28:0] _a_canLogical_T_37; // @[Parameters.scala:137:31]
assign _a_canLogical_T_37 = _GEN_6; // @[Parameters.scala:137:31]
wire [28:0] _a_canArithmetic_T_37; // @[Parameters.scala:137:31]
assign _a_canArithmetic_T_37 = _GEN_6; // @[Parameters.scala:137:31]
wire [29:0] _a_canLogical_T_38 = {1'h0, _a_canLogical_T_37}; // @[Parameters.scala:137:{31,41}]
wire [29:0] _a_canLogical_T_39 = _a_canLogical_T_38 & 30'h18000000; // @[Parameters.scala:137:{41,46}]
wire [29:0] _a_canLogical_T_40 = _a_canLogical_T_39; // @[Parameters.scala:137:46]
wire _a_canLogical_T_41 = _a_canLogical_T_40 == 30'h0; // @[Parameters.scala:137:{46,59}]
wire _a_canLogical_T_42 = _a_canLogical_T_21 | _a_canLogical_T_26; // @[Parameters.scala:685:42]
wire _a_canLogical_T_43 = _a_canLogical_T_42 | _a_canLogical_T_31; // @[Parameters.scala:685:42]
wire _a_canLogical_T_44 = _a_canLogical_T_43 | _a_canLogical_T_36; // @[Parameters.scala:685:42]
wire _a_canLogical_T_45 = _a_canLogical_T_44 | _a_canLogical_T_41; // @[Parameters.scala:685:42]
wire _a_canLogical_T_48 = _a_canLogical_T_47; // @[Parameters.scala:686:26]
wire a_canLogical = _a_canLogical_T_48; // @[Parameters.scala:686:26]
wire _a_canArithmetic_T_2 = _a_canArithmetic_T_1; // @[Parameters.scala:92:{33,38}]
wire _a_canArithmetic_T_3 = _a_canArithmetic_T_2; // @[Parameters.scala:684:29]
wire [29:0] _a_canArithmetic_T_5 = {1'h0, _a_canArithmetic_T_4}; // @[Parameters.scala:137:{31,41}]
wire [29:0] _a_canArithmetic_T_6 = _a_canArithmetic_T_5 & 30'h1A011000; // @[Parameters.scala:137:{41,46}]
wire [29:0] _a_canArithmetic_T_7 = _a_canArithmetic_T_6; // @[Parameters.scala:137:46]
wire _a_canArithmetic_T_8 = _a_canArithmetic_T_7 == 30'h0; // @[Parameters.scala:137:{46,59}]
wire [29:0] _a_canArithmetic_T_10 = {1'h0, _a_canArithmetic_T_9}; // @[Parameters.scala:137:{31,41}]
wire [29:0] _a_canArithmetic_T_11 = _a_canArithmetic_T_10 & 30'h1A001000; // @[Parameters.scala:137:{41,46}]
wire [29:0] _a_canArithmetic_T_12 = _a_canArithmetic_T_11; // @[Parameters.scala:137:46]
wire _a_canArithmetic_T_13 = _a_canArithmetic_T_12 == 30'h0; // @[Parameters.scala:137:{46,59}]
wire _a_canArithmetic_T_14 = _a_canArithmetic_T_8 | _a_canArithmetic_T_13; // @[Parameters.scala:685:42]
wire _a_canArithmetic_T_15 = _a_canArithmetic_T_3 & _a_canArithmetic_T_14; // @[Parameters.scala:684:{29,54}, :685:42]
wire _a_canArithmetic_T_47 = _a_canArithmetic_T_15; // @[Parameters.scala:684:54, :686:26]
wire [29:0] _a_canArithmetic_T_18 = {1'h0, _a_canArithmetic_T_17}; // @[Parameters.scala:137:{31,41}]
wire [29:0] _a_canArithmetic_T_19 = _a_canArithmetic_T_18 & 30'h1A001000; // @[Parameters.scala:137:{41,46}]
wire [29:0] _a_canArithmetic_T_20 = _a_canArithmetic_T_19; // @[Parameters.scala:137:46]
wire _a_canArithmetic_T_21 = _a_canArithmetic_T_20 == 30'h0; // @[Parameters.scala:137:{46,59}]
wire [29:0] _a_canArithmetic_T_23 = {1'h0, _a_canArithmetic_T_22}; // @[Parameters.scala:137:{31,41}]
wire [29:0] _a_canArithmetic_T_24 = _a_canArithmetic_T_23 & 30'h1A010000; // @[Parameters.scala:137:{41,46}]
wire [29:0] _a_canArithmetic_T_25 = _a_canArithmetic_T_24; // @[Parameters.scala:137:46]
wire _a_canArithmetic_T_26 = _a_canArithmetic_T_25 == 30'h0; // @[Parameters.scala:137:{46,59}]
wire [29:0] _a_canArithmetic_T_28 = {1'h0, _a_canArithmetic_T_27}; // @[Parameters.scala:137:{31,41}]
wire [29:0] _a_canArithmetic_T_29 = _a_canArithmetic_T_28 & 30'h1A010000; // @[Parameters.scala:137:{41,46}]
wire [29:0] _a_canArithmetic_T_30 = _a_canArithmetic_T_29; // @[Parameters.scala:137:46]
wire _a_canArithmetic_T_31 = _a_canArithmetic_T_30 == 30'h0; // @[Parameters.scala:137:{46,59}]
wire [29:0] _a_canArithmetic_T_33 = {1'h0, _a_canArithmetic_T_32}; // @[Parameters.scala:137:{31,41}]
wire [29:0] _a_canArithmetic_T_34 = _a_canArithmetic_T_33 & 30'h1A011000; // @[Parameters.scala:137:{41,46}]
wire [29:0] _a_canArithmetic_T_35 = _a_canArithmetic_T_34; // @[Parameters.scala:137:46]
wire _a_canArithmetic_T_36 = _a_canArithmetic_T_35 == 30'h0; // @[Parameters.scala:137:{46,59}]
wire [29:0] _a_canArithmetic_T_38 = {1'h0, _a_canArithmetic_T_37}; // @[Parameters.scala:137:{31,41}]
wire [29:0] _a_canArithmetic_T_39 = _a_canArithmetic_T_38 & 30'h18000000; // @[Parameters.scala:137:{41,46}]
wire [29:0] _a_canArithmetic_T_40 = _a_canArithmetic_T_39; // @[Parameters.scala:137:46]
wire _a_canArithmetic_T_41 = _a_canArithmetic_T_40 == 30'h0; // @[Parameters.scala:137:{46,59}]
wire _a_canArithmetic_T_42 = _a_canArithmetic_T_21 | _a_canArithmetic_T_26; // @[Parameters.scala:685:42]
wire _a_canArithmetic_T_43 = _a_canArithmetic_T_42 | _a_canArithmetic_T_31; // @[Parameters.scala:685:42]
wire _a_canArithmetic_T_44 = _a_canArithmetic_T_43 | _a_canArithmetic_T_36; // @[Parameters.scala:685:42]
wire _a_canArithmetic_T_45 = _a_canArithmetic_T_44 | _a_canArithmetic_T_41; // @[Parameters.scala:685:42]
wire _a_canArithmetic_T_48 = _a_canArithmetic_T_47; // @[Parameters.scala:686:26]
wire a_canArithmetic = _a_canArithmetic_T_48; // @[Parameters.scala:686:26]
wire a_isLogical = nodeIn_a_bits_opcode == 3'h3; // @[AtomicAutomata.scala:96:47]
wire a_isArithmetic = nodeIn_a_bits_opcode == 3'h2; // @[AtomicAutomata.scala:97:47]
wire _a_isSupported_T = ~a_isArithmetic | a_canArithmetic; // @[AtomicAutomata.scala:95:45, :97:47, :98:63]
wire a_isSupported = a_isLogical ? a_canLogical : _a_isSupported_T; // @[AtomicAutomata.scala:94:45, :96:47, :98:{32,63}]
wire [29:0] _a_fifoId_T_1 = {1'h0, _a_fifoId_T}; // @[Parameters.scala:137:{31,41}]
wire _indexes_T = cam_a_0_bits_data[0]; // @[AtomicAutomata.scala:83:24, :119:63]
wire _indexes_T_1 = cam_d_0_data[0]; // @[AtomicAutomata.scala:84:24, :119:73]
wire [1:0] indexes_0 = {_indexes_T, _indexes_T_1}; // @[AtomicAutomata.scala:119:{59,63,73}]
wire _indexes_T_2 = cam_a_0_bits_data[1]; // @[AtomicAutomata.scala:83:24, :119:63]
wire _indexes_T_3 = cam_d_0_data[1]; // @[AtomicAutomata.scala:84:24, :119:73]
wire [1:0] indexes_1 = {_indexes_T_2, _indexes_T_3}; // @[AtomicAutomata.scala:119:{59,63,73}]
wire _indexes_T_4 = cam_a_0_bits_data[2]; // @[AtomicAutomata.scala:83:24, :119:63]
wire _indexes_T_5 = cam_d_0_data[2]; // @[AtomicAutomata.scala:84:24, :119:73]
wire [1:0] indexes_2 = {_indexes_T_4, _indexes_T_5}; // @[AtomicAutomata.scala:119:{59,63,73}]
wire _indexes_T_6 = cam_a_0_bits_data[3]; // @[AtomicAutomata.scala:83:24, :119:63]
wire _indexes_T_7 = cam_d_0_data[3]; // @[AtomicAutomata.scala:84:24, :119:73]
wire [1:0] indexes_3 = {_indexes_T_6, _indexes_T_7}; // @[AtomicAutomata.scala:119:{59,63,73}]
wire _indexes_T_8 = cam_a_0_bits_data[4]; // @[AtomicAutomata.scala:83:24, :119:63]
wire _indexes_T_9 = cam_d_0_data[4]; // @[AtomicAutomata.scala:84:24, :119:73]
wire [1:0] indexes_4 = {_indexes_T_8, _indexes_T_9}; // @[AtomicAutomata.scala:119:{59,63,73}]
wire _indexes_T_10 = cam_a_0_bits_data[5]; // @[AtomicAutomata.scala:83:24, :119:63]
wire _indexes_T_11 = cam_d_0_data[5]; // @[AtomicAutomata.scala:84:24, :119:73]
wire [1:0] indexes_5 = {_indexes_T_10, _indexes_T_11}; // @[AtomicAutomata.scala:119:{59,63,73}]
wire _indexes_T_12 = cam_a_0_bits_data[6]; // @[AtomicAutomata.scala:83:24, :119:63]
wire _indexes_T_13 = cam_d_0_data[6]; // @[AtomicAutomata.scala:84:24, :119:73]
wire [1:0] indexes_6 = {_indexes_T_12, _indexes_T_13}; // @[AtomicAutomata.scala:119:{59,63,73}]
wire _indexes_T_14 = cam_a_0_bits_data[7]; // @[AtomicAutomata.scala:83:24, :119:63]
wire _signbits_a_T = cam_a_0_bits_data[7]; // @[AtomicAutomata.scala:83:24, :119:63, :128:64]
wire _indexes_T_15 = cam_d_0_data[7]; // @[AtomicAutomata.scala:84:24, :119:73]
wire _signbits_d_T = cam_d_0_data[7]; // @[AtomicAutomata.scala:84:24, :119:73, :129:64]
wire [1:0] indexes_7 = {_indexes_T_14, _indexes_T_15}; // @[AtomicAutomata.scala:119:{59,63,73}]
wire _indexes_T_16 = cam_a_0_bits_data[8]; // @[AtomicAutomata.scala:83:24, :119:63]
wire _indexes_T_17 = cam_d_0_data[8]; // @[AtomicAutomata.scala:84:24, :119:73]
wire [1:0] indexes_8 = {_indexes_T_16, _indexes_T_17}; // @[AtomicAutomata.scala:119:{59,63,73}]
wire _indexes_T_18 = cam_a_0_bits_data[9]; // @[AtomicAutomata.scala:83:24, :119:63]
wire _indexes_T_19 = cam_d_0_data[9]; // @[AtomicAutomata.scala:84:24, :119:73]
wire [1:0] indexes_9 = {_indexes_T_18, _indexes_T_19}; // @[AtomicAutomata.scala:119:{59,63,73}]
wire _indexes_T_20 = cam_a_0_bits_data[10]; // @[AtomicAutomata.scala:83:24, :119:63]
wire _indexes_T_21 = cam_d_0_data[10]; // @[AtomicAutomata.scala:84:24, :119:73]
wire [1:0] indexes_10 = {_indexes_T_20, _indexes_T_21}; // @[AtomicAutomata.scala:119:{59,63,73}]
wire _indexes_T_22 = cam_a_0_bits_data[11]; // @[AtomicAutomata.scala:83:24, :119:63]
wire _indexes_T_23 = cam_d_0_data[11]; // @[AtomicAutomata.scala:84:24, :119:73]
wire [1:0] indexes_11 = {_indexes_T_22, _indexes_T_23}; // @[AtomicAutomata.scala:119:{59,63,73}]
wire _indexes_T_24 = cam_a_0_bits_data[12]; // @[AtomicAutomata.scala:83:24, :119:63]
wire _indexes_T_25 = cam_d_0_data[12]; // @[AtomicAutomata.scala:84:24, :119:73]
wire [1:0] indexes_12 = {_indexes_T_24, _indexes_T_25}; // @[AtomicAutomata.scala:119:{59,63,73}]
wire _indexes_T_26 = cam_a_0_bits_data[13]; // @[AtomicAutomata.scala:83:24, :119:63]
wire _indexes_T_27 = cam_d_0_data[13]; // @[AtomicAutomata.scala:84:24, :119:73]
wire [1:0] indexes_13 = {_indexes_T_26, _indexes_T_27}; // @[AtomicAutomata.scala:119:{59,63,73}]
wire _indexes_T_28 = cam_a_0_bits_data[14]; // @[AtomicAutomata.scala:83:24, :119:63]
wire _indexes_T_29 = cam_d_0_data[14]; // @[AtomicAutomata.scala:84:24, :119:73]
wire [1:0] indexes_14 = {_indexes_T_28, _indexes_T_29}; // @[AtomicAutomata.scala:119:{59,63,73}]
wire _indexes_T_30 = cam_a_0_bits_data[15]; // @[AtomicAutomata.scala:83:24, :119:63]
wire _signbits_a_T_1 = cam_a_0_bits_data[15]; // @[AtomicAutomata.scala:83:24, :119:63, :128:64]
wire _indexes_T_31 = cam_d_0_data[15]; // @[AtomicAutomata.scala:84:24, :119:73]
wire _signbits_d_T_1 = cam_d_0_data[15]; // @[AtomicAutomata.scala:84:24, :119:73, :129:64]
wire [1:0] indexes_15 = {_indexes_T_30, _indexes_T_31}; // @[AtomicAutomata.scala:119:{59,63,73}]
wire _indexes_T_32 = cam_a_0_bits_data[16]; // @[AtomicAutomata.scala:83:24, :119:63]
wire _indexes_T_33 = cam_d_0_data[16]; // @[AtomicAutomata.scala:84:24, :119:73]
wire [1:0] indexes_16 = {_indexes_T_32, _indexes_T_33}; // @[AtomicAutomata.scala:119:{59,63,73}]
wire _indexes_T_34 = cam_a_0_bits_data[17]; // @[AtomicAutomata.scala:83:24, :119:63]
wire _indexes_T_35 = cam_d_0_data[17]; // @[AtomicAutomata.scala:84:24, :119:73]
wire [1:0] indexes_17 = {_indexes_T_34, _indexes_T_35}; // @[AtomicAutomata.scala:119:{59,63,73}]
wire _indexes_T_36 = cam_a_0_bits_data[18]; // @[AtomicAutomata.scala:83:24, :119:63]
wire _indexes_T_37 = cam_d_0_data[18]; // @[AtomicAutomata.scala:84:24, :119:73]
wire [1:0] indexes_18 = {_indexes_T_36, _indexes_T_37}; // @[AtomicAutomata.scala:119:{59,63,73}]
wire _indexes_T_38 = cam_a_0_bits_data[19]; // @[AtomicAutomata.scala:83:24, :119:63]
wire _indexes_T_39 = cam_d_0_data[19]; // @[AtomicAutomata.scala:84:24, :119:73]
wire [1:0] indexes_19 = {_indexes_T_38, _indexes_T_39}; // @[AtomicAutomata.scala:119:{59,63,73}]
wire _indexes_T_40 = cam_a_0_bits_data[20]; // @[AtomicAutomata.scala:83:24, :119:63]
wire _indexes_T_41 = cam_d_0_data[20]; // @[AtomicAutomata.scala:84:24, :119:73]
wire [1:0] indexes_20 = {_indexes_T_40, _indexes_T_41}; // @[AtomicAutomata.scala:119:{59,63,73}]
wire _indexes_T_42 = cam_a_0_bits_data[21]; // @[AtomicAutomata.scala:83:24, :119:63]
wire _indexes_T_43 = cam_d_0_data[21]; // @[AtomicAutomata.scala:84:24, :119:73]
wire [1:0] indexes_21 = {_indexes_T_42, _indexes_T_43}; // @[AtomicAutomata.scala:119:{59,63,73}]
wire _indexes_T_44 = cam_a_0_bits_data[22]; // @[AtomicAutomata.scala:83:24, :119:63]
wire _indexes_T_45 = cam_d_0_data[22]; // @[AtomicAutomata.scala:84:24, :119:73]
wire [1:0] indexes_22 = {_indexes_T_44, _indexes_T_45}; // @[AtomicAutomata.scala:119:{59,63,73}]
wire _indexes_T_46 = cam_a_0_bits_data[23]; // @[AtomicAutomata.scala:83:24, :119:63]
wire _signbits_a_T_2 = cam_a_0_bits_data[23]; // @[AtomicAutomata.scala:83:24, :119:63, :128:64]
wire _indexes_T_47 = cam_d_0_data[23]; // @[AtomicAutomata.scala:84:24, :119:73]
wire _signbits_d_T_2 = cam_d_0_data[23]; // @[AtomicAutomata.scala:84:24, :119:73, :129:64]
wire [1:0] indexes_23 = {_indexes_T_46, _indexes_T_47}; // @[AtomicAutomata.scala:119:{59,63,73}]
wire _indexes_T_48 = cam_a_0_bits_data[24]; // @[AtomicAutomata.scala:83:24, :119:63]
wire _indexes_T_49 = cam_d_0_data[24]; // @[AtomicAutomata.scala:84:24, :119:73]
wire [1:0] indexes_24 = {_indexes_T_48, _indexes_T_49}; // @[AtomicAutomata.scala:119:{59,63,73}]
wire _indexes_T_50 = cam_a_0_bits_data[25]; // @[AtomicAutomata.scala:83:24, :119:63]
wire _indexes_T_51 = cam_d_0_data[25]; // @[AtomicAutomata.scala:84:24, :119:73]
wire [1:0] indexes_25 = {_indexes_T_50, _indexes_T_51}; // @[AtomicAutomata.scala:119:{59,63,73}]
wire _indexes_T_52 = cam_a_0_bits_data[26]; // @[AtomicAutomata.scala:83:24, :119:63]
wire _indexes_T_53 = cam_d_0_data[26]; // @[AtomicAutomata.scala:84:24, :119:73]
wire [1:0] indexes_26 = {_indexes_T_52, _indexes_T_53}; // @[AtomicAutomata.scala:119:{59,63,73}]
wire _indexes_T_54 = cam_a_0_bits_data[27]; // @[AtomicAutomata.scala:83:24, :119:63]
wire _indexes_T_55 = cam_d_0_data[27]; // @[AtomicAutomata.scala:84:24, :119:73]
wire [1:0] indexes_27 = {_indexes_T_54, _indexes_T_55}; // @[AtomicAutomata.scala:119:{59,63,73}]
wire _indexes_T_56 = cam_a_0_bits_data[28]; // @[AtomicAutomata.scala:83:24, :119:63]
wire _indexes_T_57 = cam_d_0_data[28]; // @[AtomicAutomata.scala:84:24, :119:73]
wire [1:0] indexes_28 = {_indexes_T_56, _indexes_T_57}; // @[AtomicAutomata.scala:119:{59,63,73}]
wire _indexes_T_58 = cam_a_0_bits_data[29]; // @[AtomicAutomata.scala:83:24, :119:63]
wire _indexes_T_59 = cam_d_0_data[29]; // @[AtomicAutomata.scala:84:24, :119:73]
wire [1:0] indexes_29 = {_indexes_T_58, _indexes_T_59}; // @[AtomicAutomata.scala:119:{59,63,73}]
wire _indexes_T_60 = cam_a_0_bits_data[30]; // @[AtomicAutomata.scala:83:24, :119:63]
wire _indexes_T_61 = cam_d_0_data[30]; // @[AtomicAutomata.scala:84:24, :119:73]
wire [1:0] indexes_30 = {_indexes_T_60, _indexes_T_61}; // @[AtomicAutomata.scala:119:{59,63,73}]
wire _indexes_T_62 = cam_a_0_bits_data[31]; // @[AtomicAutomata.scala:83:24, :119:63]
wire _signbits_a_T_3 = cam_a_0_bits_data[31]; // @[AtomicAutomata.scala:83:24, :119:63, :128:64]
wire _indexes_T_63 = cam_d_0_data[31]; // @[AtomicAutomata.scala:84:24, :119:73]
wire _signbits_d_T_3 = cam_d_0_data[31]; // @[AtomicAutomata.scala:84:24, :119:73, :129:64]
wire [1:0] indexes_31 = {_indexes_T_62, _indexes_T_63}; // @[AtomicAutomata.scala:119:{59,63,73}]
wire _indexes_T_64 = cam_a_0_bits_data[32]; // @[AtomicAutomata.scala:83:24, :119:63]
wire _indexes_T_65 = cam_d_0_data[32]; // @[AtomicAutomata.scala:84:24, :119:73]
wire [1:0] indexes_32 = {_indexes_T_64, _indexes_T_65}; // @[AtomicAutomata.scala:119:{59,63,73}]
wire _indexes_T_66 = cam_a_0_bits_data[33]; // @[AtomicAutomata.scala:83:24, :119:63]
wire _indexes_T_67 = cam_d_0_data[33]; // @[AtomicAutomata.scala:84:24, :119:73]
wire [1:0] indexes_33 = {_indexes_T_66, _indexes_T_67}; // @[AtomicAutomata.scala:119:{59,63,73}]
wire _indexes_T_68 = cam_a_0_bits_data[34]; // @[AtomicAutomata.scala:83:24, :119:63]
wire _indexes_T_69 = cam_d_0_data[34]; // @[AtomicAutomata.scala:84:24, :119:73]
wire [1:0] indexes_34 = {_indexes_T_68, _indexes_T_69}; // @[AtomicAutomata.scala:119:{59,63,73}]
wire _indexes_T_70 = cam_a_0_bits_data[35]; // @[AtomicAutomata.scala:83:24, :119:63]
wire _indexes_T_71 = cam_d_0_data[35]; // @[AtomicAutomata.scala:84:24, :119:73]
wire [1:0] indexes_35 = {_indexes_T_70, _indexes_T_71}; // @[AtomicAutomata.scala:119:{59,63,73}]
wire _indexes_T_72 = cam_a_0_bits_data[36]; // @[AtomicAutomata.scala:83:24, :119:63]
wire _indexes_T_73 = cam_d_0_data[36]; // @[AtomicAutomata.scala:84:24, :119:73]
wire [1:0] indexes_36 = {_indexes_T_72, _indexes_T_73}; // @[AtomicAutomata.scala:119:{59,63,73}]
wire _indexes_T_74 = cam_a_0_bits_data[37]; // @[AtomicAutomata.scala:83:24, :119:63]
wire _indexes_T_75 = cam_d_0_data[37]; // @[AtomicAutomata.scala:84:24, :119:73]
wire [1:0] indexes_37 = {_indexes_T_74, _indexes_T_75}; // @[AtomicAutomata.scala:119:{59,63,73}]
wire _indexes_T_76 = cam_a_0_bits_data[38]; // @[AtomicAutomata.scala:83:24, :119:63]
wire _indexes_T_77 = cam_d_0_data[38]; // @[AtomicAutomata.scala:84:24, :119:73]
wire [1:0] indexes_38 = {_indexes_T_76, _indexes_T_77}; // @[AtomicAutomata.scala:119:{59,63,73}]
wire _indexes_T_78 = cam_a_0_bits_data[39]; // @[AtomicAutomata.scala:83:24, :119:63]
wire _signbits_a_T_4 = cam_a_0_bits_data[39]; // @[AtomicAutomata.scala:83:24, :119:63, :128:64]
wire _indexes_T_79 = cam_d_0_data[39]; // @[AtomicAutomata.scala:84:24, :119:73]
wire _signbits_d_T_4 = cam_d_0_data[39]; // @[AtomicAutomata.scala:84:24, :119:73, :129:64]
wire [1:0] indexes_39 = {_indexes_T_78, _indexes_T_79}; // @[AtomicAutomata.scala:119:{59,63,73}]
wire _indexes_T_80 = cam_a_0_bits_data[40]; // @[AtomicAutomata.scala:83:24, :119:63]
wire _indexes_T_81 = cam_d_0_data[40]; // @[AtomicAutomata.scala:84:24, :119:73]
wire [1:0] indexes_40 = {_indexes_T_80, _indexes_T_81}; // @[AtomicAutomata.scala:119:{59,63,73}]
wire _indexes_T_82 = cam_a_0_bits_data[41]; // @[AtomicAutomata.scala:83:24, :119:63]
wire _indexes_T_83 = cam_d_0_data[41]; // @[AtomicAutomata.scala:84:24, :119:73]
wire [1:0] indexes_41 = {_indexes_T_82, _indexes_T_83}; // @[AtomicAutomata.scala:119:{59,63,73}]
wire _indexes_T_84 = cam_a_0_bits_data[42]; // @[AtomicAutomata.scala:83:24, :119:63]
wire _indexes_T_85 = cam_d_0_data[42]; // @[AtomicAutomata.scala:84:24, :119:73]
wire [1:0] indexes_42 = {_indexes_T_84, _indexes_T_85}; // @[AtomicAutomata.scala:119:{59,63,73}]
wire _indexes_T_86 = cam_a_0_bits_data[43]; // @[AtomicAutomata.scala:83:24, :119:63]
wire _indexes_T_87 = cam_d_0_data[43]; // @[AtomicAutomata.scala:84:24, :119:73]
wire [1:0] indexes_43 = {_indexes_T_86, _indexes_T_87}; // @[AtomicAutomata.scala:119:{59,63,73}]
wire _indexes_T_88 = cam_a_0_bits_data[44]; // @[AtomicAutomata.scala:83:24, :119:63]
wire _indexes_T_89 = cam_d_0_data[44]; // @[AtomicAutomata.scala:84:24, :119:73]
wire [1:0] indexes_44 = {_indexes_T_88, _indexes_T_89}; // @[AtomicAutomata.scala:119:{59,63,73}]
wire _indexes_T_90 = cam_a_0_bits_data[45]; // @[AtomicAutomata.scala:83:24, :119:63]
wire _indexes_T_91 = cam_d_0_data[45]; // @[AtomicAutomata.scala:84:24, :119:73]
wire [1:0] indexes_45 = {_indexes_T_90, _indexes_T_91}; // @[AtomicAutomata.scala:119:{59,63,73}]
wire _indexes_T_92 = cam_a_0_bits_data[46]; // @[AtomicAutomata.scala:83:24, :119:63]
wire _indexes_T_93 = cam_d_0_data[46]; // @[AtomicAutomata.scala:84:24, :119:73]
wire [1:0] indexes_46 = {_indexes_T_92, _indexes_T_93}; // @[AtomicAutomata.scala:119:{59,63,73}]
wire _indexes_T_94 = cam_a_0_bits_data[47]; // @[AtomicAutomata.scala:83:24, :119:63]
wire _signbits_a_T_5 = cam_a_0_bits_data[47]; // @[AtomicAutomata.scala:83:24, :119:63, :128:64]
wire _indexes_T_95 = cam_d_0_data[47]; // @[AtomicAutomata.scala:84:24, :119:73]
wire _signbits_d_T_5 = cam_d_0_data[47]; // @[AtomicAutomata.scala:84:24, :119:73, :129:64]
wire [1:0] indexes_47 = {_indexes_T_94, _indexes_T_95}; // @[AtomicAutomata.scala:119:{59,63,73}]
wire _indexes_T_96 = cam_a_0_bits_data[48]; // @[AtomicAutomata.scala:83:24, :119:63]
wire _indexes_T_97 = cam_d_0_data[48]; // @[AtomicAutomata.scala:84:24, :119:73]
wire [1:0] indexes_48 = {_indexes_T_96, _indexes_T_97}; // @[AtomicAutomata.scala:119:{59,63,73}]
wire _indexes_T_98 = cam_a_0_bits_data[49]; // @[AtomicAutomata.scala:83:24, :119:63]
wire _indexes_T_99 = cam_d_0_data[49]; // @[AtomicAutomata.scala:84:24, :119:73]
wire [1:0] indexes_49 = {_indexes_T_98, _indexes_T_99}; // @[AtomicAutomata.scala:119:{59,63,73}]
wire _indexes_T_100 = cam_a_0_bits_data[50]; // @[AtomicAutomata.scala:83:24, :119:63]
wire _indexes_T_101 = cam_d_0_data[50]; // @[AtomicAutomata.scala:84:24, :119:73]
wire [1:0] indexes_50 = {_indexes_T_100, _indexes_T_101}; // @[AtomicAutomata.scala:119:{59,63,73}]
wire _indexes_T_102 = cam_a_0_bits_data[51]; // @[AtomicAutomata.scala:83:24, :119:63]
wire _indexes_T_103 = cam_d_0_data[51]; // @[AtomicAutomata.scala:84:24, :119:73]
wire [1:0] indexes_51 = {_indexes_T_102, _indexes_T_103}; // @[AtomicAutomata.scala:119:{59,63,73}]
wire _indexes_T_104 = cam_a_0_bits_data[52]; // @[AtomicAutomata.scala:83:24, :119:63]
wire _indexes_T_105 = cam_d_0_data[52]; // @[AtomicAutomata.scala:84:24, :119:73]
wire [1:0] indexes_52 = {_indexes_T_104, _indexes_T_105}; // @[AtomicAutomata.scala:119:{59,63,73}]
wire _indexes_T_106 = cam_a_0_bits_data[53]; // @[AtomicAutomata.scala:83:24, :119:63]
wire _indexes_T_107 = cam_d_0_data[53]; // @[AtomicAutomata.scala:84:24, :119:73]
wire [1:0] indexes_53 = {_indexes_T_106, _indexes_T_107}; // @[AtomicAutomata.scala:119:{59,63,73}]
wire _indexes_T_108 = cam_a_0_bits_data[54]; // @[AtomicAutomata.scala:83:24, :119:63]
wire _indexes_T_109 = cam_d_0_data[54]; // @[AtomicAutomata.scala:84:24, :119:73]
wire [1:0] indexes_54 = {_indexes_T_108, _indexes_T_109}; // @[AtomicAutomata.scala:119:{59,63,73}]
wire _indexes_T_110 = cam_a_0_bits_data[55]; // @[AtomicAutomata.scala:83:24, :119:63]
wire _signbits_a_T_6 = cam_a_0_bits_data[55]; // @[AtomicAutomata.scala:83:24, :119:63, :128:64]
wire _indexes_T_111 = cam_d_0_data[55]; // @[AtomicAutomata.scala:84:24, :119:73]
wire _signbits_d_T_6 = cam_d_0_data[55]; // @[AtomicAutomata.scala:84:24, :119:73, :129:64]
wire [1:0] indexes_55 = {_indexes_T_110, _indexes_T_111}; // @[AtomicAutomata.scala:119:{59,63,73}]
wire _indexes_T_112 = cam_a_0_bits_data[56]; // @[AtomicAutomata.scala:83:24, :119:63]
wire _indexes_T_113 = cam_d_0_data[56]; // @[AtomicAutomata.scala:84:24, :119:73]
wire [1:0] indexes_56 = {_indexes_T_112, _indexes_T_113}; // @[AtomicAutomata.scala:119:{59,63,73}]
wire _indexes_T_114 = cam_a_0_bits_data[57]; // @[AtomicAutomata.scala:83:24, :119:63]
wire _indexes_T_115 = cam_d_0_data[57]; // @[AtomicAutomata.scala:84:24, :119:73]
wire [1:0] indexes_57 = {_indexes_T_114, _indexes_T_115}; // @[AtomicAutomata.scala:119:{59,63,73}]
wire _indexes_T_116 = cam_a_0_bits_data[58]; // @[AtomicAutomata.scala:83:24, :119:63]
wire _indexes_T_117 = cam_d_0_data[58]; // @[AtomicAutomata.scala:84:24, :119:73]
wire [1:0] indexes_58 = {_indexes_T_116, _indexes_T_117}; // @[AtomicAutomata.scala:119:{59,63,73}]
wire _indexes_T_118 = cam_a_0_bits_data[59]; // @[AtomicAutomata.scala:83:24, :119:63]
wire _indexes_T_119 = cam_d_0_data[59]; // @[AtomicAutomata.scala:84:24, :119:73]
wire [1:0] indexes_59 = {_indexes_T_118, _indexes_T_119}; // @[AtomicAutomata.scala:119:{59,63,73}]
wire _indexes_T_120 = cam_a_0_bits_data[60]; // @[AtomicAutomata.scala:83:24, :119:63]
wire _indexes_T_121 = cam_d_0_data[60]; // @[AtomicAutomata.scala:84:24, :119:73]
wire [1:0] indexes_60 = {_indexes_T_120, _indexes_T_121}; // @[AtomicAutomata.scala:119:{59,63,73}]
wire _indexes_T_122 = cam_a_0_bits_data[61]; // @[AtomicAutomata.scala:83:24, :119:63]
wire _indexes_T_123 = cam_d_0_data[61]; // @[AtomicAutomata.scala:84:24, :119:73]
wire [1:0] indexes_61 = {_indexes_T_122, _indexes_T_123}; // @[AtomicAutomata.scala:119:{59,63,73}]
wire _indexes_T_124 = cam_a_0_bits_data[62]; // @[AtomicAutomata.scala:83:24, :119:63]
wire _indexes_T_125 = cam_d_0_data[62]; // @[AtomicAutomata.scala:84:24, :119:73]
wire [1:0] indexes_62 = {_indexes_T_124, _indexes_T_125}; // @[AtomicAutomata.scala:119:{59,63,73}]
wire _indexes_T_126 = cam_a_0_bits_data[63]; // @[AtomicAutomata.scala:83:24, :119:63]
wire _signbits_a_T_7 = cam_a_0_bits_data[63]; // @[AtomicAutomata.scala:83:24, :119:63, :128:64]
wire _indexes_T_127 = cam_d_0_data[63]; // @[AtomicAutomata.scala:84:24, :119:73]
wire _signbits_d_T_7 = cam_d_0_data[63]; // @[AtomicAutomata.scala:84:24, :119:73, :129:64]
wire [1:0] indexes_63 = {_indexes_T_126, _indexes_T_127}; // @[AtomicAutomata.scala:119:{59,63,73}]
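  // Per-bit LUT lookup: shifting cam_a_0_lut right by indexes_N and keeping bit 0 evaluates
  // the logical operation recorded in the LUT when the AMO entered the CAM, one result per
  // bit of the 64-bit beat.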
wire [3:0] _logic_out_T = cam_a_0_lut >> indexes_0; // @[AtomicAutomata.scala:83:24, :119:59, :120:57]
wire _logic_out_T_1 = _logic_out_T[0]; // @[AtomicAutomata.scala:120:57]
wire [3:0] _logic_out_T_2 = cam_a_0_lut >> indexes_1; // @[AtomicAutomata.scala:83:24, :119:59, :120:57]
wire _logic_out_T_3 = _logic_out_T_2[0]; // @[AtomicAutomata.scala:120:57]
wire [3:0] _logic_out_T_4 = cam_a_0_lut >> indexes_2; // @[AtomicAutomata.scala:83:24, :119:59, :120:57]
wire _logic_out_T_5 = _logic_out_T_4[0]; // @[AtomicAutomata.scala:120:57]
wire [3:0] _logic_out_T_6 = cam_a_0_lut >> indexes_3; // @[AtomicAutomata.scala:83:24, :119:59, :120:57]
wire _logic_out_T_7 = _logic_out_T_6[0]; // @[AtomicAutomata.scala:120:57]
wire [3:0] _logic_out_T_8 = cam_a_0_lut >> indexes_4; // @[AtomicAutomata.scala:83:24, :119:59, :120:57]
wire _logic_out_T_9 = _logic_out_T_8[0]; // @[AtomicAutomata.scala:120:57]
wire [3:0] _logic_out_T_10 = cam_a_0_lut >> indexes_5; // @[AtomicAutomata.scala:83:24, :119:59, :120:57]
wire _logic_out_T_11 = _logic_out_T_10[0]; // @[AtomicAutomata.scala:120:57]
wire [3:0] _logic_out_T_12 = cam_a_0_lut >> indexes_6; // @[AtomicAutomata.scala:83:24, :119:59, :120:57]
wire _logic_out_T_13 = _logic_out_T_12[0]; // @[AtomicAutomata.scala:120:57]
wire [3:0] _logic_out_T_14 = cam_a_0_lut >> indexes_7; // @[AtomicAutomata.scala:83:24, :119:59, :120:57]
wire _logic_out_T_15 = _logic_out_T_14[0]; // @[AtomicAutomata.scala:120:57]
wire [3:0] _logic_out_T_16 = cam_a_0_lut >> indexes_8; // @[AtomicAutomata.scala:83:24, :119:59, :120:57]
wire _logic_out_T_17 = _logic_out_T_16[0]; // @[AtomicAutomata.scala:120:57]
wire [3:0] _logic_out_T_18 = cam_a_0_lut >> indexes_9; // @[AtomicAutomata.scala:83:24, :119:59, :120:57]
wire _logic_out_T_19 = _logic_out_T_18[0]; // @[AtomicAutomata.scala:120:57]
wire [3:0] _logic_out_T_20 = cam_a_0_lut >> indexes_10; // @[AtomicAutomata.scala:83:24, :119:59, :120:57]
wire _logic_out_T_21 = _logic_out_T_20[0]; // @[AtomicAutomata.scala:120:57]
wire [3:0] _logic_out_T_22 = cam_a_0_lut >> indexes_11; // @[AtomicAutomata.scala:83:24, :119:59, :120:57]
wire _logic_out_T_23 = _logic_out_T_22[0]; // @[AtomicAutomata.scala:120:57]
wire [3:0] _logic_out_T_24 = cam_a_0_lut >> indexes_12; // @[AtomicAutomata.scala:83:24, :119:59, :120:57]
wire _logic_out_T_25 = _logic_out_T_24[0]; // @[AtomicAutomata.scala:120:57]
wire [3:0] _logic_out_T_26 = cam_a_0_lut >> indexes_13; // @[AtomicAutomata.scala:83:24, :119:59, :120:57]
wire _logic_out_T_27 = _logic_out_T_26[0]; // @[AtomicAutomata.scala:120:57]
wire [3:0] _logic_out_T_28 = cam_a_0_lut >> indexes_14; // @[AtomicAutomata.scala:83:24, :119:59, :120:57]
wire _logic_out_T_29 = _logic_out_T_28[0]; // @[AtomicAutomata.scala:120:57]
wire [3:0] _logic_out_T_30 = cam_a_0_lut >> indexes_15; // @[AtomicAutomata.scala:83:24, :119:59, :120:57]
wire _logic_out_T_31 = _logic_out_T_30[0]; // @[AtomicAutomata.scala:120:57]
wire [3:0] _logic_out_T_32 = cam_a_0_lut >> indexes_16; // @[AtomicAutomata.scala:83:24, :119:59, :120:57]
wire _logic_out_T_33 = _logic_out_T_32[0]; // @[AtomicAutomata.scala:120:57]
wire [3:0] _logic_out_T_34 = cam_a_0_lut >> indexes_17; // @[AtomicAutomata.scala:83:24, :119:59, :120:57]
wire _logic_out_T_35 = _logic_out_T_34[0]; // @[AtomicAutomata.scala:120:57]
wire [3:0] _logic_out_T_36 = cam_a_0_lut >> indexes_18; // @[AtomicAutomata.scala:83:24, :119:59, :120:57]
wire _logic_out_T_37 = _logic_out_T_36[0]; // @[AtomicAutomata.scala:120:57]
wire [3:0] _logic_out_T_38 = cam_a_0_lut >> indexes_19; // @[AtomicAutomata.scala:83:24, :119:59, :120:57]
wire _logic_out_T_39 = _logic_out_T_38[0]; // @[AtomicAutomata.scala:120:57]
wire [3:0] _logic_out_T_40 = cam_a_0_lut >> indexes_20; // @[AtomicAutomata.scala:83:24, :119:59, :120:57]
wire _logic_out_T_41 = _logic_out_T_40[0]; // @[AtomicAutomata.scala:120:57]
wire [3:0] _logic_out_T_42 = cam_a_0_lut >> indexes_21; // @[AtomicAutomata.scala:83:24, :119:59, :120:57]
wire _logic_out_T_43 = _logic_out_T_42[0]; // @[AtomicAutomata.scala:120:57]
wire [3:0] _logic_out_T_44 = cam_a_0_lut >> indexes_22; // @[AtomicAutomata.scala:83:24, :119:59, :120:57]
wire _logic_out_T_45 = _logic_out_T_44[0]; // @[AtomicAutomata.scala:120:57]
wire [3:0] _logic_out_T_46 = cam_a_0_lut >> indexes_23; // @[AtomicAutomata.scala:83:24, :119:59, :120:57]
wire _logic_out_T_47 = _logic_out_T_46[0]; // @[AtomicAutomata.scala:120:57]
wire [3:0] _logic_out_T_48 = cam_a_0_lut >> indexes_24; // @[AtomicAutomata.scala:83:24, :119:59, :120:57]
wire _logic_out_T_49 = _logic_out_T_48[0]; // @[AtomicAutomata.scala:120:57]
wire [3:0] _logic_out_T_50 = cam_a_0_lut >> indexes_25; // @[AtomicAutomata.scala:83:24, :119:59, :120:57]
wire _logic_out_T_51 = _logic_out_T_50[0]; // @[AtomicAutomata.scala:120:57]
wire [3:0] _logic_out_T_52 = cam_a_0_lut >> indexes_26; // @[AtomicAutomata.scala:83:24, :119:59, :120:57]
wire _logic_out_T_53 = _logic_out_T_52[0]; // @[AtomicAutomata.scala:120:57]
wire [3:0] _logic_out_T_54 = cam_a_0_lut >> indexes_27; // @[AtomicAutomata.scala:83:24, :119:59, :120:57]
wire _logic_out_T_55 = _logic_out_T_54[0]; // @[AtomicAutomata.scala:120:57]
wire [3:0] _logic_out_T_56 = cam_a_0_lut >> indexes_28; // @[AtomicAutomata.scala:83:24, :119:59, :120:57]
wire _logic_out_T_57 = _logic_out_T_56[0]; // @[AtomicAutomata.scala:120:57]
wire [3:0] _logic_out_T_58 = cam_a_0_lut >> indexes_29; // @[AtomicAutomata.scala:83:24, :119:59, :120:57]
wire _logic_out_T_59 = _logic_out_T_58[0]; // @[AtomicAutomata.scala:120:57]
wire [3:0] _logic_out_T_60 = cam_a_0_lut >> indexes_30; // @[AtomicAutomata.scala:83:24, :119:59, :120:57]
wire _logic_out_T_61 = _logic_out_T_60[0]; // @[AtomicAutomata.scala:120:57]
wire [3:0] _logic_out_T_62 = cam_a_0_lut >> indexes_31; // @[AtomicAutomata.scala:83:24, :119:59, :120:57]
wire _logic_out_T_63 = _logic_out_T_62[0]; // @[AtomicAutomata.scala:120:57]
wire [3:0] _logic_out_T_64 = cam_a_0_lut >> indexes_32; // @[AtomicAutomata.scala:83:24, :119:59, :120:57]
wire _logic_out_T_65 = _logic_out_T_64[0]; // @[AtomicAutomata.scala:120:57]
wire [3:0] _logic_out_T_66 = cam_a_0_lut >> indexes_33; // @[AtomicAutomata.scala:83:24, :119:59, :120:57]
wire _logic_out_T_67 = _logic_out_T_66[0]; // @[AtomicAutomata.scala:120:57]
wire [3:0] _logic_out_T_68 = cam_a_0_lut >> indexes_34; // @[AtomicAutomata.scala:83:24, :119:59, :120:57]
wire _logic_out_T_69 = _logic_out_T_68[0]; // @[AtomicAutomata.scala:120:57]
wire [3:0] _logic_out_T_70 = cam_a_0_lut >> indexes_35; // @[AtomicAutomata.scala:83:24, :119:59, :120:57]
wire _logic_out_T_71 = _logic_out_T_70[0]; // @[AtomicAutomata.scala:120:57]
wire [3:0] _logic_out_T_72 = cam_a_0_lut >> indexes_36; // @[AtomicAutomata.scala:83:24, :119:59, :120:57]
wire _logic_out_T_73 = _logic_out_T_72[0]; // @[AtomicAutomata.scala:120:57]
wire [3:0] _logic_out_T_74 = cam_a_0_lut >> indexes_37; // @[AtomicAutomata.scala:83:24, :119:59, :120:57]
wire _logic_out_T_75 = _logic_out_T_74[0]; // @[AtomicAutomata.scala:120:57]
wire [3:0] _logic_out_T_76 = cam_a_0_lut >> indexes_38; // @[AtomicAutomata.scala:83:24, :119:59, :120:57]
wire _logic_out_T_77 = _logic_out_T_76[0]; // @[AtomicAutomata.scala:120:57]
wire [3:0] _logic_out_T_78 = cam_a_0_lut >> indexes_39; // @[AtomicAutomata.scala:83:24, :119:59, :120:57]
wire _logic_out_T_79 = _logic_out_T_78[0]; // @[AtomicAutomata.scala:120:57]
wire [3:0] _logic_out_T_80 = cam_a_0_lut >> indexes_40; // @[AtomicAutomata.scala:83:24, :119:59, :120:57]
wire _logic_out_T_81 = _logic_out_T_80[0]; // @[AtomicAutomata.scala:120:57]
wire [3:0] _logic_out_T_82 = cam_a_0_lut >> indexes_41; // @[AtomicAutomata.scala:83:24, :119:59, :120:57]
wire _logic_out_T_83 = _logic_out_T_82[0]; // @[AtomicAutomata.scala:120:57]
wire [3:0] _logic_out_T_84 = cam_a_0_lut >> indexes_42; // @[AtomicAutomata.scala:83:24, :119:59, :120:57]
wire _logic_out_T_85 = _logic_out_T_84[0]; // @[AtomicAutomata.scala:120:57]
wire [3:0] _logic_out_T_86 = cam_a_0_lut >> indexes_43; // @[AtomicAutomata.scala:83:24, :119:59, :120:57]
wire _logic_out_T_87 = _logic_out_T_86[0]; // @[AtomicAutomata.scala:120:57]
wire [3:0] _logic_out_T_88 = cam_a_0_lut >> indexes_44; // @[AtomicAutomata.scala:83:24, :119:59, :120:57]
wire _logic_out_T_89 = _logic_out_T_88[0]; // @[AtomicAutomata.scala:120:57]
wire [3:0] _logic_out_T_90 = cam_a_0_lut >> indexes_45; // @[AtomicAutomata.scala:83:24, :119:59, :120:57]
wire _logic_out_T_91 = _logic_out_T_90[0]; // @[AtomicAutomata.scala:120:57]
wire [3:0] _logic_out_T_92 = cam_a_0_lut >> indexes_46; // @[AtomicAutomata.scala:83:24, :119:59, :120:57]
wire _logic_out_T_93 = _logic_out_T_92[0]; // @[AtomicAutomata.scala:120:57]
wire [3:0] _logic_out_T_94 = cam_a_0_lut >> indexes_47; // @[AtomicAutomata.scala:83:24, :119:59, :120:57]
wire _logic_out_T_95 = _logic_out_T_94[0]; // @[AtomicAutomata.scala:120:57]
wire [3:0] _logic_out_T_96 = cam_a_0_lut >> indexes_48; // @[AtomicAutomata.scala:83:24, :119:59, :120:57]
wire _logic_out_T_97 = _logic_out_T_96[0]; // @[AtomicAutomata.scala:120:57]
wire [3:0] _logic_out_T_98 = cam_a_0_lut >> indexes_49; // @[AtomicAutomata.scala:83:24, :119:59, :120:57]
wire _logic_out_T_99 = _logic_out_T_98[0]; // @[AtomicAutomata.scala:120:57]
wire [3:0] _logic_out_T_100 = cam_a_0_lut >> indexes_50; // @[AtomicAutomata.scala:83:24, :119:59, :120:57]
wire _logic_out_T_101 = _logic_out_T_100[0]; // @[AtomicAutomata.scala:120:57]
wire [3:0] _logic_out_T_102 = cam_a_0_lut >> indexes_51; // @[AtomicAutomata.scala:83:24, :119:59, :120:57]
wire _logic_out_T_103 = _logic_out_T_102[0]; // @[AtomicAutomata.scala:120:57]
wire [3:0] _logic_out_T_104 = cam_a_0_lut >> indexes_52; // @[AtomicAutomata.scala:83:24, :119:59, :120:57]
wire _logic_out_T_105 = _logic_out_T_104[0]; // @[AtomicAutomata.scala:120:57]
wire [3:0] _logic_out_T_106 = cam_a_0_lut >> indexes_53; // @[AtomicAutomata.scala:83:24, :119:59, :120:57]
wire _logic_out_T_107 = _logic_out_T_106[0]; // @[AtomicAutomata.scala:120:57]
wire [3:0] _logic_out_T_108 = cam_a_0_lut >> indexes_54; // @[AtomicAutomata.scala:83:24, :119:59, :120:57]
wire _logic_out_T_109 = _logic_out_T_108[0]; // @[AtomicAutomata.scala:120:57]
wire [3:0] _logic_out_T_110 = cam_a_0_lut >> indexes_55; // @[AtomicAutomata.scala:83:24, :119:59, :120:57]
wire _logic_out_T_111 = _logic_out_T_110[0]; // @[AtomicAutomata.scala:120:57]
wire [3:0] _logic_out_T_112 = cam_a_0_lut >> indexes_56; // @[AtomicAutomata.scala:83:24, :119:59, :120:57]
wire _logic_out_T_113 = _logic_out_T_112[0]; // @[AtomicAutomata.scala:120:57]
wire [3:0] _logic_out_T_114 = cam_a_0_lut >> indexes_57; // @[AtomicAutomata.scala:83:24, :119:59, :120:57]
wire _logic_out_T_115 = _logic_out_T_114[0]; // @[AtomicAutomata.scala:120:57]
wire [3:0] _logic_out_T_116 = cam_a_0_lut >> indexes_58; // @[AtomicAutomata.scala:83:24, :119:59, :120:57]
wire _logic_out_T_117 = _logic_out_T_116[0]; // @[AtomicAutomata.scala:120:57]
wire [3:0] _logic_out_T_118 = cam_a_0_lut >> indexes_59; // @[AtomicAutomata.scala:83:24, :119:59, :120:57]
wire _logic_out_T_119 = _logic_out_T_118[0]; // @[AtomicAutomata.scala:120:57]
wire [3:0] _logic_out_T_120 = cam_a_0_lut >> indexes_60; // @[AtomicAutomata.scala:83:24, :119:59, :120:57]
wire _logic_out_T_121 = _logic_out_T_120[0]; // @[AtomicAutomata.scala:120:57]
wire [3:0] _logic_out_T_122 = cam_a_0_lut >> indexes_61; // @[AtomicAutomata.scala:83:24, :119:59, :120:57]
wire _logic_out_T_123 = _logic_out_T_122[0]; // @[AtomicAutomata.scala:120:57]
wire [3:0] _logic_out_T_124 = cam_a_0_lut >> indexes_62; // @[AtomicAutomata.scala:83:24, :119:59, :120:57]
wire _logic_out_T_125 = _logic_out_T_124[0]; // @[AtomicAutomata.scala:120:57]
wire [3:0] _logic_out_T_126 = cam_a_0_lut >> indexes_63; // @[AtomicAutomata.scala:83:24, :119:59, :120:57]
wire _logic_out_T_127 = _logic_out_T_126[0]; // @[AtomicAutomata.scala:120:57]
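  // Reassemble the 64 per-bit results into the logical AMO result; the lo/hi concatenation
  // tree below is simply the flattened Cat() of bits 0..63.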
wire [1:0] logic_out_lo_lo_lo_lo_lo = {_logic_out_T_3, _logic_out_T_1}; // @[AtomicAutomata.scala:120:{28,57}]
wire [1:0] logic_out_lo_lo_lo_lo_hi = {_logic_out_T_7, _logic_out_T_5}; // @[AtomicAutomata.scala:120:{28,57}]
wire [3:0] logic_out_lo_lo_lo_lo = {logic_out_lo_lo_lo_lo_hi, logic_out_lo_lo_lo_lo_lo}; // @[AtomicAutomata.scala:120:28]
wire [1:0] logic_out_lo_lo_lo_hi_lo = {_logic_out_T_11, _logic_out_T_9}; // @[AtomicAutomata.scala:120:{28,57}]
wire [1:0] logic_out_lo_lo_lo_hi_hi = {_logic_out_T_15, _logic_out_T_13}; // @[AtomicAutomata.scala:120:{28,57}]
wire [3:0] logic_out_lo_lo_lo_hi = {logic_out_lo_lo_lo_hi_hi, logic_out_lo_lo_lo_hi_lo}; // @[AtomicAutomata.scala:120:28]
wire [7:0] logic_out_lo_lo_lo = {logic_out_lo_lo_lo_hi, logic_out_lo_lo_lo_lo}; // @[AtomicAutomata.scala:120:28]
wire [1:0] logic_out_lo_lo_hi_lo_lo = {_logic_out_T_19, _logic_out_T_17}; // @[AtomicAutomata.scala:120:{28,57}]
wire [1:0] logic_out_lo_lo_hi_lo_hi = {_logic_out_T_23, _logic_out_T_21}; // @[AtomicAutomata.scala:120:{28,57}]
wire [3:0] logic_out_lo_lo_hi_lo = {logic_out_lo_lo_hi_lo_hi, logic_out_lo_lo_hi_lo_lo}; // @[AtomicAutomata.scala:120:28]
wire [1:0] logic_out_lo_lo_hi_hi_lo = {_logic_out_T_27, _logic_out_T_25}; // @[AtomicAutomata.scala:120:{28,57}]
wire [1:0] logic_out_lo_lo_hi_hi_hi = {_logic_out_T_31, _logic_out_T_29}; // @[AtomicAutomata.scala:120:{28,57}]
wire [3:0] logic_out_lo_lo_hi_hi = {logic_out_lo_lo_hi_hi_hi, logic_out_lo_lo_hi_hi_lo}; // @[AtomicAutomata.scala:120:28]
wire [7:0] logic_out_lo_lo_hi = {logic_out_lo_lo_hi_hi, logic_out_lo_lo_hi_lo}; // @[AtomicAutomata.scala:120:28]
wire [15:0] logic_out_lo_lo = {logic_out_lo_lo_hi, logic_out_lo_lo_lo}; // @[AtomicAutomata.scala:120:28]
wire [1:0] logic_out_lo_hi_lo_lo_lo = {_logic_out_T_35, _logic_out_T_33}; // @[AtomicAutomata.scala:120:{28,57}]
wire [1:0] logic_out_lo_hi_lo_lo_hi = {_logic_out_T_39, _logic_out_T_37}; // @[AtomicAutomata.scala:120:{28,57}]
wire [3:0] logic_out_lo_hi_lo_lo = {logic_out_lo_hi_lo_lo_hi, logic_out_lo_hi_lo_lo_lo}; // @[AtomicAutomata.scala:120:28]
wire [1:0] logic_out_lo_hi_lo_hi_lo = {_logic_out_T_43, _logic_out_T_41}; // @[AtomicAutomata.scala:120:{28,57}]
wire [1:0] logic_out_lo_hi_lo_hi_hi = {_logic_out_T_47, _logic_out_T_45}; // @[AtomicAutomata.scala:120:{28,57}]
wire [3:0] logic_out_lo_hi_lo_hi = {logic_out_lo_hi_lo_hi_hi, logic_out_lo_hi_lo_hi_lo}; // @[AtomicAutomata.scala:120:28]
wire [7:0] logic_out_lo_hi_lo = {logic_out_lo_hi_lo_hi, logic_out_lo_hi_lo_lo}; // @[AtomicAutomata.scala:120:28]
wire [1:0] logic_out_lo_hi_hi_lo_lo = {_logic_out_T_51, _logic_out_T_49}; // @[AtomicAutomata.scala:120:{28,57}]
wire [1:0] logic_out_lo_hi_hi_lo_hi = {_logic_out_T_55, _logic_out_T_53}; // @[AtomicAutomata.scala:120:{28,57}]
wire [3:0] logic_out_lo_hi_hi_lo = {logic_out_lo_hi_hi_lo_hi, logic_out_lo_hi_hi_lo_lo}; // @[AtomicAutomata.scala:120:28]
wire [1:0] logic_out_lo_hi_hi_hi_lo = {_logic_out_T_59, _logic_out_T_57}; // @[AtomicAutomata.scala:120:{28,57}]
wire [1:0] logic_out_lo_hi_hi_hi_hi = {_logic_out_T_63, _logic_out_T_61}; // @[AtomicAutomata.scala:120:{28,57}]
wire [3:0] logic_out_lo_hi_hi_hi = {logic_out_lo_hi_hi_hi_hi, logic_out_lo_hi_hi_hi_lo}; // @[AtomicAutomata.scala:120:28]
wire [7:0] logic_out_lo_hi_hi = {logic_out_lo_hi_hi_hi, logic_out_lo_hi_hi_lo}; // @[AtomicAutomata.scala:120:28]
wire [15:0] logic_out_lo_hi = {logic_out_lo_hi_hi, logic_out_lo_hi_lo}; // @[AtomicAutomata.scala:120:28]
wire [31:0] logic_out_lo = {logic_out_lo_hi, logic_out_lo_lo}; // @[AtomicAutomata.scala:120:28]
wire [1:0] logic_out_hi_lo_lo_lo_lo = {_logic_out_T_67, _logic_out_T_65}; // @[AtomicAutomata.scala:120:{28,57}]
wire [1:0] logic_out_hi_lo_lo_lo_hi = {_logic_out_T_71, _logic_out_T_69}; // @[AtomicAutomata.scala:120:{28,57}]
wire [3:0] logic_out_hi_lo_lo_lo = {logic_out_hi_lo_lo_lo_hi, logic_out_hi_lo_lo_lo_lo}; // @[AtomicAutomata.scala:120:28]
wire [1:0] logic_out_hi_lo_lo_hi_lo = {_logic_out_T_75, _logic_out_T_73}; // @[AtomicAutomata.scala:120:{28,57}]
wire [1:0] logic_out_hi_lo_lo_hi_hi = {_logic_out_T_79, _logic_out_T_77}; // @[AtomicAutomata.scala:120:{28,57}]
wire [3:0] logic_out_hi_lo_lo_hi = {logic_out_hi_lo_lo_hi_hi, logic_out_hi_lo_lo_hi_lo}; // @[AtomicAutomata.scala:120:28]
wire [7:0] logic_out_hi_lo_lo = {logic_out_hi_lo_lo_hi, logic_out_hi_lo_lo_lo}; // @[AtomicAutomata.scala:120:28]
wire [1:0] logic_out_hi_lo_hi_lo_lo = {_logic_out_T_83, _logic_out_T_81}; // @[AtomicAutomata.scala:120:{28,57}]
wire [1:0] logic_out_hi_lo_hi_lo_hi = {_logic_out_T_87, _logic_out_T_85}; // @[AtomicAutomata.scala:120:{28,57}]
wire [3:0] logic_out_hi_lo_hi_lo = {logic_out_hi_lo_hi_lo_hi, logic_out_hi_lo_hi_lo_lo}; // @[AtomicAutomata.scala:120:28]
wire [1:0] logic_out_hi_lo_hi_hi_lo = {_logic_out_T_91, _logic_out_T_89}; // @[AtomicAutomata.scala:120:{28,57}]
wire [1:0] logic_out_hi_lo_hi_hi_hi = {_logic_out_T_95, _logic_out_T_93}; // @[AtomicAutomata.scala:120:{28,57}]
wire [3:0] logic_out_hi_lo_hi_hi = {logic_out_hi_lo_hi_hi_hi, logic_out_hi_lo_hi_hi_lo}; // @[AtomicAutomata.scala:120:28]
wire [7:0] logic_out_hi_lo_hi = {logic_out_hi_lo_hi_hi, logic_out_hi_lo_hi_lo}; // @[AtomicAutomata.scala:120:28]
wire [15:0] logic_out_hi_lo = {logic_out_hi_lo_hi, logic_out_hi_lo_lo}; // @[AtomicAutomata.scala:120:28]
wire [1:0] logic_out_hi_hi_lo_lo_lo = {_logic_out_T_99, _logic_out_T_97}; // @[AtomicAutomata.scala:120:{28,57}]
wire [1:0] logic_out_hi_hi_lo_lo_hi = {_logic_out_T_103, _logic_out_T_101}; // @[AtomicAutomata.scala:120:{28,57}]
wire [3:0] logic_out_hi_hi_lo_lo = {logic_out_hi_hi_lo_lo_hi, logic_out_hi_hi_lo_lo_lo}; // @[AtomicAutomata.scala:120:28]
wire [1:0] logic_out_hi_hi_lo_hi_lo = {_logic_out_T_107, _logic_out_T_105}; // @[AtomicAutomata.scala:120:{28,57}]
wire [1:0] logic_out_hi_hi_lo_hi_hi = {_logic_out_T_111, _logic_out_T_109}; // @[AtomicAutomata.scala:120:{28,57}]
wire [3:0] logic_out_hi_hi_lo_hi = {logic_out_hi_hi_lo_hi_hi, logic_out_hi_hi_lo_hi_lo}; // @[AtomicAutomata.scala:120:28]
wire [7:0] logic_out_hi_hi_lo = {logic_out_hi_hi_lo_hi, logic_out_hi_hi_lo_lo}; // @[AtomicAutomata.scala:120:28]
wire [1:0] logic_out_hi_hi_hi_lo_lo = {_logic_out_T_115, _logic_out_T_113}; // @[AtomicAutomata.scala:120:{28,57}]
wire [1:0] logic_out_hi_hi_hi_lo_hi = {_logic_out_T_119, _logic_out_T_117}; // @[AtomicAutomata.scala:120:{28,57}]
wire [3:0] logic_out_hi_hi_hi_lo = {logic_out_hi_hi_hi_lo_hi, logic_out_hi_hi_hi_lo_lo}; // @[AtomicAutomata.scala:120:28]
wire [1:0] logic_out_hi_hi_hi_hi_lo = {_logic_out_T_123, _logic_out_T_121}; // @[AtomicAutomata.scala:120:{28,57}]
wire [1:0] logic_out_hi_hi_hi_hi_hi = {_logic_out_T_127, _logic_out_T_125}; // @[AtomicAutomata.scala:120:{28,57}]
wire [3:0] logic_out_hi_hi_hi_hi = {logic_out_hi_hi_hi_hi_hi, logic_out_hi_hi_hi_hi_lo}; // @[AtomicAutomata.scala:120:28]
wire [7:0] logic_out_hi_hi_hi = {logic_out_hi_hi_hi_hi, logic_out_hi_hi_hi_lo}; // @[AtomicAutomata.scala:120:28]
wire [15:0] logic_out_hi_hi = {logic_out_hi_hi_hi, logic_out_hi_hi_lo}; // @[AtomicAutomata.scala:120:28]
wire [31:0] logic_out_hi = {logic_out_hi_hi, logic_out_hi_lo}; // @[AtomicAutomata.scala:120:28]
wire [63:0] logic_out = {logic_out_hi, logic_out_lo}; // @[AtomicAutomata.scala:120:28]
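  // Arithmetic-AMO decode of cam_a_0_bits_param (TileLink arithmetic encodings MIN=0, MAX=1,
  // MINU=2, MAXU=3, ADD=4): bit 1 selects an unsigned compare, bit 0 selects max instead of
  // min, and bit 2 selects plain addition.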
wire unsigned_0 = cam_a_0_bits_param[1]; // @[AtomicAutomata.scala:83:24, :123:42]
wire take_max = cam_a_0_bits_param[0]; // @[AtomicAutomata.scala:83:24, :124:42]
wire adder = cam_a_0_bits_param[2]; // @[AtomicAutomata.scala:83:24, :125:39]
wire [7:0] _signSel_T = ~cam_a_0_bits_mask; // @[AtomicAutomata.scala:83:24, :127:25]
wire [6:0] _signSel_T_1 = cam_a_0_bits_mask[7:1]; // @[AtomicAutomata.scala:83:24, :127:39]
wire [7:0] _signSel_T_2 = {_signSel_T[7], _signSel_T[6:0] | _signSel_T_1}; // @[AtomicAutomata.scala:127:{25,31,39}]
wire [7:0] signSel = ~_signSel_T_2; // @[AtomicAutomata.scala:127:{23,31}]
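  // signSel = mask & ~(mask >> 1): a one-hot marking the most-significant byte lane covered
  // by the operand's byte mask, i.e. the lane holding the sign bit of a narrower-than-64-bit
  // operand. signbits_a/d gather bit 7 of every byte of the A operand and the returned D data
  // for use in sign extension.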
wire [1:0] signbits_a_lo_lo = {_signbits_a_T_1, _signbits_a_T}; // @[AtomicAutomata.scala:128:{29,64}]
wire [1:0] signbits_a_lo_hi = {_signbits_a_T_3, _signbits_a_T_2}; // @[AtomicAutomata.scala:128:{29,64}]
wire [3:0] signbits_a_lo = {signbits_a_lo_hi, signbits_a_lo_lo}; // @[AtomicAutomata.scala:128:29]
wire [1:0] signbits_a_hi_lo = {_signbits_a_T_5, _signbits_a_T_4}; // @[AtomicAutomata.scala:128:{29,64}]
wire [1:0] signbits_a_hi_hi = {_signbits_a_T_7, _signbits_a_T_6}; // @[AtomicAutomata.scala:128:{29,64}]
wire [3:0] signbits_a_hi = {signbits_a_hi_hi, signbits_a_hi_lo}; // @[AtomicAutomata.scala:128:29]
wire [7:0] signbits_a = {signbits_a_hi, signbits_a_lo}; // @[AtomicAutomata.scala:128:29]
wire [1:0] signbits_d_lo_lo = {_signbits_d_T_1, _signbits_d_T}; // @[AtomicAutomata.scala:129:{29,64}]
wire [1:0] signbits_d_lo_hi = {_signbits_d_T_3, _signbits_d_T_2}; // @[AtomicAutomata.scala:129:{29,64}]
wire [3:0] signbits_d_lo = {signbits_d_lo_hi, signbits_d_lo_lo}; // @[AtomicAutomata.scala:129:29]
wire [1:0] signbits_d_hi_lo = {_signbits_d_T_5, _signbits_d_T_4}; // @[AtomicAutomata.scala:129:{29,64}]
wire [1:0] signbits_d_hi_hi = {_signbits_d_T_7, _signbits_d_T_6}; // @[AtomicAutomata.scala:129:{29,64}]
wire [3:0] signbits_d_hi = {signbits_d_hi_hi, signbits_d_hi_lo}; // @[AtomicAutomata.scala:129:29]
wire [7:0] signbits_d = {signbits_d_hi, signbits_d_lo}; // @[AtomicAutomata.scala:129:29]
wire [7:0] _signbit_a_T = signbits_a & signSel; // @[AtomicAutomata.scala:127:23, :128:29, :131:38]
wire [8:0] _signbit_a_T_1 = {_signbit_a_T, 1'h0}; // @[AtomicAutomata.scala:131:{38,49}]
wire [7:0] signbit_a = _signbit_a_T_1[7:0]; // @[AtomicAutomata.scala:131:{49,54}]
wire [7:0] _signbit_d_T = signbits_d & signSel; // @[AtomicAutomata.scala:127:23, :129:29, :132:38]
wire [8:0] _signbit_d_T_1 = {_signbit_d_T, 1'h0}; // @[AtomicAutomata.scala:132:{38,49}]
wire [7:0] signbit_d = _signbit_d_T_1[7:0]; // @[AtomicAutomata.scala:132:{49,54}]
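  // signbit_a/d: the selected sign bit, moved up one byte lane. The package.scala terms below
  // propagate it to every higher byte lane (a leftOR-style fill), and the {8{...}} replications
  // expand each lane flag into a full byte, yielding the 64-bit sign-extension masks
  // signext_a and signext_d.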
wire [8:0] _signext_a_T = {signbit_a, 1'h0}; // @[package.scala:253:48]
wire [7:0] _signext_a_T_1 = _signext_a_T[7:0]; // @[package.scala:253:{48,53}]
wire [7:0] _signext_a_T_2 = signbit_a | _signext_a_T_1; // @[package.scala:253:{43,53}]
wire [9:0] _signext_a_T_3 = {_signext_a_T_2, 2'h0}; // @[package.scala:253:{43,48}]
wire [7:0] _signext_a_T_4 = _signext_a_T_3[7:0]; // @[package.scala:253:{48,53}]
wire [7:0] _signext_a_T_5 = _signext_a_T_2 | _signext_a_T_4; // @[package.scala:253:{43,53}]
wire [11:0] _signext_a_T_6 = {_signext_a_T_5, 4'h0}; // @[package.scala:253:{43,48}]
wire [7:0] _signext_a_T_7 = _signext_a_T_6[7:0]; // @[package.scala:253:{48,53}]
wire [7:0] _signext_a_T_8 = _signext_a_T_5 | _signext_a_T_7; // @[package.scala:253:{43,53}]
wire [7:0] _signext_a_T_9 = _signext_a_T_8; // @[package.scala:253:43, :254:17]
wire _signext_a_T_10 = _signext_a_T_9[0]; // @[package.scala:254:17]
wire _signext_a_T_11 = _signext_a_T_9[1]; // @[package.scala:254:17]
wire _signext_a_T_12 = _signext_a_T_9[2]; // @[package.scala:254:17]
wire _signext_a_T_13 = _signext_a_T_9[3]; // @[package.scala:254:17]
wire _signext_a_T_14 = _signext_a_T_9[4]; // @[package.scala:254:17]
wire _signext_a_T_15 = _signext_a_T_9[5]; // @[package.scala:254:17]
wire _signext_a_T_16 = _signext_a_T_9[6]; // @[package.scala:254:17]
wire _signext_a_T_17 = _signext_a_T_9[7]; // @[package.scala:254:17]
wire [7:0] _signext_a_T_18 = {8{_signext_a_T_10}}; // @[AtomicAutomata.scala:133:40]
wire [7:0] _signext_a_T_19 = {8{_signext_a_T_11}}; // @[AtomicAutomata.scala:133:40]
wire [7:0] _signext_a_T_20 = {8{_signext_a_T_12}}; // @[AtomicAutomata.scala:133:40]
wire [7:0] _signext_a_T_21 = {8{_signext_a_T_13}}; // @[AtomicAutomata.scala:133:40]
wire [7:0] _signext_a_T_22 = {8{_signext_a_T_14}}; // @[AtomicAutomata.scala:133:40]
wire [7:0] _signext_a_T_23 = {8{_signext_a_T_15}}; // @[AtomicAutomata.scala:133:40]
wire [7:0] _signext_a_T_24 = {8{_signext_a_T_16}}; // @[AtomicAutomata.scala:133:40]
wire [7:0] _signext_a_T_25 = {8{_signext_a_T_17}}; // @[AtomicAutomata.scala:133:40]
wire [15:0] signext_a_lo_lo = {_signext_a_T_19, _signext_a_T_18}; // @[AtomicAutomata.scala:133:40]
wire [15:0] signext_a_lo_hi = {_signext_a_T_21, _signext_a_T_20}; // @[AtomicAutomata.scala:133:40]
wire [31:0] signext_a_lo = {signext_a_lo_hi, signext_a_lo_lo}; // @[AtomicAutomata.scala:133:40]
wire [15:0] signext_a_hi_lo = {_signext_a_T_23, _signext_a_T_22}; // @[AtomicAutomata.scala:133:40]
wire [15:0] signext_a_hi_hi = {_signext_a_T_25, _signext_a_T_24}; // @[AtomicAutomata.scala:133:40]
wire [31:0] signext_a_hi = {signext_a_hi_hi, signext_a_hi_lo}; // @[AtomicAutomata.scala:133:40]
wire [63:0] signext_a = {signext_a_hi, signext_a_lo}; // @[AtomicAutomata.scala:133:40]
wire [8:0] _signext_d_T = {signbit_d, 1'h0}; // @[package.scala:253:48]
wire [7:0] _signext_d_T_1 = _signext_d_T[7:0]; // @[package.scala:253:{48,53}]
wire [7:0] _signext_d_T_2 = signbit_d | _signext_d_T_1; // @[package.scala:253:{43,53}]
wire [9:0] _signext_d_T_3 = {_signext_d_T_2, 2'h0}; // @[package.scala:253:{43,48}]
wire [7:0] _signext_d_T_4 = _signext_d_T_3[7:0]; // @[package.scala:253:{48,53}]
wire [7:0] _signext_d_T_5 = _signext_d_T_2 | _signext_d_T_4; // @[package.scala:253:{43,53}]
wire [11:0] _signext_d_T_6 = {_signext_d_T_5, 4'h0}; // @[package.scala:253:{43,48}]
wire [7:0] _signext_d_T_7 = _signext_d_T_6[7:0]; // @[package.scala:253:{48,53}]
wire [7:0] _signext_d_T_8 = _signext_d_T_5 | _signext_d_T_7; // @[package.scala:253:{43,53}]
wire [7:0] _signext_d_T_9 = _signext_d_T_8; // @[package.scala:253:43, :254:17]
wire _signext_d_T_10 = _signext_d_T_9[0]; // @[package.scala:254:17]
wire _signext_d_T_11 = _signext_d_T_9[1]; // @[package.scala:254:17]
wire _signext_d_T_12 = _signext_d_T_9[2]; // @[package.scala:254:17]
wire _signext_d_T_13 = _signext_d_T_9[3]; // @[package.scala:254:17]
wire _signext_d_T_14 = _signext_d_T_9[4]; // @[package.scala:254:17]
wire _signext_d_T_15 = _signext_d_T_9[5]; // @[package.scala:254:17]
wire _signext_d_T_16 = _signext_d_T_9[6]; // @[package.scala:254:17]
wire _signext_d_T_17 = _signext_d_T_9[7]; // @[package.scala:254:17]
wire [7:0] _signext_d_T_18 = {8{_signext_d_T_10}}; // @[AtomicAutomata.scala:134:40]
wire [7:0] _signext_d_T_19 = {8{_signext_d_T_11}}; // @[AtomicAutomata.scala:134:40]
wire [7:0] _signext_d_T_20 = {8{_signext_d_T_12}}; // @[AtomicAutomata.scala:134:40]
wire [7:0] _signext_d_T_21 = {8{_signext_d_T_13}}; // @[AtomicAutomata.scala:134:40]
wire [7:0] _signext_d_T_22 = {8{_signext_d_T_14}}; // @[AtomicAutomata.scala:134:40]
wire [7:0] _signext_d_T_23 = {8{_signext_d_T_15}}; // @[AtomicAutomata.scala:134:40]
wire [7:0] _signext_d_T_24 = {8{_signext_d_T_16}}; // @[AtomicAutomata.scala:134:40]
wire [7:0] _signext_d_T_25 = {8{_signext_d_T_17}}; // @[AtomicAutomata.scala:134:40]
wire [15:0] signext_d_lo_lo = {_signext_d_T_19, _signext_d_T_18}; // @[AtomicAutomata.scala:134:40]
wire [15:0] signext_d_lo_hi = {_signext_d_T_21, _signext_d_T_20}; // @[AtomicAutomata.scala:134:40]
wire [31:0] signext_d_lo = {signext_d_lo_hi, signext_d_lo_lo}; // @[AtomicAutomata.scala:134:40]
wire [15:0] signext_d_hi_lo = {_signext_d_T_23, _signext_d_T_22}; // @[AtomicAutomata.scala:134:40]
wire [15:0] signext_d_hi_hi = {_signext_d_T_25, _signext_d_T_24}; // @[AtomicAutomata.scala:134:40]
wire [31:0] signext_d_hi = {signext_d_hi_hi, signext_d_hi_lo}; // @[AtomicAutomata.scala:134:40]
wire [63:0] signext_d = {signext_d_hi, signext_d_lo}; // @[AtomicAutomata.scala:134:40]
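  // wide_mask expands the 8-bit byte mask to bit granularity (each mask bit replicated across
  // its byte), so bytes outside the operand are supplied by the sign-extension masks above
  // rather than by stale data.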
wire _wide_mask_T = cam_a_0_bits_mask[0]; // @[AtomicAutomata.scala:83:24, :136:40]
wire _wide_mask_T_1 = cam_a_0_bits_mask[1]; // @[AtomicAutomata.scala:83:24, :136:40]
wire _wide_mask_T_2 = cam_a_0_bits_mask[2]; // @[AtomicAutomata.scala:83:24, :136:40]
wire _wide_mask_T_3 = cam_a_0_bits_mask[3]; // @[AtomicAutomata.scala:83:24, :136:40]
wire _wide_mask_T_4 = cam_a_0_bits_mask[4]; // @[AtomicAutomata.scala:83:24, :136:40]
wire _wide_mask_T_5 = cam_a_0_bits_mask[5]; // @[AtomicAutomata.scala:83:24, :136:40]
wire _wide_mask_T_6 = cam_a_0_bits_mask[6]; // @[AtomicAutomata.scala:83:24, :136:40]
wire _wide_mask_T_7 = cam_a_0_bits_mask[7]; // @[AtomicAutomata.scala:83:24, :136:40]
wire [7:0] _wide_mask_T_8 = {8{_wide_mask_T}}; // @[AtomicAutomata.scala:136:40]
wire [7:0] _wide_mask_T_9 = {8{_wide_mask_T_1}}; // @[AtomicAutomata.scala:136:40]
wire [7:0] _wide_mask_T_10 = {8{_wide_mask_T_2}}; // @[AtomicAutomata.scala:136:40]
wire [7:0] _wide_mask_T_11 = {8{_wide_mask_T_3}}; // @[AtomicAutomata.scala:136:40]
wire [7:0] _wide_mask_T_12 = {8{_wide_mask_T_4}}; // @[AtomicAutomata.scala:136:40]
wire [7:0] _wide_mask_T_13 = {8{_wide_mask_T_5}}; // @[AtomicAutomata.scala:136:40]
wire [7:0] _wide_mask_T_14 = {8{_wide_mask_T_6}}; // @[AtomicAutomata.scala:136:40]
wire [7:0] _wide_mask_T_15 = {8{_wide_mask_T_7}}; // @[AtomicAutomata.scala:136:40]
wire [15:0] wide_mask_lo_lo = {_wide_mask_T_9, _wide_mask_T_8}; // @[AtomicAutomata.scala:136:40]
wire [15:0] wide_mask_lo_hi = {_wide_mask_T_11, _wide_mask_T_10}; // @[AtomicAutomata.scala:136:40]
wire [31:0] wide_mask_lo = {wide_mask_lo_hi, wide_mask_lo_lo}; // @[AtomicAutomata.scala:136:40]
wire [15:0] wide_mask_hi_lo = {_wide_mask_T_13, _wide_mask_T_12}; // @[AtomicAutomata.scala:136:40]
wire [15:0] wide_mask_hi_hi = {_wide_mask_T_15, _wide_mask_T_14}; // @[AtomicAutomata.scala:136:40]
wire [31:0] wide_mask_hi = {wide_mask_hi_hi, wide_mask_hi_lo}; // @[AtomicAutomata.scala:136:40]
wire [63:0] wide_mask = {wide_mask_hi, wide_mask_lo}; // @[AtomicAutomata.scala:136:40]
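  // Sign-extended operands and the shared adder/comparator: for ADD the operands are summed
  // directly; for MIN/MAX the D operand is inverted so the sum's sign, together with the
  // operand signs and the signed/unsigned select, decides which value is larger. arith_out is
  // either the sum or the chosen original operand.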
wire [63:0] _a_a_ext_T = cam_a_0_bits_data & wide_mask; // @[AtomicAutomata.scala:83:24, :136:40, :137:28]
wire [63:0] a_a_ext = _a_a_ext_T | signext_a; // @[AtomicAutomata.scala:133:40, :137:{28,41}]
wire [63:0] _a_d_ext_T = cam_d_0_data & wide_mask; // @[AtomicAutomata.scala:84:24, :136:40, :138:28]
wire [63:0] a_d_ext = _a_d_ext_T | signext_d; // @[AtomicAutomata.scala:134:40, :138:{28,41}]
wire [63:0] _a_d_inv_T = ~a_d_ext; // @[AtomicAutomata.scala:138:41, :139:43]
wire [63:0] a_d_inv = adder ? a_d_ext : _a_d_inv_T; // @[AtomicAutomata.scala:125:39, :138:41, :139:{26,43}]
wire [64:0] _adder_out_T = {1'h0, a_a_ext} + {1'h0, a_d_inv}; // @[AtomicAutomata.scala:137:41, :139:26, :140:33]
wire [63:0] adder_out = _adder_out_T[63:0]; // @[AtomicAutomata.scala:140:33]
wire _a_bigger_uneq_T = a_a_ext[63]; // @[AtomicAutomata.scala:137:41, :142:49]
wire _a_bigger_T = a_a_ext[63]; // @[AtomicAutomata.scala:137:41, :142:49, :143:35]
wire a_bigger_uneq = unsigned_0 == _a_bigger_uneq_T; // @[AtomicAutomata.scala:123:42, :142:{38,49}]
wire _a_bigger_T_1 = a_d_ext[63]; // @[AtomicAutomata.scala:138:41, :143:50]
wire _a_bigger_T_2 = _a_bigger_T == _a_bigger_T_1; // @[AtomicAutomata.scala:143:{35,39,50}]
wire _a_bigger_T_3 = adder_out[63]; // @[AtomicAutomata.scala:140:33, :143:65]
wire _a_bigger_T_4 = ~_a_bigger_T_3; // @[AtomicAutomata.scala:143:{55,65}]
wire a_bigger = _a_bigger_T_2 ? _a_bigger_T_4 : a_bigger_uneq; // @[AtomicAutomata.scala:142:38, :143:{27,39,55}]
wire pick_a = take_max == a_bigger; // @[AtomicAutomata.scala:124:42, :143:27, :144:31]
wire [63:0] _arith_out_T = pick_a ? cam_a_0_bits_data : cam_d_0_data; // @[AtomicAutomata.scala:83:24, :84:24, :144:31, :145:50]
wire [63:0] arith_out = adder ? adder_out : _arith_out_T; // @[AtomicAutomata.scala:125:39, :140:33, :145:{28,50}]
wire _amo_data_T = cam_a_0_bits_opcode[0]; // @[AtomicAutomata.scala:83:24, :151:34]
wire [63:0] amo_data = _amo_data_T ? logic_out : arith_out; // @[AtomicAutomata.scala:120:28, :145:28, :151:{14,34}]
wire [63:0] source_c_bits_a_data = amo_data; // @[Edges.scala:480:17]
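  // amo_data: opcode bit 0 distinguishes LogicalData (3) from ArithmeticData (2), so the AMO
  // result is taken from logic_out or arith_out accordingly and becomes the payload of the
  // write-back Put issued on the source_c channel below.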
wire _source_i_ready_T; // @[Arbiter.scala:94:31]
wire _source_i_valid_T; // @[AtomicAutomata.scala:157:38]
wire [2:0] source_i_bits_opcode; // @[AtomicAutomata.scala:154:28]
wire [2:0] source_i_bits_param; // @[AtomicAutomata.scala:154:28]
wire source_i_ready; // @[AtomicAutomata.scala:154:28]
wire source_i_valid; // @[AtomicAutomata.scala:154:28]
wire _a_allow_T = ~a_cam_busy; // @[AtomicAutomata.scala:111:96, :155:23]
wire _a_allow_T_1 = a_isSupported | cam_free_0; // @[AtomicAutomata.scala:86:44, :98:32, :155:53]
wire a_allow = _a_allow_T & _a_allow_T_1; // @[AtomicAutomata.scala:155:{23,35,53}]
assign _nodeIn_a_ready_T = source_i_ready & a_allow; // @[AtomicAutomata.scala:154:28, :155:35, :156:38]
assign nodeIn_a_ready = _nodeIn_a_ready_T; // @[AtomicAutomata.scala:156:38]
assign _source_i_valid_T = nodeIn_a_valid & a_allow; // @[AtomicAutomata.scala:155:35, :157:38]
assign source_i_valid = _source_i_valid_T; // @[AtomicAutomata.scala:154:28, :157:38]
assign source_i_bits_opcode = a_isSupported ? nodeIn_a_bits_opcode : 3'h4; // @[AtomicAutomata.scala:98:32, :154:28, :158:24, :159:31, :160:32]
assign source_i_bits_param = a_isSupported ? nodeIn_a_bits_param : 3'h0; // @[AtomicAutomata.scala:98:32, :154:28, :158:24, :159:31, :161:32]
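  // source_i forwards the original A-channel request toward the outer port. Acceptance is
  // gated by a_allow (no CAM address conflict, and either native atomic support or a free CAM
  // entry); unsupported atomics are downgraded here to a Get (opcode 4, param 0) whose
  // response will be combined with the CAM'd operand and written back via source_c.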
wire _source_c_ready_T; // @[Arbiter.scala:94:31]
wire [7:0] source_c_bits_a_mask; // @[Edges.scala:480:17]
wire source_c_bits_a_corrupt; // @[Edges.scala:480:17]
wire [3:0] source_c_bits_size; // @[AtomicAutomata.scala:165:28]
wire [7:0] source_c_bits_source; // @[AtomicAutomata.scala:165:28]
wire [28:0] source_c_bits_address; // @[AtomicAutomata.scala:165:28]
wire [7:0] source_c_bits_mask; // @[AtomicAutomata.scala:165:28]
wire [63:0] source_c_bits_data; // @[AtomicAutomata.scala:165:28]
wire source_c_bits_corrupt; // @[AtomicAutomata.scala:165:28]
wire source_c_ready; // @[AtomicAutomata.scala:165:28]
wire _source_c_bits_T = cam_a_0_bits_corrupt | cam_d_0_corrupt; // @[AtomicAutomata.scala:83:24, :84:24, :172:45]
assign source_c_bits_a_corrupt = _source_c_bits_T; // @[Edges.scala:480:17]
wire _source_c_bits_legal_T_1 = cam_a_0_bits_size < 4'hD; // @[AtomicAutomata.scala:83:24]
wire _source_c_bits_legal_T_2 = _source_c_bits_legal_T_1; // @[Parameters.scala:92:{33,38}]
wire _source_c_bits_legal_T_3 = _source_c_bits_legal_T_2; // @[Parameters.scala:684:29]
wire [28:0] _source_c_bits_legal_T_4 = {cam_a_0_bits_address[28:14], cam_a_0_bits_address[13:0] ^ 14'h3000}; // @[AtomicAutomata.scala:83:24]
wire [29:0] _source_c_bits_legal_T_5 = {1'h0, _source_c_bits_legal_T_4}; // @[Parameters.scala:137:{31,41}]
wire [29:0] _source_c_bits_legal_T_6 = _source_c_bits_legal_T_5 & 30'h1A113000; // @[Parameters.scala:137:{41,46}]
wire [29:0] _source_c_bits_legal_T_7 = _source_c_bits_legal_T_6; // @[Parameters.scala:137:46]
wire _source_c_bits_legal_T_8 = _source_c_bits_legal_T_7 == 30'h0; // @[Parameters.scala:137:{46,59}]
wire _source_c_bits_legal_T_9 = _source_c_bits_legal_T_3 & _source_c_bits_legal_T_8; // @[Parameters.scala:684:{29,54}]
wire _source_c_bits_legal_T_63 = _source_c_bits_legal_T_9; // @[Parameters.scala:684:54, :686:26]
wire _source_c_bits_legal_T_11 = cam_a_0_bits_size < 4'h7; // @[AtomicAutomata.scala:83:24]
wire _source_c_bits_legal_T_12 = _source_c_bits_legal_T_11; // @[Parameters.scala:92:{33,38}]
wire _source_c_bits_legal_T_13 = _source_c_bits_legal_T_12; // @[Parameters.scala:684:29]
wire [29:0] _source_c_bits_legal_T_15 = {1'h0, _source_c_bits_legal_T_14}; // @[Parameters.scala:137:{31,41}]
wire [29:0] _source_c_bits_legal_T_16 = _source_c_bits_legal_T_15 & 30'h1A112000; // @[Parameters.scala:137:{41,46}]
wire [29:0] _source_c_bits_legal_T_17 = _source_c_bits_legal_T_16; // @[Parameters.scala:137:46]
wire _source_c_bits_legal_T_18 = _source_c_bits_legal_T_17 == 30'h0; // @[Parameters.scala:137:{46,59}]
wire [28:0] _source_c_bits_legal_T_19 = {cam_a_0_bits_address[28:21], cam_a_0_bits_address[20:0] ^ 21'h100000}; // @[AtomicAutomata.scala:83:24]
wire [29:0] _source_c_bits_legal_T_20 = {1'h0, _source_c_bits_legal_T_19}; // @[Parameters.scala:137:{31,41}]
wire [29:0] _source_c_bits_legal_T_21 = _source_c_bits_legal_T_20 & 30'h1A103000; // @[Parameters.scala:137:{41,46}]
wire [29:0] _source_c_bits_legal_T_22 = _source_c_bits_legal_T_21; // @[Parameters.scala:137:46]
wire _source_c_bits_legal_T_23 = _source_c_bits_legal_T_22 == 30'h0; // @[Parameters.scala:137:{46,59}]
wire [28:0] _source_c_bits_legal_T_24 = {cam_a_0_bits_address[28:26], cam_a_0_bits_address[25:0] ^ 26'h2000000}; // @[AtomicAutomata.scala:83:24]
wire [29:0] _source_c_bits_legal_T_25 = {1'h0, _source_c_bits_legal_T_24}; // @[Parameters.scala:137:{31,41}]
wire [29:0] _source_c_bits_legal_T_26 = _source_c_bits_legal_T_25 & 30'h1A110000; // @[Parameters.scala:137:{41,46}]
wire [29:0] _source_c_bits_legal_T_27 = _source_c_bits_legal_T_26; // @[Parameters.scala:137:46]
wire _source_c_bits_legal_T_28 = _source_c_bits_legal_T_27 == 30'h0; // @[Parameters.scala:137:{46,59}]
wire [28:0] _source_c_bits_legal_T_29 = {cam_a_0_bits_address[28:26], cam_a_0_bits_address[25:0] ^ 26'h2010000}; // @[AtomicAutomata.scala:83:24]
wire [29:0] _source_c_bits_legal_T_30 = {1'h0, _source_c_bits_legal_T_29}; // @[Parameters.scala:137:{31,41}]
wire [29:0] _source_c_bits_legal_T_31 = _source_c_bits_legal_T_30 & 30'h1A113000; // @[Parameters.scala:137:{41,46}]
wire [29:0] _source_c_bits_legal_T_32 = _source_c_bits_legal_T_31; // @[Parameters.scala:137:46]
wire _source_c_bits_legal_T_33 = _source_c_bits_legal_T_32 == 30'h0; // @[Parameters.scala:137:{46,59}]
wire [28:0] _source_c_bits_legal_T_34 = {cam_a_0_bits_address[28], cam_a_0_bits_address[27:0] ^ 28'h8000000}; // @[AtomicAutomata.scala:83:24]
wire [29:0] _source_c_bits_legal_T_35 = {1'h0, _source_c_bits_legal_T_34}; // @[Parameters.scala:137:{31,41}]
wire [29:0] _source_c_bits_legal_T_36 = _source_c_bits_legal_T_35 & 30'h18000000; // @[Parameters.scala:137:{41,46}]
wire [29:0] _source_c_bits_legal_T_37 = _source_c_bits_legal_T_36; // @[Parameters.scala:137:46]
wire _source_c_bits_legal_T_38 = _source_c_bits_legal_T_37 == 30'h0; // @[Parameters.scala:137:{46,59}]
wire [28:0] _source_c_bits_legal_T_39 = cam_a_0_bits_address ^ 29'h10000000; // @[AtomicAutomata.scala:83:24]
wire [29:0] _source_c_bits_legal_T_40 = {1'h0, _source_c_bits_legal_T_39}; // @[Parameters.scala:137:{31,41}]
wire [29:0] _source_c_bits_legal_T_41 = _source_c_bits_legal_T_40 & 30'h1A113000; // @[Parameters.scala:137:{41,46}]
wire [29:0] _source_c_bits_legal_T_42 = _source_c_bits_legal_T_41; // @[Parameters.scala:137:46]
wire _source_c_bits_legal_T_43 = _source_c_bits_legal_T_42 == 30'h0; // @[Parameters.scala:137:{46,59}]
wire [28:0] _source_c_bits_legal_T_44 = cam_a_0_bits_address ^ 29'h10012000; // @[AtomicAutomata.scala:83:24]
wire [29:0] _source_c_bits_legal_T_45 = {1'h0, _source_c_bits_legal_T_44}; // @[Parameters.scala:137:{31,41}]
wire [29:0] _source_c_bits_legal_T_46 = _source_c_bits_legal_T_45 & 30'h1A113000; // @[Parameters.scala:137:{41,46}]
wire [29:0] _source_c_bits_legal_T_47 = _source_c_bits_legal_T_46; // @[Parameters.scala:137:46]
wire _source_c_bits_legal_T_48 = _source_c_bits_legal_T_47 == 30'h0; // @[Parameters.scala:137:{46,59}]
wire _source_c_bits_legal_T_49 = _source_c_bits_legal_T_18 | _source_c_bits_legal_T_23; // @[Parameters.scala:685:42]
wire _source_c_bits_legal_T_50 = _source_c_bits_legal_T_49 | _source_c_bits_legal_T_28; // @[Parameters.scala:685:42]
wire _source_c_bits_legal_T_51 = _source_c_bits_legal_T_50 | _source_c_bits_legal_T_33; // @[Parameters.scala:685:42]
wire _source_c_bits_legal_T_52 = _source_c_bits_legal_T_51 | _source_c_bits_legal_T_38; // @[Parameters.scala:685:42]
wire _source_c_bits_legal_T_53 = _source_c_bits_legal_T_52 | _source_c_bits_legal_T_43; // @[Parameters.scala:685:42]
wire _source_c_bits_legal_T_54 = _source_c_bits_legal_T_53 | _source_c_bits_legal_T_48; // @[Parameters.scala:685:42]
wire _source_c_bits_legal_T_55 = _source_c_bits_legal_T_13 & _source_c_bits_legal_T_54; // @[Parameters.scala:684:{29,54}, :685:42]
wire [28:0] _source_c_bits_legal_T_57 = {cam_a_0_bits_address[28:17], cam_a_0_bits_address[16:0] ^ 17'h10000}; // @[AtomicAutomata.scala:83:24]
wire [29:0] _source_c_bits_legal_T_58 = {1'h0, _source_c_bits_legal_T_57}; // @[Parameters.scala:137:{31,41}]
wire [29:0] _source_c_bits_legal_T_59 = _source_c_bits_legal_T_58 & 30'h1A110000; // @[Parameters.scala:137:{41,46}]
wire [29:0] _source_c_bits_legal_T_60 = _source_c_bits_legal_T_59; // @[Parameters.scala:137:46]
wire _source_c_bits_legal_T_61 = _source_c_bits_legal_T_60 == 30'h0; // @[Parameters.scala:137:{46,59}]
wire _source_c_bits_legal_T_64 = _source_c_bits_legal_T_63 | _source_c_bits_legal_T_55; // @[Parameters.scala:684:54, :686:26]
wire source_c_bits_legal = _source_c_bits_legal_T_64; // @[Parameters.scala:686:26]
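  // source_c_bits_legal: address-decode check (the Parameters.scala range terms above) that
  // the write-back Put generated for the AMO targets an address range and transfer size the
  // downstream manager accepts.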
assign source_c_bits_size = source_c_bits_a_size; // @[Edges.scala:480:17]
assign source_c_bits_source = source_c_bits_a_source; // @[Edges.scala:480:17]
assign source_c_bits_address = source_c_bits_a_address; // @[Edges.scala:480:17]
wire [7:0] _source_c_bits_a_mask_T; // @[Misc.scala:222:10]
assign source_c_bits_mask = source_c_bits_a_mask; // @[Edges.scala:480:17]
assign source_c_bits_data = source_c_bits_a_data; // @[Edges.scala:480:17]
assign source_c_bits_corrupt = source_c_bits_a_corrupt; // @[Edges.scala:480:17]
wire [1:0] source_c_bits_a_mask_sizeOH_shiftAmount = _source_c_bits_a_mask_sizeOH_T[1:0]; // @[OneHot.scala:64:49]
wire [3:0] _source_c_bits_a_mask_sizeOH_T_1 = 4'h1 << source_c_bits_a_mask_sizeOH_shiftAmount; // @[OneHot.scala:64:49, :65:12]
wire [2:0] _source_c_bits_a_mask_sizeOH_T_2 = _source_c_bits_a_mask_sizeOH_T_1[2:0]; // @[OneHot.scala:65:{12,27}]
wire [2:0] source_c_bits_a_mask_sizeOH = {_source_c_bits_a_mask_sizeOH_T_2[2:1], 1'h1}; // @[OneHot.scala:65:27]
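// The tree below expands the size one-hot and the low address bits into the 8-bit
// byte-lane mask for one beat (one bit per byte of the 8-byte beat, Misc.scala mask generation).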
wire source_c_bits_a_mask_sub_sub_sub_0_1 = cam_a_0_bits_size > 4'h2; // @[Misc.scala:206:21]
wire source_c_bits_a_mask_sub_sub_size = source_c_bits_a_mask_sizeOH[2]; // @[Misc.scala:202:81, :209:26]
wire source_c_bits_a_mask_sub_sub_bit = cam_a_0_bits_address[2]; // @[Misc.scala:210:26]
wire source_c_bits_a_mask_sub_sub_1_2 = source_c_bits_a_mask_sub_sub_bit; // @[Misc.scala:210:26, :214:27]
wire source_c_bits_a_mask_sub_sub_nbit = ~source_c_bits_a_mask_sub_sub_bit; // @[Misc.scala:210:26, :211:20]
wire source_c_bits_a_mask_sub_sub_0_2 = source_c_bits_a_mask_sub_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire _source_c_bits_a_mask_sub_sub_acc_T = source_c_bits_a_mask_sub_sub_size & source_c_bits_a_mask_sub_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire source_c_bits_a_mask_sub_sub_0_1 = source_c_bits_a_mask_sub_sub_sub_0_1 | _source_c_bits_a_mask_sub_sub_acc_T; // @[Misc.scala:206:21, :215:{29,38}]
wire _source_c_bits_a_mask_sub_sub_acc_T_1 = source_c_bits_a_mask_sub_sub_size & source_c_bits_a_mask_sub_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire source_c_bits_a_mask_sub_sub_1_1 = source_c_bits_a_mask_sub_sub_sub_0_1 | _source_c_bits_a_mask_sub_sub_acc_T_1; // @[Misc.scala:206:21, :215:{29,38}]
wire source_c_bits_a_mask_sub_size = source_c_bits_a_mask_sizeOH[1]; // @[Misc.scala:202:81, :209:26]
wire source_c_bits_a_mask_sub_bit = cam_a_0_bits_address[1]; // @[Misc.scala:210:26]
wire source_c_bits_a_mask_sub_nbit = ~source_c_bits_a_mask_sub_bit; // @[Misc.scala:210:26, :211:20]
wire source_c_bits_a_mask_sub_0_2 = source_c_bits_a_mask_sub_sub_0_2 & source_c_bits_a_mask_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire _source_c_bits_a_mask_sub_acc_T = source_c_bits_a_mask_sub_size & source_c_bits_a_mask_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire source_c_bits_a_mask_sub_0_1 = source_c_bits_a_mask_sub_sub_0_1 | _source_c_bits_a_mask_sub_acc_T; // @[Misc.scala:215:{29,38}]
wire source_c_bits_a_mask_sub_1_2 = source_c_bits_a_mask_sub_sub_0_2 & source_c_bits_a_mask_sub_bit; // @[Misc.scala:210:26, :214:27]
wire _source_c_bits_a_mask_sub_acc_T_1 = source_c_bits_a_mask_sub_size & source_c_bits_a_mask_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire source_c_bits_a_mask_sub_1_1 = source_c_bits_a_mask_sub_sub_0_1 | _source_c_bits_a_mask_sub_acc_T_1; // @[Misc.scala:215:{29,38}]
wire source_c_bits_a_mask_sub_2_2 = source_c_bits_a_mask_sub_sub_1_2 & source_c_bits_a_mask_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire _source_c_bits_a_mask_sub_acc_T_2 = source_c_bits_a_mask_sub_size & source_c_bits_a_mask_sub_2_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire source_c_bits_a_mask_sub_2_1 = source_c_bits_a_mask_sub_sub_1_1 | _source_c_bits_a_mask_sub_acc_T_2; // @[Misc.scala:215:{29,38}]
wire source_c_bits_a_mask_sub_3_2 = source_c_bits_a_mask_sub_sub_1_2 & source_c_bits_a_mask_sub_bit; // @[Misc.scala:210:26, :214:27]
wire _source_c_bits_a_mask_sub_acc_T_3 = source_c_bits_a_mask_sub_size & source_c_bits_a_mask_sub_3_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire source_c_bits_a_mask_sub_3_1 = source_c_bits_a_mask_sub_sub_1_1 | _source_c_bits_a_mask_sub_acc_T_3; // @[Misc.scala:215:{29,38}]
wire source_c_bits_a_mask_size = source_c_bits_a_mask_sizeOH[0]; // @[Misc.scala:202:81, :209:26]
wire source_c_bits_a_mask_bit = cam_a_0_bits_address[0]; // @[Misc.scala:210:26]
wire source_c_bits_a_mask_nbit = ~source_c_bits_a_mask_bit; // @[Misc.scala:210:26, :211:20]
wire source_c_bits_a_mask_eq = source_c_bits_a_mask_sub_0_2 & source_c_bits_a_mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _source_c_bits_a_mask_acc_T = source_c_bits_a_mask_size & source_c_bits_a_mask_eq; // @[Misc.scala:209:26, :214:27, :215:38]
wire source_c_bits_a_mask_acc = source_c_bits_a_mask_sub_0_1 | _source_c_bits_a_mask_acc_T; // @[Misc.scala:215:{29,38}]
wire source_c_bits_a_mask_eq_1 = source_c_bits_a_mask_sub_0_2 & source_c_bits_a_mask_bit; // @[Misc.scala:210:26, :214:27]
wire _source_c_bits_a_mask_acc_T_1 = source_c_bits_a_mask_size & source_c_bits_a_mask_eq_1; // @[Misc.scala:209:26, :214:27, :215:38]
wire source_c_bits_a_mask_acc_1 = source_c_bits_a_mask_sub_0_1 | _source_c_bits_a_mask_acc_T_1; // @[Misc.scala:215:{29,38}]
wire source_c_bits_a_mask_eq_2 = source_c_bits_a_mask_sub_1_2 & source_c_bits_a_mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _source_c_bits_a_mask_acc_T_2 = source_c_bits_a_mask_size & source_c_bits_a_mask_eq_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire source_c_bits_a_mask_acc_2 = source_c_bits_a_mask_sub_1_1 | _source_c_bits_a_mask_acc_T_2; // @[Misc.scala:215:{29,38}]
wire source_c_bits_a_mask_eq_3 = source_c_bits_a_mask_sub_1_2 & source_c_bits_a_mask_bit; // @[Misc.scala:210:26, :214:27]
wire _source_c_bits_a_mask_acc_T_3 = source_c_bits_a_mask_size & source_c_bits_a_mask_eq_3; // @[Misc.scala:209:26, :214:27, :215:38]
wire source_c_bits_a_mask_acc_3 = source_c_bits_a_mask_sub_1_1 | _source_c_bits_a_mask_acc_T_3; // @[Misc.scala:215:{29,38}]
wire source_c_bits_a_mask_eq_4 = source_c_bits_a_mask_sub_2_2 & source_c_bits_a_mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _source_c_bits_a_mask_acc_T_4 = source_c_bits_a_mask_size & source_c_bits_a_mask_eq_4; // @[Misc.scala:209:26, :214:27, :215:38]
wire source_c_bits_a_mask_acc_4 = source_c_bits_a_mask_sub_2_1 | _source_c_bits_a_mask_acc_T_4; // @[Misc.scala:215:{29,38}]
wire source_c_bits_a_mask_eq_5 = source_c_bits_a_mask_sub_2_2 & source_c_bits_a_mask_bit; // @[Misc.scala:210:26, :214:27]
wire _source_c_bits_a_mask_acc_T_5 = source_c_bits_a_mask_size & source_c_bits_a_mask_eq_5; // @[Misc.scala:209:26, :214:27, :215:38]
wire source_c_bits_a_mask_acc_5 = source_c_bits_a_mask_sub_2_1 | _source_c_bits_a_mask_acc_T_5; // @[Misc.scala:215:{29,38}]
wire source_c_bits_a_mask_eq_6 = source_c_bits_a_mask_sub_3_2 & source_c_bits_a_mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _source_c_bits_a_mask_acc_T_6 = source_c_bits_a_mask_size & source_c_bits_a_mask_eq_6; // @[Misc.scala:209:26, :214:27, :215:38]
wire source_c_bits_a_mask_acc_6 = source_c_bits_a_mask_sub_3_1 | _source_c_bits_a_mask_acc_T_6; // @[Misc.scala:215:{29,38}]
wire source_c_bits_a_mask_eq_7 = source_c_bits_a_mask_sub_3_2 & source_c_bits_a_mask_bit; // @[Misc.scala:210:26, :214:27]
wire _source_c_bits_a_mask_acc_T_7 = source_c_bits_a_mask_size & source_c_bits_a_mask_eq_7; // @[Misc.scala:209:26, :214:27, :215:38]
wire source_c_bits_a_mask_acc_7 = source_c_bits_a_mask_sub_3_1 | _source_c_bits_a_mask_acc_T_7; // @[Misc.scala:215:{29,38}]
wire [1:0] source_c_bits_a_mask_lo_lo = {source_c_bits_a_mask_acc_1, source_c_bits_a_mask_acc}; // @[Misc.scala:215:29, :222:10]
wire [1:0] source_c_bits_a_mask_lo_hi = {source_c_bits_a_mask_acc_3, source_c_bits_a_mask_acc_2}; // @[Misc.scala:215:29, :222:10]
wire [3:0] source_c_bits_a_mask_lo = {source_c_bits_a_mask_lo_hi, source_c_bits_a_mask_lo_lo}; // @[Misc.scala:222:10]
wire [1:0] source_c_bits_a_mask_hi_lo = {source_c_bits_a_mask_acc_5, source_c_bits_a_mask_acc_4}; // @[Misc.scala:215:29, :222:10]
wire [1:0] source_c_bits_a_mask_hi_hi = {source_c_bits_a_mask_acc_7, source_c_bits_a_mask_acc_6}; // @[Misc.scala:215:29, :222:10]
wire [3:0] source_c_bits_a_mask_hi = {source_c_bits_a_mask_hi_hi, source_c_bits_a_mask_hi_lo}; // @[Misc.scala:222:10]
assign _source_c_bits_a_mask_T = {source_c_bits_a_mask_hi, source_c_bits_a_mask_lo}; // @[Misc.scala:222:10]
assign source_c_bits_a_mask = _source_c_bits_a_mask_T; // @[Misc.scala:222:10]
wire [26:0] _decode_T = 27'hFFF << nodeIn_a_bits_size; // @[package.scala:243:71]
wire [11:0] _decode_T_1 = _decode_T[11:0]; // @[package.scala:243:{71,76}]
wire [11:0] _decode_T_2 = ~_decode_T_1; // @[package.scala:243:{46,76}]
wire [8:0] decode = _decode_T_2[11:3]; // @[package.scala:243:46]
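// decode = ((1 << a_size) - 1) >> 3: the number of 8-byte beats in this request, minus one.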
wire _opdata_T = nodeIn_a_bits_opcode[2]; // @[Edges.scala:92:37]
wire opdata = ~_opdata_T; // @[Edges.scala:92:{28,37}]
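// Lock-based arbiter (Arbiter.scala) between source_c and source_i for the outgoing A channel:
// beatsLeft counts the remaining beats of the burst currently holding the grant.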
reg [8:0] beatsLeft; // @[Arbiter.scala:60:30]
wire idle = beatsLeft == 9'h0; // @[Arbiter.scala:60:30, :61:28]
wire latch = idle & nodeOut_a_ready; // @[Arbiter.scala:61:28, :62:24]
wire [1:0] _readys_T = {source_i_valid, source_c_valid}; // @[AtomicAutomata.scala:154:28, :165:28]
wire [2:0] _readys_T_1 = {_readys_T, 1'h0}; // @[package.scala:253:48]
wire [1:0] _readys_T_2 = _readys_T_1[1:0]; // @[package.scala:253:{48,53}]
wire [1:0] _readys_T_3 = _readys_T | _readys_T_2; // @[package.scala:253:{43,53}]
wire [1:0] _readys_T_4 = _readys_T_3; // @[package.scala:253:43, :254:17]
wire [2:0] _readys_T_5 = {_readys_T_4, 1'h0}; // @[package.scala:254:17]
wire [1:0] _readys_T_6 = _readys_T_5[1:0]; // @[Arbiter.scala:16:{78,83}]
wire [1:0] _readys_T_7 = ~_readys_T_6; // @[Arbiter.scala:16:{61,83}]
wire _readys_T_8 = _readys_T_7[0]; // @[Arbiter.scala:16:61, :68:76]
wire readys_0 = _readys_T_8; // @[Arbiter.scala:68:{27,76}]
wire _readys_T_9 = _readys_T_7[1]; // @[Arbiter.scala:16:61, :68:76]
wire readys_1 = _readys_T_9; // @[Arbiter.scala:68:{27,76}]
wire _winner_T = readys_0 & source_c_valid; // @[AtomicAutomata.scala:165:28]
wire winner_0 = _winner_T; // @[Arbiter.scala:71:{27,69}]
wire _winner_T_1 = readys_1 & source_i_valid; // @[AtomicAutomata.scala:154:28]
wire winner_1 = _winner_T_1; // @[Arbiter.scala:71:{27,69}]
wire prefixOR_1 = winner_0; // @[Arbiter.scala:71:27, :76:48]
wire _prefixOR_T = prefixOR_1 | winner_1; // @[Arbiter.scala:71:27, :76:48]
wire _nodeOut_a_valid_T = source_c_valid | source_i_valid; // @[AtomicAutomata.scala:154:28, :165:28]
Generate the Verilog code corresponding to the following Chisel files.
File Monitor.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceLine
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import freechips.rocketchip.diplomacy.EnableMonitors
import freechips.rocketchip.formal.{MonitorDirection, IfThen, Property, PropertyClass, TestplanTestType, TLMonitorStrictMode}
import freechips.rocketchip.util.PlusArg
case class TLMonitorArgs(edge: TLEdge)
abstract class TLMonitorBase(args: TLMonitorArgs) extends Module
{
val io = IO(new Bundle {
val in = Input(new TLBundle(args.edge.bundle))
})
def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit
legalize(io.in, args.edge, reset)
}
object TLMonitor {
def apply(enable: Boolean, node: TLNode)(implicit p: Parameters): TLNode = {
if (enable) {
EnableMonitors { implicit p => node := TLEphemeralNode()(ValName("monitor")) }
} else { node }
}
}
class TLMonitor(args: TLMonitorArgs, monitorDir: MonitorDirection = MonitorDirection.Monitor) extends TLMonitorBase(args)
{
require (args.edge.params(TLMonitorStrictMode) || (! args.edge.params(TestplanTestType).formal))
val cover_prop_class = PropertyClass.Default
//Like assert but can flip to being an assumption for formal verification
def monAssert(cond: Bool, message: String): Unit =
if (monitorDir == MonitorDirection.Monitor) {
assert(cond, message)
} else {
Property(monitorDir, cond, message, PropertyClass.Default)
}
def assume(cond: Bool, message: String): Unit =
if (monitorDir == MonitorDirection.Monitor) {
assert(cond, message)
} else {
Property(monitorDir.flip, cond, message, PropertyClass.Default)
}
def extra = {
args.edge.sourceInfo match {
case SourceLine(filename, line, col) => s" (connected at $filename:$line:$col)"
case _ => ""
}
}
def visible(address: UInt, source: UInt, edge: TLEdge) =
edge.client.clients.map { c =>
!c.sourceId.contains(source) ||
c.visibility.map(_.contains(address)).reduce(_ || _)
}.reduce(_ && _)
def legalizeFormatA(bundle: TLBundleA, edge: TLEdge): Unit = {
//switch this flag to turn on diplomacy in error messages
def diplomacyInfo = if (true) "" else "\nThe diplomacy information for the edge is as follows:\n" + edge.formatEdge + "\n"
monAssert (TLMessages.isA(bundle.opcode), "'A' channel has invalid opcode" + extra)
// Reuse these subexpressions to save some firrtl lines
val source_ok = edge.client.contains(bundle.source)
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val mask = edge.full_mask(bundle)
monAssert (visible(edge.address(bundle), bundle.source, edge), "'A' channel carries an address illegal for the specified bank visibility")
//The monitor doesn’t check for acquire T vs acquire B, it assumes that acquire B implies acquire T and only checks for acquire B
//TODO: check for acquireT?
when (bundle.opcode === TLMessages.AcquireBlock) {
monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock from a client which does not support Probe" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel AcquireBlock carries invalid source ID" + diplomacyInfo + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquireBlock smaller than a beat" + extra)
monAssert (is_aligned, "'A' channel AcquireBlock address not aligned to size" + extra)
monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquireBlock carries invalid grow param" + extra)
monAssert (~bundle.mask === 0.U, "'A' channel AcquireBlock contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel AcquireBlock is corrupt" + extra)
}
when (bundle.opcode === TLMessages.AcquirePerm) {
monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm from a client which does not support Probe" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel AcquirePerm carries invalid source ID" + diplomacyInfo + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquirePerm smaller than a beat" + extra)
monAssert (is_aligned, "'A' channel AcquirePerm address not aligned to size" + extra)
monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquirePerm carries invalid grow param" + extra)
monAssert (bundle.param =/= TLPermissions.NtoB, "'A' channel AcquirePerm requests NtoB" + extra)
monAssert (~bundle.mask === 0.U, "'A' channel AcquirePerm contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel AcquirePerm is corrupt" + extra)
}
when (bundle.opcode === TLMessages.Get) {
monAssert (edge.master.emitsGet(bundle.source, bundle.size), "'A' channel carries Get type which master claims it can't emit" + diplomacyInfo + extra)
monAssert (edge.slave.supportsGetSafe(edge.address(bundle), bundle.size, None), "'A' channel carries Get type which slave claims it can't support" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel Get carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Get address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel Get carries invalid param" + extra)
monAssert (bundle.mask === mask, "'A' channel Get contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel Get is corrupt" + extra)
}
when (bundle.opcode === TLMessages.PutFullData) {
monAssert (edge.master.emitsPutFull(bundle.source, bundle.size) && edge.slave.supportsPutFullSafe(edge.address(bundle), bundle.size), "'A' channel carries PutFull type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel PutFull carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel PutFull address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel PutFull carries invalid param" + extra)
monAssert (bundle.mask === mask, "'A' channel PutFull contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.PutPartialData) {
monAssert (edge.master.emitsPutPartial(bundle.source, bundle.size) && edge.slave.supportsPutPartialSafe(edge.address(bundle), bundle.size), "'A' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel PutPartial carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel PutPartial address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel PutPartial carries invalid param" + extra)
monAssert ((bundle.mask & ~mask) === 0.U, "'A' channel PutPartial contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.ArithmeticData) {
monAssert (edge.master.emitsArithmetic(bundle.source, bundle.size) && edge.slave.supportsArithmeticSafe(edge.address(bundle), bundle.size), "'A' channel carries Arithmetic type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Arithmetic carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Arithmetic address not aligned to size" + extra)
monAssert (TLAtomics.isArithmetic(bundle.param), "'A' channel Arithmetic carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Arithmetic contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.LogicalData) {
monAssert (edge.master.emitsLogical(bundle.source, bundle.size) && edge.slave.supportsLogicalSafe(edge.address(bundle), bundle.size), "'A' channel carries Logical type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Logical carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Logical address not aligned to size" + extra)
monAssert (TLAtomics.isLogical(bundle.param), "'A' channel Logical carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Logical contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.Hint) {
monAssert (edge.master.emitsHint(bundle.source, bundle.size) && edge.slave.supportsHintSafe(edge.address(bundle), bundle.size), "'A' channel carries Hint type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Hint carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Hint address not aligned to size" + extra)
monAssert (TLHints.isHints(bundle.param), "'A' channel Hint carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Hint contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel Hint is corrupt" + extra)
}
}
def legalizeFormatB(bundle: TLBundleB, edge: TLEdge): Unit = {
monAssert (TLMessages.isB(bundle.opcode), "'B' channel has invalid opcode" + extra)
monAssert (visible(edge.address(bundle), bundle.source, edge), "'B' channel carries an address illegal for the specified bank visibility")
// Reuse these subexpressions to save some firrtl lines
val address_ok = edge.manager.containsSafe(edge.address(bundle))
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val mask = edge.full_mask(bundle)
val legal_source = Mux1H(edge.client.find(bundle.source), edge.client.clients.map(c => c.sourceId.start.U)) === bundle.source
when (bundle.opcode === TLMessages.Probe) {
assume (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'B' channel carries Probe type which is unexpected using diplomatic parameters" + extra)
assume (address_ok, "'B' channel Probe carries unmanaged address" + extra)
assume (legal_source, "'B' channel Probe carries source that is not first source" + extra)
assume (is_aligned, "'B' channel Probe address not aligned to size" + extra)
assume (TLPermissions.isCap(bundle.param), "'B' channel Probe carries invalid cap param" + extra)
assume (bundle.mask === mask, "'B' channel Probe contains invalid mask" + extra)
assume (!bundle.corrupt, "'B' channel Probe is corrupt" + extra)
}
when (bundle.opcode === TLMessages.Get) {
monAssert (edge.master.supportsGet(edge.source(bundle), bundle.size) && edge.slave.emitsGetSafe(edge.address(bundle), bundle.size), "'B' channel carries Get type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel Get carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Get carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Get address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel Get carries invalid param" + extra)
monAssert (bundle.mask === mask, "'B' channel Get contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'B' channel Get is corrupt" + extra)
}
when (bundle.opcode === TLMessages.PutFullData) {
monAssert (edge.master.supportsPutFull(edge.source(bundle), bundle.size) && edge.slave.emitsPutFullSafe(edge.address(bundle), bundle.size), "'B' channel carries PutFull type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel PutFull carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel PutFull carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel PutFull address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel PutFull carries invalid param" + extra)
monAssert (bundle.mask === mask, "'B' channel PutFull contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.PutPartialData) {
monAssert (edge.master.supportsPutPartial(edge.source(bundle), bundle.size) && edge.slave.emitsPutPartialSafe(edge.address(bundle), bundle.size), "'B' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel PutPartial carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel PutPartial carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel PutPartial address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel PutPartial carries invalid param" + extra)
monAssert ((bundle.mask & ~mask) === 0.U, "'B' channel PutPartial contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.ArithmeticData) {
monAssert (edge.master.supportsArithmetic(edge.source(bundle), bundle.size) && edge.slave.emitsArithmeticSafe(edge.address(bundle), bundle.size), "'B' channel carries Arithmetic type unsupported by master" + extra)
monAssert (address_ok, "'B' channel Arithmetic carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Arithmetic carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Arithmetic address not aligned to size" + extra)
monAssert (TLAtomics.isArithmetic(bundle.param), "'B' channel Arithmetic carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'B' channel Arithmetic contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.LogicalData) {
monAssert (edge.master.supportsLogical(edge.source(bundle), bundle.size) && edge.slave.emitsLogicalSafe(edge.address(bundle), bundle.size), "'B' channel carries Logical type unsupported by client" + extra)
monAssert (address_ok, "'B' channel Logical carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Logical carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Logical address not aligned to size" + extra)
monAssert (TLAtomics.isLogical(bundle.param), "'B' channel Logical carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'B' channel Logical contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.Hint) {
monAssert (edge.master.supportsHint(edge.source(bundle), bundle.size) && edge.slave.emitsHintSafe(edge.address(bundle), bundle.size), "'B' channel carries Hint type unsupported by client" + extra)
monAssert (address_ok, "'B' channel Hint carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Hint carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Hint address not aligned to size" + extra)
monAssert (bundle.mask === mask, "'B' channel Hint contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'B' channel Hint is corrupt" + extra)
}
}
def legalizeFormatC(bundle: TLBundleC, edge: TLEdge): Unit = {
monAssert (TLMessages.isC(bundle.opcode), "'C' channel has invalid opcode" + extra)
val source_ok = edge.client.contains(bundle.source)
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val address_ok = edge.manager.containsSafe(edge.address(bundle))
monAssert (visible(edge.address(bundle), bundle.source, edge), "'C' channel carries an address illegal for the specified bank visibility")
when (bundle.opcode === TLMessages.ProbeAck) {
monAssert (address_ok, "'C' channel ProbeAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel ProbeAck carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAck smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ProbeAck address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAck carries invalid report param" + extra)
monAssert (!bundle.corrupt, "'C' channel ProbeAck is corrupt" + extra)
}
when (bundle.opcode === TLMessages.ProbeAckData) {
monAssert (address_ok, "'C' channel ProbeAckData carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel ProbeAckData carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAckData smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ProbeAckData address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAckData carries invalid report param" + extra)
}
when (bundle.opcode === TLMessages.Release) {
monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries Release type unsupported by manager" + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra)
monAssert (source_ok, "'C' channel Release carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel Release smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel Release address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel Release carries invalid report param" + extra)
monAssert (!bundle.corrupt, "'C' channel Release is corrupt" + extra)
}
when (bundle.opcode === TLMessages.ReleaseData) {
monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData type unsupported by manager" + extra)
      monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData from a client which does not support Probe" + extra)
monAssert (source_ok, "'C' channel ReleaseData carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ReleaseData smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ReleaseData address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ReleaseData carries invalid report param" + extra)
}
when (bundle.opcode === TLMessages.AccessAck) {
monAssert (address_ok, "'C' channel AccessAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel AccessAck carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel AccessAck address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel AccessAck carries invalid param" + extra)
monAssert (!bundle.corrupt, "'C' channel AccessAck is corrupt" + extra)
}
when (bundle.opcode === TLMessages.AccessAckData) {
monAssert (address_ok, "'C' channel AccessAckData carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel AccessAckData carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel AccessAckData address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel AccessAckData carries invalid param" + extra)
}
when (bundle.opcode === TLMessages.HintAck) {
monAssert (address_ok, "'C' channel HintAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel HintAck carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel HintAck address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel HintAck carries invalid param" + extra)
monAssert (!bundle.corrupt, "'C' channel HintAck is corrupt" + extra)
}
}
def legalizeFormatD(bundle: TLBundleD, edge: TLEdge): Unit = {
assume (TLMessages.isD(bundle.opcode), "'D' channel has invalid opcode" + extra)
val source_ok = edge.client.contains(bundle.source)
val sink_ok = bundle.sink < edge.manager.endSinkId.U
val deny_put_ok = edge.manager.mayDenyPut.B
val deny_get_ok = edge.manager.mayDenyGet.B
when (bundle.opcode === TLMessages.ReleaseAck) {
assume (source_ok, "'D' channel ReleaseAck carries invalid source ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel ReleaseAck smaller than a beat" + extra)
      assume (bundle.param === 0.U, "'D' channel ReleaseAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel ReleaseAck is corrupt" + extra)
assume (!bundle.denied, "'D' channel ReleaseAck is denied" + extra)
}
when (bundle.opcode === TLMessages.Grant) {
assume (source_ok, "'D' channel Grant carries invalid source ID" + extra)
assume (sink_ok, "'D' channel Grant carries invalid sink ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel Grant smaller than a beat" + extra)
assume (TLPermissions.isCap(bundle.param), "'D' channel Grant carries invalid cap param" + extra)
assume (bundle.param =/= TLPermissions.toN, "'D' channel Grant carries toN param" + extra)
assume (!bundle.corrupt, "'D' channel Grant is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel Grant is denied" + extra)
}
when (bundle.opcode === TLMessages.GrantData) {
assume (source_ok, "'D' channel GrantData carries invalid source ID" + extra)
assume (sink_ok, "'D' channel GrantData carries invalid sink ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel GrantData smaller than a beat" + extra)
assume (TLPermissions.isCap(bundle.param), "'D' channel GrantData carries invalid cap param" + extra)
assume (bundle.param =/= TLPermissions.toN, "'D' channel GrantData carries toN param" + extra)
assume (!bundle.denied || bundle.corrupt, "'D' channel GrantData is denied but not corrupt" + extra)
assume (deny_get_ok || !bundle.denied, "'D' channel GrantData is denied" + extra)
}
when (bundle.opcode === TLMessages.AccessAck) {
assume (source_ok, "'D' channel AccessAck carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel AccessAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel AccessAck is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel AccessAck is denied" + extra)
}
when (bundle.opcode === TLMessages.AccessAckData) {
assume (source_ok, "'D' channel AccessAckData carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel AccessAckData carries invalid param" + extra)
assume (!bundle.denied || bundle.corrupt, "'D' channel AccessAckData is denied but not corrupt" + extra)
assume (deny_get_ok || !bundle.denied, "'D' channel AccessAckData is denied" + extra)
}
when (bundle.opcode === TLMessages.HintAck) {
assume (source_ok, "'D' channel HintAck carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel HintAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel HintAck is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel HintAck is denied" + extra)
}
}
def legalizeFormatE(bundle: TLBundleE, edge: TLEdge): Unit = {
val sink_ok = bundle.sink < edge.manager.endSinkId.U
    monAssert (sink_ok, "'E' channel carries invalid sink ID" + extra)
}
def legalizeFormat(bundle: TLBundle, edge: TLEdge) = {
when (bundle.a.valid) { legalizeFormatA(bundle.a.bits, edge) }
when (bundle.d.valid) { legalizeFormatD(bundle.d.bits, edge) }
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
when (bundle.b.valid) { legalizeFormatB(bundle.b.bits, edge) }
when (bundle.c.valid) { legalizeFormatC(bundle.c.bits, edge) }
when (bundle.e.valid) { legalizeFormatE(bundle.e.bits, edge) }
} else {
monAssert (!bundle.b.valid, "'B' channel valid and not TL-C" + extra)
monAssert (!bundle.c.valid, "'C' channel valid and not TL-C" + extra)
monAssert (!bundle.e.valid, "'E' channel valid and not TL-C" + extra)
}
}
def legalizeMultibeatA(a: DecoupledIO[TLBundleA], edge: TLEdge): Unit = {
val a_first = edge.first(a.bits, a.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (a.valid && !a_first) {
monAssert (a.bits.opcode === opcode, "'A' channel opcode changed within multibeat operation" + extra)
monAssert (a.bits.param === param, "'A' channel param changed within multibeat operation" + extra)
monAssert (a.bits.size === size, "'A' channel size changed within multibeat operation" + extra)
monAssert (a.bits.source === source, "'A' channel source changed within multibeat operation" + extra)
      monAssert (a.bits.address === address, "'A' channel address changed within multibeat operation" + extra)
}
when (a.fire && a_first) {
opcode := a.bits.opcode
param := a.bits.param
size := a.bits.size
source := a.bits.source
address := a.bits.address
}
}
def legalizeMultibeatB(b: DecoupledIO[TLBundleB], edge: TLEdge): Unit = {
val b_first = edge.first(b.bits, b.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (b.valid && !b_first) {
monAssert (b.bits.opcode === opcode, "'B' channel opcode changed within multibeat operation" + extra)
monAssert (b.bits.param === param, "'B' channel param changed within multibeat operation" + extra)
monAssert (b.bits.size === size, "'B' channel size changed within multibeat operation" + extra)
monAssert (b.bits.source === source, "'B' channel source changed within multibeat operation" + extra)
      monAssert (b.bits.address === address, "'B' channel address changed within multibeat operation" + extra)
}
when (b.fire && b_first) {
opcode := b.bits.opcode
param := b.bits.param
size := b.bits.size
source := b.bits.source
address := b.bits.address
}
}
def legalizeADSourceFormal(bundle: TLBundle, edge: TLEdge): Unit = {
// Symbolic variable
val sym_source = Wire(UInt(edge.client.endSourceId.W))
// TODO: Connect sym_source to a fixed value for simulation and to a
// free wire in formal
sym_source := 0.U
// Type casting Int to UInt
val maxSourceId = Wire(UInt(edge.client.endSourceId.W))
maxSourceId := edge.client.endSourceId.U
    // Delayed version of sym_source
val sym_source_d = Reg(UInt(edge.client.endSourceId.W))
sym_source_d := sym_source
// These will be constraints for FV setup
Property(
MonitorDirection.Monitor,
(sym_source === sym_source_d),
"sym_source should remain stable",
PropertyClass.Default)
Property(
MonitorDirection.Monitor,
(sym_source <= maxSourceId),
"sym_source should take legal value",
PropertyClass.Default)
val my_resp_pend = RegInit(false.B)
val my_opcode = Reg(UInt())
val my_size = Reg(UInt())
val a_first = bundle.a.valid && edge.first(bundle.a.bits, bundle.a.fire)
val d_first = bundle.d.valid && edge.first(bundle.d.bits, bundle.d.fire)
val my_a_first_beat = a_first && (bundle.a.bits.source === sym_source)
val my_d_first_beat = d_first && (bundle.d.bits.source === sym_source)
val my_clr_resp_pend = (bundle.d.fire && my_d_first_beat)
val my_set_resp_pend = (bundle.a.fire && my_a_first_beat && !my_clr_resp_pend)
when (my_set_resp_pend) {
my_resp_pend := true.B
} .elsewhen (my_clr_resp_pend) {
my_resp_pend := false.B
}
when (my_a_first_beat) {
my_opcode := bundle.a.bits.opcode
my_size := bundle.a.bits.size
}
val my_resp_size = Mux(my_a_first_beat, bundle.a.bits.size, my_size)
val my_resp_opcode = Mux(my_a_first_beat, bundle.a.bits.opcode, my_opcode)
val my_resp_opcode_legal = Wire(Bool())
when ((my_resp_opcode === TLMessages.Get) || (my_resp_opcode === TLMessages.ArithmeticData) ||
(my_resp_opcode === TLMessages.LogicalData)) {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAckData)
} .elsewhen ((my_resp_opcode === TLMessages.PutFullData) || (my_resp_opcode === TLMessages.PutPartialData)) {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAck)
} .otherwise {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.HintAck)
}
    monAssert (IfThen(my_resp_pend, !my_a_first_beat),
      "Request message should not be sent with a source ID, for which a response message " +
      "is already pending (not received until current cycle) for a prior request message " +
      "with the same source ID" + extra)
    assume (IfThen(my_clr_resp_pend, (my_set_resp_pend || my_resp_pend)),
      "Response message should be accepted with a source ID only if a request message with the " +
      "same source ID has been accepted or is being accepted in the current cycle" + extra)
    assume (IfThen(my_d_first_beat, (my_a_first_beat || my_resp_pend)),
      "Response message should be sent with a source ID only if a request message with the " +
      "same source ID has been accepted or is being sent in the current cycle" + extra)
    assume (IfThen(my_d_first_beat, (bundle.d.bits.size === my_resp_size)),
      "If d_valid is 1, then d_size should be the same as a_size of the corresponding request " +
      "message" + extra)
    assume (IfThen(my_d_first_beat, my_resp_opcode_legal),
      "If d_valid is 1, then d_opcode should correspond with a_opcode of the corresponding " +
      "request message" + extra)
}
def legalizeMultibeatC(c: DecoupledIO[TLBundleC], edge: TLEdge): Unit = {
val c_first = edge.first(c.bits, c.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (c.valid && !c_first) {
monAssert (c.bits.opcode === opcode, "'C' channel opcode changed within multibeat operation" + extra)
monAssert (c.bits.param === param, "'C' channel param changed within multibeat operation" + extra)
monAssert (c.bits.size === size, "'C' channel size changed within multibeat operation" + extra)
monAssert (c.bits.source === source, "'C' channel source changed within multibeat operation" + extra)
      monAssert (c.bits.address === address, "'C' channel address changed within multibeat operation" + extra)
}
when (c.fire && c_first) {
opcode := c.bits.opcode
param := c.bits.param
size := c.bits.size
source := c.bits.source
address := c.bits.address
}
}
def legalizeMultibeatD(d: DecoupledIO[TLBundleD], edge: TLEdge): Unit = {
val d_first = edge.first(d.bits, d.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val sink = Reg(UInt())
val denied = Reg(Bool())
when (d.valid && !d_first) {
assume (d.bits.opcode === opcode, "'D' channel opcode changed within multibeat operation" + extra)
assume (d.bits.param === param, "'D' channel param changed within multibeat operation" + extra)
assume (d.bits.size === size, "'D' channel size changed within multibeat operation" + extra)
assume (d.bits.source === source, "'D' channel source changed within multibeat operation" + extra)
      assume (d.bits.sink === sink, "'D' channel sink changed within multibeat operation" + extra)
      assume (d.bits.denied === denied, "'D' channel denied changed within multibeat operation" + extra)
}
when (d.fire && d_first) {
opcode := d.bits.opcode
param := d.bits.param
size := d.bits.size
source := d.bits.source
sink := d.bits.sink
denied := d.bits.denied
}
}
def legalizeMultibeat(bundle: TLBundle, edge: TLEdge): Unit = {
legalizeMultibeatA(bundle.a, edge)
legalizeMultibeatD(bundle.d, edge)
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
legalizeMultibeatB(bundle.b, edge)
legalizeMultibeatC(bundle.c, edge)
}
}
//This is left in for almond which doesn't adhere to the tilelink protocol
@deprecated("Use legalizeADSource instead if possible","")
def legalizeADSourceOld(bundle: TLBundle, edge: TLEdge): Unit = {
val inflight = RegInit(0.U(edge.client.endSourceId.W))
val a_first = edge.first(bundle.a.bits, bundle.a.fire)
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
val a_set = WireInit(0.U(edge.client.endSourceId.W))
when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) {
a_set := UIntToOH(bundle.a.bits.source)
assert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra)
}
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
assume((a_set | inflight)(bundle.d.bits.source), "'D' channel acknowledged for nothing inflight" + extra)
}
if (edge.manager.minLatency > 0) {
assume(a_set =/= d_clr || !a_set.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra)
}
inflight := (inflight | a_set) & ~d_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
assert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeADSource(bundle: TLBundle, edge: TLEdge): Unit = {
val a_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset)
val a_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything
val log_a_opcode_bus_size = log2Ceil(a_opcode_bus_size)
val log_a_size_bus_size = log2Ceil(a_size_bus_size)
def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits
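    // e.g. size_to_numfullbits(3.U) === 7.U (the three low bits set)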
val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) // size up to avoid width error
inflight.suggestName("inflight")
val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
inflight_opcodes.suggestName("inflight_opcodes")
val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
inflight_sizes.suggestName("inflight_sizes")
val a_first = edge.first(bundle.a.bits, bundle.a.fire)
a_first.suggestName("a_first")
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
d_first.suggestName("d_first")
val a_set = WireInit(0.U(edge.client.endSourceId.W))
val a_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
a_set.suggestName("a_set")
a_set_wo_ready.suggestName("a_set_wo_ready")
val a_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
a_opcodes_set.suggestName("a_opcodes_set")
val a_sizes_set = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
a_sizes_set.suggestName("a_sizes_set")
val a_opcode_lookup = WireInit(0.U((a_opcode_bus_size - 1).W))
a_opcode_lookup.suggestName("a_opcode_lookup")
a_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_a_opcode_bus_size.U) & size_to_numfullbits(1.U << log_a_opcode_bus_size.U)) >> 1.U
val a_size_lookup = WireInit(0.U((1 << log_a_size_bus_size).W))
a_size_lookup.suggestName("a_size_lookup")
a_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_a_size_bus_size.U) & size_to_numfullbits(1.U << log_a_size_bus_size.U)) >> 1.U
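    // Both lookups shift the packed per-source vector so this D-channel source's slot lands at
    // bit 0, mask off a single slot, then drop the low "in-flight" flag bit (hence the >> 1)
    // to recover the opcode/size recorded when the matching A request was accepted.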
val responseMap = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.Grant, TLMessages.Grant))
val responseMapSecondOption = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.GrantData, TLMessages.Grant))
val a_opcodes_set_interm = WireInit(0.U(a_opcode_bus_size.W))
a_opcodes_set_interm.suggestName("a_opcodes_set_interm")
val a_sizes_set_interm = WireInit(0.U(a_size_bus_size.W))
a_sizes_set_interm.suggestName("a_sizes_set_interm")
when (bundle.a.valid && a_first && edge.isRequest(bundle.a.bits)) {
a_set_wo_ready := UIntToOH(bundle.a.bits.source)
}
when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) {
a_set := UIntToOH(bundle.a.bits.source)
a_opcodes_set_interm := (bundle.a.bits.opcode << 1.U) | 1.U
a_sizes_set_interm := (bundle.a.bits.size << 1.U) | 1.U
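      // Each slot is stored as (value << 1) | 1: the low bit marks the slot as occupied,
      // so an all-zero slot means no request is in flight for that source ID.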
a_opcodes_set := (a_opcodes_set_interm) << (bundle.a.bits.source << log_a_opcode_bus_size.U)
a_sizes_set := (a_sizes_set_interm) << (bundle.a.bits.source << log_a_size_bus_size.U)
monAssert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra)
}
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
d_clr.suggestName("d_clr")
d_clr_wo_ready.suggestName("d_clr_wo_ready")
val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
d_opcodes_clr.suggestName("d_opcodes_clr")
val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
d_sizes_clr.suggestName("d_sizes_clr")
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr_wo_ready := UIntToOH(bundle.d.bits.source)
}
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
d_opcodes_clr := size_to_numfullbits(1.U << log_a_opcode_bus_size.U) << (bundle.d.bits.source << log_a_opcode_bus_size.U)
d_sizes_clr := size_to_numfullbits(1.U << log_a_size_bus_size.U) << (bundle.d.bits.source << log_a_size_bus_size.U)
}
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
val same_cycle_resp = bundle.a.valid && a_first && edge.isRequest(bundle.a.bits) && (bundle.a.bits.source === bundle.d.bits.source)
assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra)
when (same_cycle_resp) {
assume((bundle.d.bits.opcode === responseMap(bundle.a.bits.opcode)) ||
(bundle.d.bits.opcode === responseMapSecondOption(bundle.a.bits.opcode)), "'D' channel contains improper opcode response" + extra)
assume((bundle.a.bits.size === bundle.d.bits.size), "'D' channel contains improper response size" + extra)
} .otherwise {
assume((bundle.d.bits.opcode === responseMap(a_opcode_lookup)) ||
(bundle.d.bits.opcode === responseMapSecondOption(a_opcode_lookup)), "'D' channel contains improper opcode response" + extra)
assume((bundle.d.bits.size === a_size_lookup), "'D' channel contains improper response size" + extra)
}
}
when(bundle.d.valid && d_first && a_first && bundle.a.valid && (bundle.a.bits.source === bundle.d.bits.source) && !d_release_ack) {
assume((!bundle.d.ready) || bundle.a.ready, "ready check")
}
if (edge.manager.minLatency > 0) {
assume(a_set_wo_ready =/= d_clr_wo_ready || !a_set_wo_ready.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra)
}
inflight := (inflight | a_set) & ~d_clr
inflight_opcodes := (inflight_opcodes | a_opcodes_set) & ~d_opcodes_clr
inflight_sizes := (inflight_sizes | a_sizes_set) & ~d_sizes_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeCDSource(bundle: TLBundle, edge: TLEdge): Unit = {
val c_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset)
val c_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything
val log_c_opcode_bus_size = log2Ceil(c_opcode_bus_size)
val log_c_size_bus_size = log2Ceil(c_size_bus_size)
def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits
val inflight = RegInit(0.U((2 max edge.client.endSourceId).W))
val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
inflight.suggestName("inflight")
inflight_opcodes.suggestName("inflight_opcodes")
inflight_sizes.suggestName("inflight_sizes")
val c_first = edge.first(bundle.c.bits, bundle.c.fire)
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
c_first.suggestName("c_first")
d_first.suggestName("d_first")
val c_set = WireInit(0.U(edge.client.endSourceId.W))
val c_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
val c_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val c_sizes_set = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
c_set.suggestName("c_set")
c_set_wo_ready.suggestName("c_set_wo_ready")
c_opcodes_set.suggestName("c_opcodes_set")
c_sizes_set.suggestName("c_sizes_set")
val c_opcode_lookup = WireInit(0.U((1 << log_c_opcode_bus_size).W))
val c_size_lookup = WireInit(0.U((1 << log_c_size_bus_size).W))
c_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_c_opcode_bus_size.U) & size_to_numfullbits(1.U << log_c_opcode_bus_size.U)) >> 1.U
c_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_c_size_bus_size.U) & size_to_numfullbits(1.U << log_c_size_bus_size.U)) >> 1.U
c_opcode_lookup.suggestName("c_opcode_lookup")
c_size_lookup.suggestName("c_size_lookup")
val c_opcodes_set_interm = WireInit(0.U(c_opcode_bus_size.W))
val c_sizes_set_interm = WireInit(0.U(c_size_bus_size.W))
c_opcodes_set_interm.suggestName("c_opcodes_set_interm")
c_sizes_set_interm.suggestName("c_sizes_set_interm")
when (bundle.c.valid && c_first && edge.isRequest(bundle.c.bits)) {
c_set_wo_ready := UIntToOH(bundle.c.bits.source)
}
when (bundle.c.fire && c_first && edge.isRequest(bundle.c.bits)) {
c_set := UIntToOH(bundle.c.bits.source)
c_opcodes_set_interm := (bundle.c.bits.opcode << 1.U) | 1.U
c_sizes_set_interm := (bundle.c.bits.size << 1.U) | 1.U
c_opcodes_set := (c_opcodes_set_interm) << (bundle.c.bits.source << log_c_opcode_bus_size.U)
c_sizes_set := (c_sizes_set_interm) << (bundle.c.bits.source << log_c_size_bus_size.U)
monAssert(!inflight(bundle.c.bits.source), "'C' channel re-used a source ID" + extra)
}
val c_probe_ack = bundle.c.bits.opcode === TLMessages.ProbeAck || bundle.c.bits.opcode === TLMessages.ProbeAckData
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
d_clr.suggestName("d_clr")
d_clr_wo_ready.suggestName("d_clr_wo_ready")
d_opcodes_clr.suggestName("d_opcodes_clr")
d_sizes_clr.suggestName("d_sizes_clr")
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
d_clr_wo_ready := UIntToOH(bundle.d.bits.source)
}
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
d_opcodes_clr := size_to_numfullbits(1.U << log_c_opcode_bus_size.U) << (bundle.d.bits.source << log_c_opcode_bus_size.U)
d_sizes_clr := size_to_numfullbits(1.U << log_c_size_bus_size.U) << (bundle.d.bits.source << log_c_size_bus_size.U)
}
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
val same_cycle_resp = bundle.c.valid && c_first && edge.isRequest(bundle.c.bits) && (bundle.c.bits.source === bundle.d.bits.source)
assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra)
when (same_cycle_resp) {
assume((bundle.d.bits.size === bundle.c.bits.size), "'D' channel contains improper response size" + extra)
} .otherwise {
assume((bundle.d.bits.size === c_size_lookup), "'D' channel contains improper response size" + extra)
}
}
when(bundle.d.valid && d_first && c_first && bundle.c.valid && (bundle.c.bits.source === bundle.d.bits.source) && d_release_ack && !c_probe_ack) {
assume((!bundle.d.ready) || bundle.c.ready, "ready check")
}
if (edge.manager.minLatency > 0) {
when (c_set_wo_ready.orR) {
assume(c_set_wo_ready =/= d_clr_wo_ready, s"'C' and 'D' concurrent, despite minlatency > 0" + extra)
}
}
inflight := (inflight | c_set) & ~d_clr
inflight_opcodes := (inflight_opcodes | c_opcodes_set) & ~d_opcodes_clr
inflight_sizes := (inflight_sizes | c_sizes_set) & ~d_sizes_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.c.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeDESink(bundle: TLBundle, edge: TLEdge): Unit = {
val inflight = RegInit(0.U(edge.manager.endSinkId.W))
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
val e_first = true.B
val d_set = WireInit(0.U(edge.manager.endSinkId.W))
when (bundle.d.fire && d_first && edge.isRequest(bundle.d.bits)) {
d_set := UIntToOH(bundle.d.bits.sink)
assume(!inflight(bundle.d.bits.sink), "'D' channel re-used a sink ID" + extra)
}
val e_clr = WireInit(0.U(edge.manager.endSinkId.W))
when (bundle.e.fire && e_first && edge.isResponse(bundle.e.bits)) {
e_clr := UIntToOH(bundle.e.bits.sink)
monAssert((d_set | inflight)(bundle.e.bits.sink), "'E' channel acknowledged for nothing inflight" + extra)
}
// edge.client.minLatency applies to BC, not DE
inflight := (inflight | d_set) & ~e_clr
}
def legalizeUnique(bundle: TLBundle, edge: TLEdge): Unit = {
val sourceBits = log2Ceil(edge.client.endSourceId)
val tooBig = 14 // >16kB worth of flight information gets to be too much
if (sourceBits > tooBig) {
println(s"WARNING: TLMonitor instantiated on a bus with source bits (${sourceBits}) > ${tooBig}; A=>D transaction flight will not be checked")
} else {
if (args.edge.params(TestplanTestType).simulation) {
if (args.edge.params(TLMonitorStrictMode)) {
legalizeADSource(bundle, edge)
legalizeCDSource(bundle, edge)
} else {
legalizeADSourceOld(bundle, edge)
}
}
if (args.edge.params(TestplanTestType).formal) {
legalizeADSourceFormal(bundle, edge)
}
}
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
// legalizeBCSourceAddress(bundle, edge) // too much state needed to synthesize...
val sinkBits = log2Ceil(edge.manager.endSinkId)
if (sinkBits > tooBig) {
println(s"WARNING: TLMonitor instantiated on a bus with sink bits (${sinkBits}) > ${tooBig}; D=>E transaction flight will not be checked")
} else {
legalizeDESink(bundle, edge)
}
}
}
def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit = {
legalizeFormat (bundle, edge)
legalizeMultibeat (bundle, edge)
legalizeUnique (bundle, edge)
}
}
File Misc.scala:
// See LICENSE.Berkeley for license details.
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util._
import chisel3.util.random.LFSR
import org.chipsalliance.cde.config.Parameters
import scala.math._
class ParameterizedBundle(implicit p: Parameters) extends Bundle
trait Clocked extends Bundle {
val clock = Clock()
val reset = Bool()
}
object DecoupledHelper {
def apply(rvs: Bool*) = new DecoupledHelper(rvs)
}
class DecoupledHelper(val rvs: Seq[Bool]) {
def fire(exclude: Bool, includes: Bool*) = {
require(rvs.contains(exclude), "Excluded Bool not present in DecoupledHelper! Note that DecoupledHelper uses referential equality for exclusion! If you don't want to exclude anything, use fire()!")
(rvs.filter(_ ne exclude) ++ includes).reduce(_ && _)
}
def fire() = {
rvs.reduce(_ && _)
}
}
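// Typical use (sketch): combine several ready/valid conditions and derive each port's
// handshake by excluding that port's own signal, e.g.
//   val helper = DecoupledHelper(in.valid, out.ready, okToSend)
//   in.ready  := helper.fire(in.valid)
//   out.valid := helper.fire(out.ready)
// (in, out and okToSend are illustrative names, not part of this file.)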
object MuxT {
def apply[T <: Data, U <: Data](cond: Bool, con: (T, U), alt: (T, U)): (T, U) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2))
def apply[T <: Data, U <: Data, W <: Data](cond: Bool, con: (T, U, W), alt: (T, U, W)): (T, U, W) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3))
def apply[T <: Data, U <: Data, W <: Data, X <: Data](cond: Bool, con: (T, U, W, X), alt: (T, U, W, X)): (T, U, W, X) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3), Mux(cond, con._4, alt._4))
}
/** Creates a cascade of n MuxTs to search for a key value. */
object MuxTLookup {
def apply[S <: UInt, T <: Data, U <: Data](key: S, default: (T, U), mapping: Seq[(S, (T, U))]): (T, U) = {
var res = default
for ((k, v) <- mapping.reverse)
res = MuxT(k === key, v, res)
res
}
def apply[S <: UInt, T <: Data, U <: Data, W <: Data](key: S, default: (T, U, W), mapping: Seq[(S, (T, U, W))]): (T, U, W) = {
var res = default
for ((k, v) <- mapping.reverse)
res = MuxT(k === key, v, res)
res
}
}
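// Illustrative usage of MuxTLookup: select a tuple of values by key, falling back to a
// default; `sel` is a placeholder name.
//   val (wen, wdata) = MuxTLookup(sel, (false.B, 0.U),
//     Seq(1.U -> (true.B, 10.U), 2.U -> (true.B, 20.U)))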
object ValidMux {
def apply[T <: Data](v1: ValidIO[T], v2: ValidIO[T]*): ValidIO[T] = {
apply(v1 +: v2.toSeq)
}
def apply[T <: Data](valids: Seq[ValidIO[T]]): ValidIO[T] = {
val out = Wire(Valid(valids.head.bits.cloneType))
out.valid := valids.map(_.valid).reduce(_ || _)
out.bits := MuxCase(valids.head.bits,
valids.map(v => (v.valid -> v.bits)))
out
}
}
object Str
{
def apply(s: String): UInt = {
var i = BigInt(0)
require(s.forall(validChar _))
for (c <- s)
i = (i << 8) | c
i.U((s.length*8).W)
}
def apply(x: Char): UInt = {
require(validChar(x))
x.U(8.W)
}
def apply(x: UInt): UInt = apply(x, 10)
def apply(x: UInt, radix: Int): UInt = {
val rad = radix.U
val w = x.getWidth
require(w > 0)
var q = x
var s = digit(q % rad)
for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
q = q / rad
s = Cat(Mux((radix == 10).B && q === 0.U, Str(' '), digit(q % rad)), s)
}
s
}
def apply(x: SInt): UInt = apply(x, 10)
def apply(x: SInt, radix: Int): UInt = {
val neg = x < 0.S
val abs = x.abs.asUInt
if (radix != 10) {
Cat(Mux(neg, Str('-'), Str(' ')), Str(abs, radix))
} else {
val rad = radix.U
val w = abs.getWidth
require(w > 0)
var q = abs
var s = digit(q % rad)
var needSign = neg
for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
q = q / rad
val placeSpace = q === 0.U
val space = Mux(needSign, Str('-'), Str(' '))
needSign = needSign && !placeSpace
s = Cat(Mux(placeSpace, space, digit(q % rad)), s)
}
Cat(Mux(needSign, Str('-'), Str(' ')), s)
}
}
private def digit(d: UInt): UInt = Mux(d < 10.U, Str('0')+d, Str(('a'-10).toChar)+d)(7,0)
private def validChar(x: Char) = x == (x & 0xFF)
}
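// For example (illustrative): Str("ok") yields 0x6f6b as a 16-bit UInt, and Str(x: UInt)
// renders x as ASCII decimal digits (leading zeros become spaces) for use with printf.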
object Split
{
def apply(x: UInt, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n0), x.extract(n0-1,0))
}
def apply(x: UInt, n1: Int, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0))
}
def apply(x: UInt, n2: Int, n1: Int, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n2), x.extract(n2-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0))
}
}
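// For example (illustrative): Split("b101100".U, 3) returns ("b101".U, "b100".U), and
// Split(x, 4, 2) returns the slices x(w-1,4), x(3,2), x(1,0).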
object Random
{
def apply(mod: Int, random: UInt): UInt = {
if (isPow2(mod)) random.extract(log2Ceil(mod)-1,0)
else PriorityEncoder(partition(apply(1 << log2Up(mod*8), random), mod))
}
def apply(mod: Int): UInt = apply(mod, randomizer)
def oneHot(mod: Int, random: UInt): UInt = {
if (isPow2(mod)) UIntToOH(random(log2Up(mod)-1,0))
else PriorityEncoderOH(partition(apply(1 << log2Up(mod*8), random), mod)).asUInt
}
def oneHot(mod: Int): UInt = oneHot(mod, randomizer)
private def randomizer = LFSR(16)
private def partition(value: UInt, slices: Int) =
Seq.tabulate(slices)(i => value < (((i + 1) << value.getWidth) / slices).U)
}
object Majority {
def apply(in: Set[Bool]): Bool = {
val n = (in.size >> 1) + 1
val clauses = in.subsets(n).map(_.reduce(_ && _))
clauses.reduce(_ || _)
}
def apply(in: Seq[Bool]): Bool = apply(in.toSet)
def apply(in: UInt): Bool = apply(in.asBools.toSet)
}
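// For example (illustrative): Majority("b110".U) is true.B (two of three bits set) and
// Majority("b100".U) is false.B.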
object PopCountAtLeast {
private def two(x: UInt): (Bool, Bool) = x.getWidth match {
case 1 => (x.asBool, false.B)
case n =>
val half = x.getWidth / 2
val (leftOne, leftTwo) = two(x(half - 1, 0))
val (rightOne, rightTwo) = two(x(x.getWidth - 1, half))
(leftOne || rightOne, leftTwo || rightTwo || (leftOne && rightOne))
}
def apply(x: UInt, n: Int): Bool = n match {
case 0 => true.B
case 1 => x.orR
case 2 => two(x)._2
case 3 => PopCount(x) >= n.U
}
}
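// For example (illustrative): PopCountAtLeast("b0110".U, 2) is true.B, while
// PopCountAtLeast("b0110".U, 3) is false.B.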
// This gets used everywhere, so make the smallest circuit possible ...
// Given an address and size, create a mask of beatBytes size
// eg: (0x3, 0, 4) => 0001, (0x3, 1, 4) => 0011, (0x3, 2, 4) => 1111
// groupBy applies an interleaved OR reduction; groupBy=2 take 0010 => 01
object MaskGen {
def apply(addr_lo: UInt, lgSize: UInt, beatBytes: Int, groupBy: Int = 1): UInt = {
require (groupBy >= 1 && beatBytes >= groupBy)
require (isPow2(beatBytes) && isPow2(groupBy))
val lgBytes = log2Ceil(beatBytes)
val sizeOH = UIntToOH(lgSize | 0.U(log2Up(beatBytes).W), log2Up(beatBytes)) | (groupBy*2 - 1).U
def helper(i: Int): Seq[(Bool, Bool)] = {
if (i == 0) {
Seq((lgSize >= lgBytes.asUInt, true.B))
} else {
val sub = helper(i-1)
val size = sizeOH(lgBytes - i)
val bit = addr_lo(lgBytes - i)
val nbit = !bit
Seq.tabulate (1 << i) { j =>
val (sub_acc, sub_eq) = sub(j/2)
val eq = sub_eq && (if (j % 2 == 1) bit else nbit)
val acc = sub_acc || (size && eq)
(acc, eq)
}
}
}
if (groupBy == beatBytes) 1.U else
Cat(helper(lgBytes-log2Ceil(groupBy)).map(_._1).reverse)
}
}
File PlusArg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.experimental._
import chisel3.util.HasBlackBoxResource
@deprecated("This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05")
case class PlusArgInfo(default: BigInt, docstring: String)
/** Case class for PlusArg information
*
* @tparam A scala type of the PlusArg value
* @param default optional default value
* @param docstring text to include in the help
* @param doctype description of the Verilog type of the PlusArg value (e.g. STRING, INT)
*/
private case class PlusArgContainer[A](default: Option[A], docstring: String, doctype: String)
/** Typeclass for converting a type to a doctype string
* @tparam A some type
*/
trait Doctypeable[A] {
/** Return the doctype string for some option */
def toDoctype(a: Option[A]): String
}
/** Object containing implementations of the Doctypeable typeclass */
object Doctypes {
/** Converts an Int => "INT" */
implicit val intToDoctype = new Doctypeable[Int] { def toDoctype(a: Option[Int]) = "INT" }
/** Converts a BigInt => "INT" */
implicit val bigIntToDoctype = new Doctypeable[BigInt] { def toDoctype(a: Option[BigInt]) = "INT" }
/** Converts a String => "STRING" */
implicit val stringToDoctype = new Doctypeable[String] { def toDoctype(a: Option[String]) = "STRING" }
}
class plusarg_reader(val format: String, val default: BigInt, val docstring: String, val width: Int) extends BlackBox(Map(
"FORMAT" -> StringParam(format),
"DEFAULT" -> IntParam(default),
"WIDTH" -> IntParam(width)
)) with HasBlackBoxResource {
val io = IO(new Bundle {
val out = Output(UInt(width.W))
})
addResource("/vsrc/plusarg_reader.v")
}
/* This wrapper class has no outputs, making it clear it is a simulation-only construct */
class PlusArgTimeout(val format: String, val default: BigInt, val docstring: String, val width: Int) extends Module {
val io = IO(new Bundle {
val count = Input(UInt(width.W))
})
val max = Module(new plusarg_reader(format, default, docstring, width)).io.out
when (max > 0.U) {
assert (io.count < max, s"Timeout exceeded: $docstring")
}
}
import Doctypes._
object PlusArg
{
/** PlusArg("foo") will return 42.U if the simulation is run with +foo=42
* Do not use this as an initial register value. The value is set in an
 * initial block and thus accessing it from another initial is racy.
* Add a docstring to document the arg, which can be dumped in an elaboration
* pass.
*/
def apply(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32): UInt = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new plusarg_reader(name + "=%d", default, docstring, width)).io.out
}
/** PlusArg.timeout(name, default, docstring)(count) will use chisel.assert
* to kill the simulation when count exceeds the specified integer argument.
* Default 0 will never assert.
*/
def timeout(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32)(count: UInt): Unit = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new PlusArgTimeout(name + "=%d", default, docstring, width)).io.count := count
}
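// Illustrative usage of PlusArg: read a runtime argument and use it as a soft limit;
// `cycle` is a placeholder counter.
//   val max = PlusArg("max_core_cycles", docstring = "Kill the simulation after N cycles. Off if 0.")
//   when (max > 0.U) { assert(cycle < max) }
// PlusArg.timeout("max_core_cycles")(cycle) wraps the same pattern in a helper module.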
}
object PlusArgArtefacts {
private var artefacts: Map[String, PlusArgContainer[_]] = Map.empty
/* Add a new PlusArg */
@deprecated(
"Use `Some(BigInt)` to specify a `default` value. This will be removed in Rocket Chip 2020.08",
"Rocket Chip 2020.05"
)
def append(name: String, default: BigInt, docstring: String): Unit = append(name, Some(default), docstring)
/** Add a new PlusArg
*
* @tparam A scala type of the PlusArg value
* @param name name for the PlusArg
* @param default optional default value
* @param docstring text to include in the help
*/
def append[A : Doctypeable](name: String, default: Option[A], docstring: String): Unit =
artefacts = artefacts ++
Map(name -> PlusArgContainer(default, docstring, implicitly[Doctypeable[A]].toDoctype(default)))
/* From plus args, generate help text */
private def serializeHelp_cHeader(tab: String = ""): String = artefacts
.map{ case(arg, info) =>
s"""|$tab+$arg=${info.doctype}\\n\\
|$tab${" "*20}${info.docstring}\\n\\
|""".stripMargin ++ info.default.map{ case default =>
s"$tab${" "*22}(default=${default})\\n\\\n"}.getOrElse("")
}.toSeq.mkString("\\n\\\n") ++ "\""
/* From plus args, generate a char array of their names */
private def serializeArray_cHeader(tab: String = ""): String = {
val prettyTab = tab + " " * 44 // Length of 'static const ...'
s"${tab}static const char * verilog_plusargs [] = {\\\n" ++
artefacts
.map{ case(arg, _) => s"""$prettyTab"$arg",\\\n""" }
.mkString("")++
s"${prettyTab}0};"
}
/* Generate C code to be included in emulator.cc that helps with
* argument parsing based on available Verilog PlusArgs */
def serialize_cHeader(): String =
s"""|#define PLUSARG_USAGE_OPTIONS \"EMULATOR VERILOG PLUSARGS\\n\\
|${serializeHelp_cHeader(" "*7)}
|${serializeArray_cHeader()}
|""".stripMargin
}
File package.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip
import chisel3._
import chisel3.util._
import scala.math.min
import scala.collection.{immutable, mutable}
package object util {
implicit class UnzippableOption[S, T](val x: Option[(S, T)]) {
def unzip = (x.map(_._1), x.map(_._2))
}
implicit class UIntIsOneOf(private val x: UInt) extends AnyVal {
def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR
def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq)
}
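// For example (illustrative): opcode.isOneOf(TLMessages.Get, TLMessages.Hint) is
// equivalent to (opcode === TLMessages.Get) || (opcode === TLMessages.Hint).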
implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal {
/** Like Vec.apply(idx), but tolerates indices of mismatched width */
def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0))
}
implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal {
def apply(idx: UInt): T = {
if (x.size <= 1) {
x.head
} else if (!isPow2(x.size)) {
// For non-power-of-2 seqs, reflect elements to simplify decoder
(x ++ x.takeRight(x.size & -x.size)).toSeq(idx)
} else {
// Ignore MSBs of idx
val truncIdx =
if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx
else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0)
x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) }
}
}
def extract(idx: UInt): T = VecInit(x).extract(idx)
def asUInt: UInt = Cat(x.map(_.asUInt).reverse)
def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n)
def rotate(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n)
def rotateRight(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
}
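// For example (illustrative): indexing a 3-element Seq(a, b, c) with a UInt reflects it to
// Seq(a, b, c, c), so the resulting 2-bit mux tree needs no out-of-range handling.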
// allow bitwise ops on Seq[Bool] just like UInt
implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal {
def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b }
def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b }
def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b }
def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x
def >> (n: Int): Seq[Bool] = x drop n
def unary_~ : Seq[Bool] = x.map(!_)
def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_)
def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_)
def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_)
private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B)
}
implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal {
def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable))
def getElements: Seq[Element] = x match {
case e: Element => Seq(e)
case a: Aggregate => a.getElements.flatMap(_.getElements)
}
}
/** Any Data subtype that has a Bool member named valid. */
type DataCanBeValid = Data { val valid: Bool }
implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal {
def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable)
}
implicit class StringToAugmentedString(private val x: String) extends AnyVal {
/** converts from camel case to underscores, also removing all spaces */
def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") {
case (acc, c) if c.isUpper => acc + "_" + c.toLower
case (acc, c) if c == ' ' => acc
case (acc, c) => acc + c
}
/** converts spaces or underscores to hyphens, also lowering case */
def kebab: String = x.toLowerCase map {
case ' ' => '-'
case '_' => '-'
case c => c
}
def named(name: Option[String]): String = {
x + name.map("_named_" + _ ).getOrElse("_with_no_name")
}
def named(name: String): String = named(Some(name))
}
implicit def uintToBitPat(x: UInt): BitPat = BitPat(x)
implicit def wcToUInt(c: WideCounter): UInt = c.value
implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal {
def sextTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x)
}
def padTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(0.U((n - x.getWidth).W), x)
}
// shifts left by n if n >= 0, or right by -n if n < 0
def << (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << n(w-1, 0)
Mux(n(w), shifted >> (1 << w), shifted)
}
// shifts right by n if n >= 0, or left by -n if n < 0
def >> (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << (1 << w) >> n(w-1, 0)
Mux(n(w), shifted, shifted >> (1 << w))
}
// Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts
def extract(hi: Int, lo: Int): UInt = {
require(hi >= lo-1)
if (hi == lo-1) 0.U
else x(hi, lo)
}
// Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts
def extractOption(hi: Int, lo: Int): Option[UInt] = {
require(hi >= lo-1)
if (hi == lo-1) None
else Some(x(hi, lo))
}
// like x & ~y, but first truncate or zero-extend y to x's width
def andNot(y: UInt): UInt = x & ~(y | (x & 0.U))
def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n)
def rotateRight(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r))
}
}
def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n))
def rotateLeft(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r))
}
}
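// For example (illustrative): "b0011".U(4.W).rotateRight(1) is "b1001" and
// "b0011".U(4.W).rotateLeft(1) is "b0110".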
// compute (this + y) % n, given (this < n) and (y < n)
def addWrap(y: UInt, n: Int): UInt = {
val z = x +& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0)
}
// compute (this - y) % n, given (this < n) and (y < n)
def subWrap(y: UInt, n: Int): UInt = {
val z = x -& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0)
}
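// For example (illustrative): with n = 6, 4.U.addWrap(3.U, 6) is 1.U and
// 1.U.subWrap(3.U, 6) is 4.U.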
def grouped(width: Int): Seq[UInt] =
(0 until x.getWidth by width).map(base => x(base + width - 1, base))
def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds
def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x)
// Like >=, but prevents x-prop for ('x >= 0)
def >== (y: UInt): Bool = x >= y || y === 0.U
}
implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal {
def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y)
def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y)
}
implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal {
def toInt: Int = if (x) 1 else 0
// this one's snagged from scalaz
def option[T](z: => T): Option[T] = if (x) Some(z) else None
}
implicit class IntToAugmentedInt(private val x: Int) extends AnyVal {
// exact log2
def log2: Int = {
require(isPow2(x))
log2Ceil(x)
}
}
def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x)
def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x))
def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0)
def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1)
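// For example (illustrative): UIntToOH1(3.U, 8) is "b00000111" (a thermometer code),
// OH1ToOH("b0111".U) is "b1000", and OH1ToUInt("b0111".U) is 3.U.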
def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None
// Fill 1s from low bits to high bits
def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth)
def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0))
helper(1, x)(width-1, 0)
}
// Fill 1s from high bits to low bits
def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth)
def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x >> s))
helper(1, x)(width-1, 0)
}
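// For example (illustrative): leftOR("b00100".U) is "b11100" (ones spread toward the MSB)
// and rightOR("b00100".U) is "b00111" (ones spread toward the LSB).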
def OptimizationBarrier[T <: Data](in: T): T = {
val barrier = Module(new Module {
val io = IO(new Bundle {
val x = Input(chiselTypeOf(in))
val y = Output(chiselTypeOf(in))
})
io.y := io.x
override def desiredName = s"OptimizationBarrier_${in.typeName}"
})
barrier.io.x := in
barrier.io.y
}
/** Similar to Seq.groupBy except this returns a Seq instead of a Map
* Useful for deterministic code generation
*/
def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = {
val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]]
for (x <- xs) {
val key = f(x)
val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A])
l += x
}
map.view.map({ case (k, vs) => k -> vs.toList }).toList
}
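// For example (illustrative): groupByIntoSeq(Seq(1, 2, 3, 4))(_ % 2) returns
// Seq(1 -> Seq(1, 3), 0 -> Seq(2, 4)), preserving first-seen key order.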
def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match {
case 1 => List.fill(n)(in.head)
case x if x == n => in
case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in")
}
// HeterogeneousBag moved to standalone diplomacy
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts)
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag
}
File Bundles.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import freechips.rocketchip.util._
import scala.collection.immutable.ListMap
import chisel3.util.Decoupled
import chisel3.util.DecoupledIO
import chisel3.reflect.DataMirror
abstract class TLBundleBase(val params: TLBundleParameters) extends Bundle
// common combos in lazy policy:
// Put + Acquire
// Release + AccessAck
object TLMessages
{
// A B C D E
def PutFullData = 0.U // . . => AccessAck
def PutPartialData = 1.U // . . => AccessAck
def ArithmeticData = 2.U // . . => AccessAckData
def LogicalData = 3.U // . . => AccessAckData
def Get = 4.U // . . => AccessAckData
def Hint = 5.U // . . => HintAck
def AcquireBlock = 6.U // . => Grant[Data]
def AcquirePerm = 7.U // . => Grant[Data]
def Probe = 6.U // . => ProbeAck[Data]
def AccessAck = 0.U // . .
def AccessAckData = 1.U // . .
def HintAck = 2.U // . .
def ProbeAck = 4.U // .
def ProbeAckData = 5.U // .
def Release = 6.U // . => ReleaseAck
def ReleaseData = 7.U // . => ReleaseAck
def Grant = 4.U // . => GrantAck
def GrantData = 5.U // . => GrantAck
def ReleaseAck = 6.U // .
def GrantAck = 0.U // .
def isA(x: UInt) = x <= AcquirePerm
def isB(x: UInt) = x <= Probe
def isC(x: UInt) = x <= ReleaseData
def isD(x: UInt) = x <= ReleaseAck
def adResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, Grant, Grant)
def bcResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, ProbeAck, ProbeAck)
def a = Seq( ("PutFullData",TLPermissions.PermMsgReserved),
("PutPartialData",TLPermissions.PermMsgReserved),
("ArithmeticData",TLAtomics.ArithMsg),
("LogicalData",TLAtomics.LogicMsg),
("Get",TLPermissions.PermMsgReserved),
("Hint",TLHints.HintsMsg),
("AcquireBlock",TLPermissions.PermMsgGrow),
("AcquirePerm",TLPermissions.PermMsgGrow))
def b = Seq( ("PutFullData",TLPermissions.PermMsgReserved),
("PutPartialData",TLPermissions.PermMsgReserved),
("ArithmeticData",TLAtomics.ArithMsg),
("LogicalData",TLAtomics.LogicMsg),
("Get",TLPermissions.PermMsgReserved),
("Hint",TLHints.HintsMsg),
("Probe",TLPermissions.PermMsgCap))
def c = Seq( ("AccessAck",TLPermissions.PermMsgReserved),
("AccessAckData",TLPermissions.PermMsgReserved),
("HintAck",TLPermissions.PermMsgReserved),
("Invalid Opcode",TLPermissions.PermMsgReserved),
("ProbeAck",TLPermissions.PermMsgReport),
("ProbeAckData",TLPermissions.PermMsgReport),
("Release",TLPermissions.PermMsgReport),
("ReleaseData",TLPermissions.PermMsgReport))
def d = Seq( ("AccessAck",TLPermissions.PermMsgReserved),
("AccessAckData",TLPermissions.PermMsgReserved),
("HintAck",TLPermissions.PermMsgReserved),
("Invalid Opcode",TLPermissions.PermMsgReserved),
("Grant",TLPermissions.PermMsgCap),
("GrantData",TLPermissions.PermMsgCap),
("ReleaseAck",TLPermissions.PermMsgReserved))
}
/**
* The three primary TileLink permissions are:
* (T)runk: the agent is (or is on inwards path to) the global point of serialization.
 * (B)ranch: the agent is on an outwards path to the point of serialization and holds read-only permissions.
 * (N)one: the agent holds no permissions on the block.
* These permissions are permuted by transfer operations in various ways.
* Operations can cap permissions, request for them to be grown or shrunk,
* or for a report on their current status.
*/
object TLPermissions
{
val aWidth = 2
val bdWidth = 2
val cWidth = 3
// Cap types (Grant = new permissions, Probe = permissions <= target)
def toT = 0.U(bdWidth.W)
def toB = 1.U(bdWidth.W)
def toN = 2.U(bdWidth.W)
def isCap(x: UInt) = x <= toN
// Grow types (Acquire = permissions >= target)
def NtoB = 0.U(aWidth.W)
def NtoT = 1.U(aWidth.W)
def BtoT = 2.U(aWidth.W)
def isGrow(x: UInt) = x <= BtoT
// Shrink types (ProbeAck, Release)
def TtoB = 0.U(cWidth.W)
def TtoN = 1.U(cWidth.W)
def BtoN = 2.U(cWidth.W)
def isShrink(x: UInt) = x <= BtoN
// Report types (ProbeAck, Release)
def TtoT = 3.U(cWidth.W)
def BtoB = 4.U(cWidth.W)
def NtoN = 5.U(cWidth.W)
def isReport(x: UInt) = x <= NtoN
def PermMsgGrow:Seq[String] = Seq("Grow NtoB", "Grow NtoT", "Grow BtoT")
def PermMsgCap:Seq[String] = Seq("Cap toT", "Cap toB", "Cap toN")
def PermMsgReport:Seq[String] = Seq("Shrink TtoB", "Shrink TtoN", "Shrink BtoN", "Report TtoT", "Report BtoB", "Report NtoN")
def PermMsgReserved:Seq[String] = Seq("Reserved")
}
object TLAtomics
{
val width = 3
// Arithmetic types
def MIN = 0.U(width.W)
def MAX = 1.U(width.W)
def MINU = 2.U(width.W)
def MAXU = 3.U(width.W)
def ADD = 4.U(width.W)
def isArithmetic(x: UInt) = x <= ADD
// Logical types
def XOR = 0.U(width.W)
def OR = 1.U(width.W)
def AND = 2.U(width.W)
def SWAP = 3.U(width.W)
def isLogical(x: UInt) = x <= SWAP
def ArithMsg:Seq[String] = Seq("MIN", "MAX", "MINU", "MAXU", "ADD")
def LogicMsg:Seq[String] = Seq("XOR", "OR", "AND", "SWAP")
}
object TLHints
{
val width = 1
def PREFETCH_READ = 0.U(width.W)
def PREFETCH_WRITE = 1.U(width.W)
def isHints(x: UInt) = x <= PREFETCH_WRITE
def HintsMsg:Seq[String] = Seq("PrefetchRead", "PrefetchWrite")
}
sealed trait TLChannel extends TLBundleBase {
val channelName: String
}
sealed trait TLDataChannel extends TLChannel
sealed trait TLAddrChannel extends TLDataChannel
final class TLBundleA(params: TLBundleParameters)
extends TLBundleBase(params) with TLAddrChannel
{
override def typeName = s"TLBundleA_${params.shortName}"
val channelName = "'A' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(List(TLAtomics.width, TLPermissions.aWidth, TLHints.width).max.W) // amo_opcode || grow perms || hint
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // from
val address = UInt(params.addressBits.W) // to
val user = BundleMap(params.requestFields)
val echo = BundleMap(params.echoFields)
// variable fields during multibeat:
val mask = UInt((params.dataBits/8).W)
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleB(params: TLBundleParameters)
extends TLBundleBase(params) with TLAddrChannel
{
override def typeName = s"TLBundleB_${params.shortName}"
val channelName = "'B' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(TLPermissions.bdWidth.W) // cap perms
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // to
val address = UInt(params.addressBits.W) // from
// variable fields during multibeat:
val mask = UInt((params.dataBits/8).W)
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleC(params: TLBundleParameters)
extends TLBundleBase(params) with TLAddrChannel
{
override def typeName = s"TLBundleC_${params.shortName}"
val channelName = "'C' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(TLPermissions.cWidth.W) // shrink or report perms
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // from
val address = UInt(params.addressBits.W) // to
val user = BundleMap(params.requestFields)
val echo = BundleMap(params.echoFields)
// variable fields during multibeat:
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleD(params: TLBundleParameters)
extends TLBundleBase(params) with TLDataChannel
{
override def typeName = s"TLBundleD_${params.shortName}"
val channelName = "'D' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(TLPermissions.bdWidth.W) // cap perms
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // to
val sink = UInt(params.sinkBits.W) // from
val denied = Bool() // implies corrupt iff *Data
val user = BundleMap(params.responseFields)
val echo = BundleMap(params.echoFields)
// variable fields during multibeat:
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleE(params: TLBundleParameters)
extends TLBundleBase(params) with TLChannel
{
override def typeName = s"TLBundleE_${params.shortName}"
val channelName = "'E' channel"
val sink = UInt(params.sinkBits.W) // to
}
class TLBundle(val params: TLBundleParameters) extends Record
{
// Emulate a Bundle with elements abcde or ad depending on params.hasBCE
private val optA = Some (Decoupled(new TLBundleA(params)))
private val optB = params.hasBCE.option(Flipped(Decoupled(new TLBundleB(params))))
private val optC = params.hasBCE.option(Decoupled(new TLBundleC(params)))
private val optD = Some (Flipped(Decoupled(new TLBundleD(params))))
private val optE = params.hasBCE.option(Decoupled(new TLBundleE(params)))
def a: DecoupledIO[TLBundleA] = optA.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleA(params)))))
def b: DecoupledIO[TLBundleB] = optB.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleB(params)))))
def c: DecoupledIO[TLBundleC] = optC.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleC(params)))))
def d: DecoupledIO[TLBundleD] = optD.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleD(params)))))
def e: DecoupledIO[TLBundleE] = optE.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleE(params)))))
val elements =
if (params.hasBCE) ListMap("e" -> e, "d" -> d, "c" -> c, "b" -> b, "a" -> a)
else ListMap("d" -> d, "a" -> a)
def tieoff(): Unit = {
DataMirror.specifiedDirectionOf(a.ready) match {
case SpecifiedDirection.Input =>
a.ready := false.B
c.ready := false.B
e.ready := false.B
b.valid := false.B
d.valid := false.B
case SpecifiedDirection.Output =>
a.valid := false.B
c.valid := false.B
e.valid := false.B
b.ready := false.B
d.ready := false.B
case _ =>
}
}
}
object TLBundle
{
def apply(params: TLBundleParameters) = new TLBundle(params)
}
class TLAsyncBundleBase(val params: TLAsyncBundleParameters) extends Bundle
class TLAsyncBundle(params: TLAsyncBundleParameters) extends TLAsyncBundleBase(params)
{
val a = new AsyncBundle(new TLBundleA(params.base), params.async)
val b = Flipped(new AsyncBundle(new TLBundleB(params.base), params.async))
val c = new AsyncBundle(new TLBundleC(params.base), params.async)
val d = Flipped(new AsyncBundle(new TLBundleD(params.base), params.async))
val e = new AsyncBundle(new TLBundleE(params.base), params.async)
}
class TLRationalBundle(params: TLBundleParameters) extends TLBundleBase(params)
{
val a = RationalIO(new TLBundleA(params))
val b = Flipped(RationalIO(new TLBundleB(params)))
val c = RationalIO(new TLBundleC(params))
val d = Flipped(RationalIO(new TLBundleD(params)))
val e = RationalIO(new TLBundleE(params))
}
class TLCreditedBundle(params: TLBundleParameters) extends TLBundleBase(params)
{
val a = CreditedIO(new TLBundleA(params))
val b = Flipped(CreditedIO(new TLBundleB(params)))
val c = CreditedIO(new TLBundleC(params))
val d = Flipped(CreditedIO(new TLBundleD(params)))
val e = CreditedIO(new TLBundleE(params))
}
File Parameters.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.diplomacy
import chisel3._
import chisel3.util.{DecoupledIO, Queue, ReadyValidIO, isPow2, log2Ceil, log2Floor}
import freechips.rocketchip.util.ShiftQueue
/** Options for describing the attributes of memory regions */
object RegionType {
// Define the 'more relaxed than' ordering
val cases = Seq(CACHED, TRACKED, UNCACHED, IDEMPOTENT, VOLATILE, PUT_EFFECTS, GET_EFFECTS)
sealed trait T extends Ordered[T] {
def compare(that: T): Int = cases.indexOf(that) compare cases.indexOf(this)
}
case object CACHED extends T // an intermediate agent may have cached a copy of the region for you
case object TRACKED extends T // the region may have been cached by another master, but coherence is being provided
case object UNCACHED extends T // the region has not been cached yet, but should be cached when possible
case object IDEMPOTENT extends T // gets return most recently put content, but content should not be cached
case object VOLATILE extends T // content may change without a put, but puts and gets have no side effects
case object PUT_EFFECTS extends T // puts produce side effects and so must not be combined/delayed
case object GET_EFFECTS extends T // gets produce side effects and so must not be issued speculatively
}
// A potentially empty half-open range; [start, end)
case class IdRange(start: Int, end: Int) extends Ordered[IdRange]
{
require (start >= 0, s"Ids cannot be negative, but got: $start.")
require (start <= end, "Id ranges cannot be negative.")
def compare(x: IdRange) = {
val primary = (this.start - x.start).signum
val secondary = (x.end - this.end).signum
if (primary != 0) primary else secondary
}
def overlaps(x: IdRange) = start < x.end && x.start < end
def contains(x: IdRange) = start <= x.start && x.end <= end
def contains(x: Int) = start <= x && x < end
def contains(x: UInt) =
if (size == 0) {
false.B
} else if (size == 1) { // simple comparison
x === start.U
} else {
// find index of largest different bit
val largestDeltaBit = log2Floor(start ^ (end-1))
val smallestCommonBit = largestDeltaBit + 1 // may not exist in x
val uncommonMask = (1 << smallestCommonBit) - 1
val uncommonBits = (x | 0.U(smallestCommonBit.W))(largestDeltaBit, 0)
// the prefix must match exactly (note: may shift ALL bits away)
(x >> smallestCommonBit) === (start >> smallestCommonBit).U &&
// firrtl constant prop range analysis can eliminate these two:
(start & uncommonMask).U <= uncommonBits &&
uncommonBits <= ((end-1) & uncommonMask).U
}
def shift(x: Int) = IdRange(start+x, end+x)
def size = end - start
def isEmpty = end == start
def range = start until end
}
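// For example (illustrative): IdRange(4, 8).contains(x) reduces to (x >> 2) === 1.U,
// because ids 4..7 share the prefix above the largest differing bit.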
object IdRange
{
def overlaps(s: Seq[IdRange]) = if (s.isEmpty) None else {
val ranges = s.sorted
(ranges.tail zip ranges.init) find { case (a, b) => a overlaps b }
}
}
// A potentially empty inclusive range of 2-powers [min, max] (in bytes)
case class TransferSizes(min: Int, max: Int)
{
def this(x: Int) = this(x, x)
require (min <= max, s"Min transfer $min > max transfer $max")
require (min >= 0 && max >= 0, s"TransferSizes must be positive, got: ($min, $max)")
require (max == 0 || isPow2(max), s"TransferSizes must be a power of 2, got: $max")
require (min == 0 || isPow2(min), s"TransferSizes must be a power of 2, got: $min")
require (max == 0 || min != 0, s"TransferSize 0 is forbidden unless (0,0), got: ($min, $max)")
def none = min == 0
def contains(x: Int) = isPow2(x) && min <= x && x <= max
def containsLg(x: Int) = contains(1 << x)
def containsLg(x: UInt) =
if (none) false.B
else if (min == max) { log2Ceil(min).U === x }
else { log2Ceil(min).U <= x && x <= log2Ceil(max).U }
def contains(x: TransferSizes) = x.none || (min <= x.min && x.max <= max)
def intersect(x: TransferSizes) =
if (x.max < min || max < x.min) TransferSizes.none
else TransferSizes(scala.math.max(min, x.min), scala.math.min(max, x.max))
// Not a union, because the result may contain sizes contained by neither term
// NOT TO BE CONFUSED WITH COVERPOINTS
def mincover(x: TransferSizes) = {
if (none) {
x
} else if (x.none) {
this
} else {
TransferSizes(scala.math.min(min, x.min), scala.math.max(max, x.max))
}
}
override def toString() = "TransferSizes[%d, %d]".format(min, max)
}
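// For example (illustrative): TransferSizes(4, 64).contains(16) is true, and
// TransferSizes(4, 64).intersect(TransferSizes(32, 128)) is TransferSizes(32, 64).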
object TransferSizes {
def apply(x: Int) = new TransferSizes(x)
val none = new TransferSizes(0)
def mincover(seq: Seq[TransferSizes]) = seq.foldLeft(none)(_ mincover _)
def intersect(seq: Seq[TransferSizes]) = seq.reduce(_ intersect _)
implicit def asBool(x: TransferSizes) = !x.none
}
// AddressSets specify the address space managed by the manager
// Base is the base address, and mask are the bits consumed by the manager
// e.g: base=0x200, mask=0xff describes a device managing 0x200-0x2ff
// e.g: base=0x1000, mask=0xf0f describes a device managing 0x1000-0x100f, 0x1100-0x110f, ...
case class AddressSet(base: BigInt, mask: BigInt) extends Ordered[AddressSet]
{
// Forbid misaligned base address (and empty sets)
require ((base & mask) == 0, s"Mis-aligned AddressSets are forbidden, got: ${this.toString}")
require (base >= 0, s"AddressSet negative base is ambiguous: $base") // TL2 address widths are not fixed => negative is ambiguous
// We do allow negative mask (=> ignore all high bits)
def contains(x: BigInt) = ((x ^ base) & ~mask) == 0
def contains(x: UInt) = ((x ^ base.U).zext & (~mask).S) === 0.S
// turn x into an address contained in this set
def legalize(x: UInt): UInt = base.U | (mask.U & x)
// overlap iff bitwise: both care (~mask0 & ~mask1) => both equal (base0=base1)
def overlaps(x: AddressSet) = (~(mask | x.mask) & (base ^ x.base)) == 0
// contains iff bitwise: x.mask => mask && contains(x.base)
def contains(x: AddressSet) = ((x.mask | (base ^ x.base)) & ~mask) == 0
// The number of bytes to which the manager must be aligned
def alignment = ((mask + 1) & ~mask)
// Is this a contiguous memory range
def contiguous = alignment == mask+1
def finite = mask >= 0
def max = { require (finite, "Max cannot be calculated on infinite mask"); base | mask }
// Widen the match function to ignore all bits in imask
def widen(imask: BigInt) = AddressSet(base & ~imask, mask | imask)
// Return an AddressSet that only contains the addresses both sets contain
def intersect(x: AddressSet): Option[AddressSet] = {
if (!overlaps(x)) {
None
} else {
val r_mask = mask & x.mask
val r_base = base | x.base
Some(AddressSet(r_base, r_mask))
}
}
def subtract(x: AddressSet): Seq[AddressSet] = {
intersect(x) match {
case None => Seq(this)
case Some(remove) => AddressSet.enumerateBits(mask & ~remove.mask).map { bit =>
val nmask = (mask & (bit-1)) | remove.mask
val nbase = (remove.base ^ bit) & ~nmask
AddressSet(nbase, nmask)
}
}
}
// AddressSets have one natural Ordering (the containment order, if contiguous)
def compare(x: AddressSet) = {
val primary = (this.base - x.base).signum // smallest address first
val secondary = (x.mask - this.mask).signum // largest mask first
if (primary != 0) primary else secondary
}
// We always want to see things in hex
override def toString() = {
if (mask >= 0) {
"AddressSet(0x%x, 0x%x)".format(base, mask)
} else {
"AddressSet(0x%x, ~0x%x)".format(base, ~mask)
}
}
def toRanges = {
require (finite, "Ranges cannot be calculated on infinite mask")
val size = alignment
val fragments = mask & ~(size-1)
val bits = bitIndexes(fragments)
(BigInt(0) until (BigInt(1) << bits.size)).map { i =>
val off = bitIndexes(i).foldLeft(base) { case (a, b) => a.setBit(bits(b)) }
AddressRange(off, size)
}
}
}
object AddressSet
{
val everything = AddressSet(0, -1)
def misaligned(base: BigInt, size: BigInt, tail: Seq[AddressSet] = Seq()): Seq[AddressSet] = {
if (size == 0) tail.reverse else {
val maxBaseAlignment = base & (-base) // 0 for infinite (LSB)
val maxSizeAlignment = BigInt(1) << log2Floor(size) // MSB of size
val step =
if (maxBaseAlignment == 0 || maxBaseAlignment > maxSizeAlignment)
maxSizeAlignment else maxBaseAlignment
misaligned(base+step, size-step, AddressSet(base, step-1) +: tail)
}
}
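// For example (illustrative): misaligned(0x100, 0x30) decomposes into
// Seq(AddressSet(0x100, 0x1f), AddressSet(0x120, 0xf)).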
def unify(seq: Seq[AddressSet], bit: BigInt): Seq[AddressSet] = {
// Pair terms up by ignoring 'bit'
seq.distinct.groupBy(x => x.copy(base = x.base & ~bit)).map { case (key, seq) =>
if (seq.size == 1) {
seq.head // singleton -> unaffected
} else {
key.copy(mask = key.mask | bit) // pair - widen mask by bit
}
}.toList
}
def unify(seq: Seq[AddressSet]): Seq[AddressSet] = {
val bits = seq.map(_.base).foldLeft(BigInt(0))(_ | _)
AddressSet.enumerateBits(bits).foldLeft(seq) { case (acc, bit) => unify(acc, bit) }.sorted
}
def enumerateMask(mask: BigInt): Seq[BigInt] = {
def helper(id: BigInt, tail: Seq[BigInt]): Seq[BigInt] =
if (id == mask) (id +: tail).reverse else helper(((~mask | id) + 1) & mask, id +: tail)
helper(0, Nil)
}
def enumerateBits(mask: BigInt): Seq[BigInt] = {
def helper(x: BigInt): Seq[BigInt] = {
if (x == 0) {
Nil
} else {
val bit = x & (-x)
bit +: helper(x & ~bit)
}
}
helper(mask)
}
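// For example (illustrative): enumerateMask(0x6) is Seq(0x0, 0x2, 0x4, 0x6) and
// enumerateBits(0x6) is Seq(0x2, 0x4).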
}
case class BufferParams(depth: Int, flow: Boolean, pipe: Boolean)
{
require (depth >= 0, "Buffer depth must be >= 0")
def isDefined = depth > 0
def latency = if (isDefined && !flow) 1 else 0
def apply[T <: Data](x: DecoupledIO[T]) =
if (isDefined) Queue(x, depth, flow=flow, pipe=pipe)
else x
def irrevocable[T <: Data](x: ReadyValidIO[T]) =
if (isDefined) Queue.irrevocable(x, depth, flow=flow, pipe=pipe)
else x
def sq[T <: Data](x: DecoupledIO[T]) =
if (!isDefined) x else {
val sq = Module(new ShiftQueue(x.bits, depth, flow=flow, pipe=pipe))
sq.io.enq <> x
sq.io.deq
}
override def toString() = "BufferParams:%d%s%s".format(depth, if (flow) "F" else "", if (pipe) "P" else "")
}
object BufferParams
{
implicit def apply(depth: Int): BufferParams = BufferParams(depth, false, false)
val default = BufferParams(2)
val none = BufferParams(0)
val flow = BufferParams(1, true, false)
val pipe = BufferParams(1, false, true)
}
case class TriStateValue(value: Boolean, set: Boolean)
{
def update(orig: Boolean) = if (set) value else orig
}
object TriStateValue
{
implicit def apply(value: Boolean): TriStateValue = TriStateValue(value, true)
def unset = TriStateValue(false, false)
}
trait DirectedBuffers[T] {
def copyIn(x: BufferParams): T
def copyOut(x: BufferParams): T
def copyInOut(x: BufferParams): T
}
trait IdMapEntry {
def name: String
def from: IdRange
def to: IdRange
def isCache: Boolean
def requestFifo: Boolean
def maxTransactionsInFlight: Option[Int]
def pretty(fmt: String) =
if (from ne to) { // if the subclass uses the same reference for both from and to, assume its format string has an arity of 5
fmt.format(to.start, to.end, from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "")
} else {
fmt.format(from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "")
}
}
abstract class IdMap[T <: IdMapEntry] {
protected val fmt: String
val mapping: Seq[T]
def pretty: String = mapping.map(_.pretty(fmt)).mkString(",\n")
}
File Edges.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.util._
class TLEdge(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdgeParameters(client, manager, params, sourceInfo)
{
def isAligned(address: UInt, lgSize: UInt): Bool = {
if (maxLgSize == 0) true.B else {
val mask = UIntToOH1(lgSize, maxLgSize)
(address & mask) === 0.U
}
}
def mask(address: UInt, lgSize: UInt): UInt =
MaskGen(address, lgSize, manager.beatBytes)
def staticHasData(bundle: TLChannel): Option[Boolean] = {
bundle match {
case _:TLBundleA => {
// Do there exist A messages with Data?
val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial
// Do there exist A messages without Data?
val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint
// Statically optimize the case where hasData is a constant
if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None
}
case _:TLBundleB => {
// Do there exist B messages with Data?
val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial
// Do there exist B messages without Data?
val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint
// Statically optimize the case where hasData is a constant
if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None
}
case _:TLBundleC => {
// Do there exist C messages with Data?
val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe
// Do there exist C messages without Data?
val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe
if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None
}
case _:TLBundleD => {
// Do there exist D messages with Data?
val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB
// Do there exist D messages without Data?
val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT
if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None
}
case _:TLBundleE => Some(false)
}
}
def isRequest(x: TLChannel): Bool = {
x match {
case a: TLBundleA => true.B
case b: TLBundleB => true.B
case c: TLBundleC => c.opcode(2) && c.opcode(1)
// opcode === TLMessages.Release ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(2) && !d.opcode(1)
// opcode === TLMessages.Grant ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
}
def isResponse(x: TLChannel): Bool = {
x match {
case a: TLBundleA => false.B
case b: TLBundleB => false.B
case c: TLBundleC => !c.opcode(2) || !c.opcode(1)
// opcode =/= TLMessages.Release &&
// opcode =/= TLMessages.ReleaseData
case d: TLBundleD => true.B // Grant isResponse + isRequest
case e: TLBundleE => true.B
}
}
def hasData(x: TLChannel): Bool = {
val opdata = x match {
case a: TLBundleA => !a.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case b: TLBundleB => !b.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case c: TLBundleC => c.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.ProbeAckData ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
staticHasData(x).map(_.B).getOrElse(opdata)
}
def opcode(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.opcode
case b: TLBundleB => b.opcode
case c: TLBundleC => c.opcode
case d: TLBundleD => d.opcode
}
}
def param(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.param
case b: TLBundleB => b.param
case c: TLBundleC => c.param
case d: TLBundleD => d.param
}
}
def size(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.size
case b: TLBundleB => b.size
case c: TLBundleC => c.size
case d: TLBundleD => d.size
}
}
def data(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.data
case b: TLBundleB => b.data
case c: TLBundleC => c.data
case d: TLBundleD => d.data
}
}
def corrupt(x: TLDataChannel): Bool = {
x match {
case a: TLBundleA => a.corrupt
case b: TLBundleB => b.corrupt
case c: TLBundleC => c.corrupt
case d: TLBundleD => d.corrupt
}
}
def mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.mask
case b: TLBundleB => b.mask
case c: TLBundleC => mask(c.address, c.size)
}
}
def full_mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => mask(a.address, a.size)
case b: TLBundleB => mask(b.address, b.size)
case c: TLBundleC => mask(c.address, c.size)
}
}
def address(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.address
case b: TLBundleB => b.address
case c: TLBundleC => c.address
}
}
def source(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.source
case b: TLBundleB => b.source
case c: TLBundleC => c.source
case d: TLBundleD => d.source
}
}
def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes)
def addr_lo(x: UInt): UInt =
if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0)
def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x))
def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x))
def numBeats(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 1.U
case bundle: TLDataChannel => {
val hasData = this.hasData(bundle)
val size = this.size(bundle)
val cutoff = log2Ceil(manager.beatBytes)
val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U
val decode = UIntToOH(size, maxLgSize+1) >> cutoff
Mux(hasData, decode | small.asUInt, 1.U)
}
}
}
def numBeats1(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 0.U
case bundle: TLDataChannel => {
if (maxLgSize == 0) {
0.U
} else {
val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes)
Mux(hasData(bundle), decode, 0.U)
}
}
}
}
def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val beats1 = numBeats1(bits)
val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W))
val counter1 = counter - 1.U
val first = counter === 0.U
val last = counter === 1.U || beats1 === 0.U
val done = last && fire
val count = (beats1 & ~counter1)
when (fire) {
counter := Mux(first, beats1, counter1)
}
(first, last, done, count)
}
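// For example (illustrative): with beatBytes = 8, a 32-byte PutFullData burst has
// numBeats1 = 3; firstlastHelper counts beats 0..3, asserting `first` on the initial
// beat and `last`/`done` on the final accepted beat.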
def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1
def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire)
def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid)
def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2
def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire)
def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid)
def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3
def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire)
def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid)
def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3)
}
def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire)
def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid)
def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4)
}
def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire)
def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid)
def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes))
}
def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire)
def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid)
// Does the request need T permissions to be executed?
def needT(a: TLBundleA): Bool = {
val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLPermissions.NtoB -> false.B,
TLPermissions.NtoT -> true.B,
TLPermissions.BtoT -> true.B))
MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array(
TLMessages.PutFullData -> true.B,
TLMessages.PutPartialData -> true.B,
TLMessages.ArithmeticData -> true.B,
TLMessages.LogicalData -> true.B,
TLMessages.Get -> false.B,
TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLHints.PREFETCH_READ -> false.B,
TLHints.PREFETCH_WRITE -> true.B)),
TLMessages.AcquireBlock -> acq_needT,
TLMessages.AcquirePerm -> acq_needT))
}
// This is a very expensive circuit; use only if you really mean it!
def inFlight(x: TLBundle): (UInt, UInt) = {
val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W))
val bce = manager.anySupportAcquireB && client.anySupportProbe
val (a_first, a_last, _) = firstlast(x.a)
val (b_first, b_last, _) = firstlast(x.b)
val (c_first, c_last, _) = firstlast(x.c)
val (d_first, d_last, _) = firstlast(x.d)
val (e_first, e_last, _) = firstlast(x.e)
val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits))
val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits))
val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits))
val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits))
val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits))
val a_inc = x.a.fire && a_first && a_request
val b_inc = x.b.fire && b_first && b_request
val c_inc = x.c.fire && c_first && c_request
val d_inc = x.d.fire && d_first && d_request
val e_inc = x.e.fire && e_first && e_request
val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil))
val a_dec = x.a.fire && a_last && a_response
val b_dec = x.b.fire && b_last && b_response
val c_dec = x.c.fire && c_last && c_response
val d_dec = x.d.fire && d_last && d_response
val e_dec = x.e.fire && e_last && e_response
val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil))
val next_flight = flight + PopCount(inc) - PopCount(dec)
flight := next_flight
(flight, next_flight)
}
def prettySourceMapping(context: String): String = {
s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n"
}
}
class TLEdgeOut(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
// Transfers
def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquireBlock
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
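// Illustrative usage of AcquireBlock (placeholder names `edge`, `srcId`, `blockAddr`,
// `lgBlockBytes`, `wantAcquire`, `out`):
//   val (legal, a) = edge.AcquireBlock(srcId, blockAddr, lgBlockBytes.U, TLPermissions.NtoT)
//   out.a.valid := wantAcquire && legal
//   out.a.bits  := a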
def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquirePerm
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.Release
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ReleaseData
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) =
Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B)
def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAck
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions, data)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAckData
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B)
def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink)
def GrantAck(toSink: UInt): TLBundleE = {
val e = Wire(new TLBundleE(bundle))
e.sink := toSink
e
}
// Accesses
def Get(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsGetFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Get
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutFullFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutFullData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, mask, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutPartialFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutPartialData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = {
require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsArithmeticFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.ArithmeticData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsLogicalFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.LogicalData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = {
require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsHintFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Hint
a.param := param
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data)
def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAckData
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size)
def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.HintAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
}
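// Minimal usage sketch (not part of the upstream file): drives a single Get
// through the TLEdgeOut constructors defined above. The object name, the
// `edge`/`out` arguments, the fixed source id 0 and the unconditional D-channel
// ready are assumptions made for illustration only; call it from inside a
// Module that owns the client-side TLBundle.
object TLEdgeOutGetSketch
{
  def drive(edge: TLEdgeOut, out: TLBundle, address: UInt, lgSize: UInt, fire: Bool): Unit = {
    // Build the A-channel beat; `legal` reports whether any visible manager supports it.
    val (legal, a) = edge.Get(fromSource = 0.U, toAddress = address, lgSize = lgSize)
    out.a.valid := fire
    out.a.bits := a
    out.d.ready := true.B // sink the AccessAckData unconditionally
    assert(!fire || legal, "Get is not supported by any visible manager")
  }
}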
class TLEdgeIn(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
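  // Transpose a possibly-ragged Seq of Seqs: take the head of every non-empty
  // inner sequence as the next row, then recurse on the tails.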
private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = {
val todo = x.filter(!_.isEmpty)
val heads = todo.map(_.head)
val tails = todo.map(_.tail)
if (todo.isEmpty) Nil else { heads +: myTranspose(tails) }
}
// Transfers
def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = {
require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}")
val legal = client.supportsProbe(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Probe
b.param := capPermissions
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.Grant
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.GrantData
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B)
def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.ReleaseAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
// Accesses
def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = {
require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsGet(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Get
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsPutFull(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutFullData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, mask, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsPutPartial(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutPartialData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsArithmetic(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.ArithmeticData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsLogical(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.LogicalData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = {
require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsHint(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Hint
b.param := param
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size)
def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied)
def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data)
def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAckData
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B)
def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied)
def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B)
def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.HintAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
}
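// Minimal responder sketch (not part of the upstream file): answers each request
// arriving on channel A with a single-beat AccessAckData built by the TLEdgeIn
// constructors above. The object name, the `edge`/`in`/`data` arguments and the
// combinational a->d coupling are assumptions made for illustration only; call
// it from inside a Module that owns the manager-side TLBundle.
object TLEdgeInResponderSketch
{
  def respond(edge: TLEdgeIn, in: TLBundle, data: UInt): Unit = {
    in.a.ready := in.d.ready
    in.d.valid := in.a.valid
    in.d.bits := edge.AccessAck(in.a.bits, data)
    // Channels B, C and E carry no traffic for a TL-UL style responder.
    in.b.valid := false.B
    in.b.bits := DontCare
    in.c.ready := true.B
    in.e.ready := true.B
  }
}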
| module TLMonitor_38( // @[Monitor.scala:36:7]
input clock, // @[Monitor.scala:36:7]
input reset, // @[Monitor.scala:36:7]
input io_in_a_ready, // @[Monitor.scala:20:14]
input io_in_a_valid, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_opcode, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_param, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_size, // @[Monitor.scala:20:14]
input [4:0] io_in_a_bits_source, // @[Monitor.scala:20:14]
input [31:0] io_in_a_bits_address, // @[Monitor.scala:20:14]
input [7:0] io_in_a_bits_mask, // @[Monitor.scala:20:14]
input [63:0] io_in_a_bits_data, // @[Monitor.scala:20:14]
input io_in_a_bits_corrupt, // @[Monitor.scala:20:14]
input io_in_d_ready, // @[Monitor.scala:20:14]
input io_in_d_valid, // @[Monitor.scala:20:14]
input [2:0] io_in_d_bits_opcode, // @[Monitor.scala:20:14]
input [1:0] io_in_d_bits_param, // @[Monitor.scala:20:14]
input [2:0] io_in_d_bits_size, // @[Monitor.scala:20:14]
input [4:0] io_in_d_bits_source, // @[Monitor.scala:20:14]
input io_in_d_bits_sink, // @[Monitor.scala:20:14]
input io_in_d_bits_denied, // @[Monitor.scala:20:14]
input [63:0] io_in_d_bits_data, // @[Monitor.scala:20:14]
input io_in_d_bits_corrupt // @[Monitor.scala:20:14]
);
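  // Generated monitor bookkeeping: this instance observes only the A and D
  // channels of the link, so the _c_* helper wires below (C-channel tracking)
  // are tied to constant zero by the elaborator.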
wire [31:0] _plusarg_reader_1_out; // @[PlusArg.scala:80:11]
wire [31:0] _plusarg_reader_out; // @[PlusArg.scala:80:11]
wire io_in_a_ready_0 = io_in_a_ready; // @[Monitor.scala:36:7]
wire io_in_a_valid_0 = io_in_a_valid; // @[Monitor.scala:36:7]
wire [2:0] io_in_a_bits_opcode_0 = io_in_a_bits_opcode; // @[Monitor.scala:36:7]
wire [2:0] io_in_a_bits_param_0 = io_in_a_bits_param; // @[Monitor.scala:36:7]
wire [2:0] io_in_a_bits_size_0 = io_in_a_bits_size; // @[Monitor.scala:36:7]
wire [4:0] io_in_a_bits_source_0 = io_in_a_bits_source; // @[Monitor.scala:36:7]
wire [31:0] io_in_a_bits_address_0 = io_in_a_bits_address; // @[Monitor.scala:36:7]
wire [7:0] io_in_a_bits_mask_0 = io_in_a_bits_mask; // @[Monitor.scala:36:7]
wire [63:0] io_in_a_bits_data_0 = io_in_a_bits_data; // @[Monitor.scala:36:7]
wire io_in_a_bits_corrupt_0 = io_in_a_bits_corrupt; // @[Monitor.scala:36:7]
wire io_in_d_ready_0 = io_in_d_ready; // @[Monitor.scala:36:7]
wire io_in_d_valid_0 = io_in_d_valid; // @[Monitor.scala:36:7]
wire [2:0] io_in_d_bits_opcode_0 = io_in_d_bits_opcode; // @[Monitor.scala:36:7]
wire [1:0] io_in_d_bits_param_0 = io_in_d_bits_param; // @[Monitor.scala:36:7]
wire [2:0] io_in_d_bits_size_0 = io_in_d_bits_size; // @[Monitor.scala:36:7]
wire [4:0] io_in_d_bits_source_0 = io_in_d_bits_source; // @[Monitor.scala:36:7]
wire io_in_d_bits_sink_0 = io_in_d_bits_sink; // @[Monitor.scala:36:7]
wire io_in_d_bits_denied_0 = io_in_d_bits_denied; // @[Monitor.scala:36:7]
wire [63:0] io_in_d_bits_data_0 = io_in_d_bits_data; // @[Monitor.scala:36:7]
wire io_in_d_bits_corrupt_0 = io_in_d_bits_corrupt; // @[Monitor.scala:36:7]
wire _source_ok_T = 1'h0; // @[Parameters.scala:54:10]
wire _source_ok_T_6 = 1'h0; // @[Parameters.scala:54:10]
wire sink_ok = 1'h0; // @[Monitor.scala:309:31]
wire _c_first_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_T = 1'h0; // @[Decoupled.scala:51:35]
wire c_first_beats1_opdata = 1'h0; // @[Edges.scala:102:36]
wire _c_first_last_T = 1'h0; // @[Edges.scala:232:25]
wire c_first_done = 1'h0; // @[Edges.scala:233:22]
wire _c_set_wo_ready_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_wo_ready_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_wo_ready_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_wo_ready_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_wo_ready_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_wo_ready_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_interm_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_interm_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_interm_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_interm_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_interm_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_interm_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_interm_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_interm_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_interm_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_interm_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_interm_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_interm_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_T = 1'h0; // @[Monitor.scala:772:47]
wire _c_probe_ack_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_T_1 = 1'h0; // @[Monitor.scala:772:95]
wire c_probe_ack = 1'h0; // @[Monitor.scala:772:71]
wire _same_cycle_resp_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_T_3 = 1'h0; // @[Monitor.scala:795:44]
wire _same_cycle_resp_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_T_4 = 1'h0; // @[Edges.scala:68:36]
wire _same_cycle_resp_T_5 = 1'h0; // @[Edges.scala:68:51]
wire _same_cycle_resp_T_6 = 1'h0; // @[Edges.scala:68:40]
wire _same_cycle_resp_T_7 = 1'h0; // @[Monitor.scala:795:55]
wire _same_cycle_resp_WIRE_4_ready = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_4_valid = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_4_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_5_ready = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_5_valid = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_5_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire same_cycle_resp_1 = 1'h0; // @[Monitor.scala:795:88]
wire [2:0] responseMap_0 = 3'h0; // @[Monitor.scala:643:42]
wire [2:0] responseMap_1 = 3'h0; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_0 = 3'h0; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_1 = 3'h0; // @[Monitor.scala:644:42]
wire [2:0] _c_first_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_2_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_3_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] c_first_beats1_decode = 3'h0; // @[Edges.scala:220:59]
wire [2:0] c_first_beats1 = 3'h0; // @[Edges.scala:221:14]
wire [2:0] _c_first_count_T = 3'h0; // @[Edges.scala:234:27]
wire [2:0] c_first_count = 3'h0; // @[Edges.scala:234:25]
wire [2:0] _c_first_counter_T = 3'h0; // @[Edges.scala:236:21]
wire [2:0] _c_set_wo_ready_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_wo_ready_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_wo_ready_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_wo_ready_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_wo_ready_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_wo_ready_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_interm_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_interm_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_interm_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_interm_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_interm_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_interm_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_interm_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_interm_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_interm_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_2_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_3_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_2_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_3_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_4_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_4_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_4_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_5_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_5_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_5_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire _source_ok_T_1 = 1'h1; // @[Parameters.scala:54:32]
wire _source_ok_T_2 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_3 = 1'h1; // @[Parameters.scala:54:67]
wire _source_ok_T_7 = 1'h1; // @[Parameters.scala:54:32]
wire _source_ok_T_8 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_9 = 1'h1; // @[Parameters.scala:54:67]
wire c_first = 1'h1; // @[Edges.scala:231:25]
wire _c_first_last_T_1 = 1'h1; // @[Edges.scala:232:43]
wire c_first_last = 1'h1; // @[Edges.scala:232:33]
wire [2:0] c_first_counter1 = 3'h7; // @[Edges.scala:230:28]
wire [3:0] _c_first_counter1_T = 4'hF; // @[Edges.scala:230:28]
wire [63:0] _c_first_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_first_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_first_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_first_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_set_wo_ready_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_set_wo_ready_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_opcodes_set_interm_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_opcodes_set_interm_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_sizes_set_interm_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_sizes_set_interm_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_opcodes_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_opcodes_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_sizes_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_sizes_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_probe_ack_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_probe_ack_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_probe_ack_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_probe_ack_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _same_cycle_resp_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _same_cycle_resp_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _same_cycle_resp_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _same_cycle_resp_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _same_cycle_resp_WIRE_4_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _same_cycle_resp_WIRE_5_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [31:0] _c_first_WIRE_bits_address = 32'h0; // @[Bundles.scala:265:74]
wire [31:0] _c_first_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:61]
wire [31:0] _c_first_WIRE_2_bits_address = 32'h0; // @[Bundles.scala:265:74]
wire [31:0] _c_first_WIRE_3_bits_address = 32'h0; // @[Bundles.scala:265:61]
wire [31:0] _c_set_wo_ready_WIRE_bits_address = 32'h0; // @[Bundles.scala:265:74]
wire [31:0] _c_set_wo_ready_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:61]
wire [31:0] _c_set_WIRE_bits_address = 32'h0; // @[Bundles.scala:265:74]
wire [31:0] _c_set_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:61]
wire [31:0] _c_opcodes_set_interm_WIRE_bits_address = 32'h0; // @[Bundles.scala:265:74]
wire [31:0] _c_opcodes_set_interm_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:61]
wire [31:0] _c_sizes_set_interm_WIRE_bits_address = 32'h0; // @[Bundles.scala:265:74]
wire [31:0] _c_sizes_set_interm_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:61]
wire [31:0] _c_opcodes_set_WIRE_bits_address = 32'h0; // @[Bundles.scala:265:74]
wire [31:0] _c_opcodes_set_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:61]
wire [31:0] _c_sizes_set_WIRE_bits_address = 32'h0; // @[Bundles.scala:265:74]
wire [31:0] _c_sizes_set_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:61]
wire [31:0] _c_probe_ack_WIRE_bits_address = 32'h0; // @[Bundles.scala:265:74]
wire [31:0] _c_probe_ack_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:61]
wire [31:0] _c_probe_ack_WIRE_2_bits_address = 32'h0; // @[Bundles.scala:265:74]
wire [31:0] _c_probe_ack_WIRE_3_bits_address = 32'h0; // @[Bundles.scala:265:61]
wire [31:0] _same_cycle_resp_WIRE_bits_address = 32'h0; // @[Bundles.scala:265:74]
wire [31:0] _same_cycle_resp_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:61]
wire [31:0] _same_cycle_resp_WIRE_2_bits_address = 32'h0; // @[Bundles.scala:265:74]
wire [31:0] _same_cycle_resp_WIRE_3_bits_address = 32'h0; // @[Bundles.scala:265:61]
wire [31:0] _same_cycle_resp_WIRE_4_bits_address = 32'h0; // @[Bundles.scala:265:74]
wire [31:0] _same_cycle_resp_WIRE_5_bits_address = 32'h0; // @[Bundles.scala:265:61]
wire [4:0] _c_first_WIRE_bits_source = 5'h0; // @[Bundles.scala:265:74]
wire [4:0] _c_first_WIRE_1_bits_source = 5'h0; // @[Bundles.scala:265:61]
wire [4:0] _c_first_WIRE_2_bits_source = 5'h0; // @[Bundles.scala:265:74]
wire [4:0] _c_first_WIRE_3_bits_source = 5'h0; // @[Bundles.scala:265:61]
wire [4:0] _c_set_wo_ready_WIRE_bits_source = 5'h0; // @[Bundles.scala:265:74]
wire [4:0] _c_set_wo_ready_WIRE_1_bits_source = 5'h0; // @[Bundles.scala:265:61]
wire [4:0] _c_set_WIRE_bits_source = 5'h0; // @[Bundles.scala:265:74]
wire [4:0] _c_set_WIRE_1_bits_source = 5'h0; // @[Bundles.scala:265:61]
wire [4:0] _c_opcodes_set_interm_WIRE_bits_source = 5'h0; // @[Bundles.scala:265:74]
wire [4:0] _c_opcodes_set_interm_WIRE_1_bits_source = 5'h0; // @[Bundles.scala:265:61]
wire [4:0] _c_sizes_set_interm_WIRE_bits_source = 5'h0; // @[Bundles.scala:265:74]
wire [4:0] _c_sizes_set_interm_WIRE_1_bits_source = 5'h0; // @[Bundles.scala:265:61]
wire [4:0] _c_opcodes_set_WIRE_bits_source = 5'h0; // @[Bundles.scala:265:74]
wire [4:0] _c_opcodes_set_WIRE_1_bits_source = 5'h0; // @[Bundles.scala:265:61]
wire [4:0] _c_sizes_set_WIRE_bits_source = 5'h0; // @[Bundles.scala:265:74]
wire [4:0] _c_sizes_set_WIRE_1_bits_source = 5'h0; // @[Bundles.scala:265:61]
wire [4:0] _c_probe_ack_WIRE_bits_source = 5'h0; // @[Bundles.scala:265:74]
wire [4:0] _c_probe_ack_WIRE_1_bits_source = 5'h0; // @[Bundles.scala:265:61]
wire [4:0] _c_probe_ack_WIRE_2_bits_source = 5'h0; // @[Bundles.scala:265:74]
wire [4:0] _c_probe_ack_WIRE_3_bits_source = 5'h0; // @[Bundles.scala:265:61]
wire [4:0] _same_cycle_resp_WIRE_bits_source = 5'h0; // @[Bundles.scala:265:74]
wire [4:0] _same_cycle_resp_WIRE_1_bits_source = 5'h0; // @[Bundles.scala:265:61]
wire [4:0] _same_cycle_resp_WIRE_2_bits_source = 5'h0; // @[Bundles.scala:265:74]
wire [4:0] _same_cycle_resp_WIRE_3_bits_source = 5'h0; // @[Bundles.scala:265:61]
wire [4:0] _same_cycle_resp_WIRE_4_bits_source = 5'h0; // @[Bundles.scala:265:74]
wire [4:0] _same_cycle_resp_WIRE_5_bits_source = 5'h0; // @[Bundles.scala:265:61]
wire [15:0] _a_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _a_size_lookup_T_5 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _d_opcodes_clr_T_3 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _d_sizes_clr_T_3 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _c_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:724:57]
wire [15:0] _c_size_lookup_T_5 = 16'hF; // @[Monitor.scala:724:57]
wire [15:0] _d_opcodes_clr_T_9 = 16'hF; // @[Monitor.scala:724:57]
wire [15:0] _d_sizes_clr_T_9 = 16'hF; // @[Monitor.scala:724:57]
wire [16:0] _a_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _a_size_lookup_T_4 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _d_opcodes_clr_T_2 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _d_sizes_clr_T_2 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _c_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:724:57]
wire [16:0] _c_size_lookup_T_4 = 17'hF; // @[Monitor.scala:724:57]
wire [16:0] _d_opcodes_clr_T_8 = 17'hF; // @[Monitor.scala:724:57]
wire [16:0] _d_sizes_clr_T_8 = 17'hF; // @[Monitor.scala:724:57]
wire [15:0] _a_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _a_size_lookup_T_3 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _d_opcodes_clr_T_1 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _d_sizes_clr_T_1 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _c_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:724:51]
wire [15:0] _c_size_lookup_T_3 = 16'h10; // @[Monitor.scala:724:51]
wire [15:0] _d_opcodes_clr_T_7 = 16'h10; // @[Monitor.scala:724:51]
wire [15:0] _d_sizes_clr_T_7 = 16'h10; // @[Monitor.scala:724:51]
wire [258:0] _c_opcodes_set_T_1 = 259'h0; // @[Monitor.scala:767:54]
wire [258:0] _c_sizes_set_T_1 = 259'h0; // @[Monitor.scala:768:52]
wire [7:0] _c_opcodes_set_T = 8'h0; // @[Monitor.scala:767:79]
wire [7:0] _c_sizes_set_T = 8'h0; // @[Monitor.scala:768:77]
wire [3:0] _c_opcodes_set_interm_T_1 = 4'h1; // @[Monitor.scala:765:61]
wire [3:0] _c_sizes_set_interm_T_1 = 4'h1; // @[Monitor.scala:766:59]
wire [3:0] c_opcodes_set_interm = 4'h0; // @[Monitor.scala:754:40]
wire [3:0] c_sizes_set_interm = 4'h0; // @[Monitor.scala:755:40]
wire [3:0] _c_opcodes_set_interm_T = 4'h0; // @[Monitor.scala:765:53]
wire [3:0] _c_sizes_set_interm_T = 4'h0; // @[Monitor.scala:766:51]
wire [31:0] _c_set_wo_ready_T = 32'h1; // @[OneHot.scala:58:35]
wire [31:0] _c_set_T = 32'h1; // @[OneHot.scala:58:35]
wire [79:0] c_opcodes_set = 80'h0; // @[Monitor.scala:740:34]
wire [79:0] c_sizes_set = 80'h0; // @[Monitor.scala:741:34]
wire [19:0] c_set = 20'h0; // @[Monitor.scala:738:34]
wire [19:0] c_set_wo_ready = 20'h0; // @[Monitor.scala:739:34]
wire [5:0] _c_first_beats1_decode_T_2 = 6'h0; // @[package.scala:243:46]
wire [5:0] _c_first_beats1_decode_T_1 = 6'h3F; // @[package.scala:243:76]
wire [12:0] _c_first_beats1_decode_T = 13'h3F; // @[package.scala:243:71]
wire [2:0] responseMap_6 = 3'h4; // @[Monitor.scala:643:42]
wire [2:0] responseMap_7 = 3'h4; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_7 = 3'h4; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_6 = 3'h5; // @[Monitor.scala:644:42]
wire [2:0] responseMap_5 = 3'h2; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_5 = 3'h2; // @[Monitor.scala:644:42]
wire [2:0] responseMap_2 = 3'h1; // @[Monitor.scala:643:42]
wire [2:0] responseMap_3 = 3'h1; // @[Monitor.scala:643:42]
wire [2:0] responseMap_4 = 3'h1; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_2 = 3'h1; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_3 = 3'h1; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_4 = 3'h1; // @[Monitor.scala:644:42]
wire [3:0] _a_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:637:123]
wire [3:0] _a_size_lookup_T_2 = 4'h4; // @[Monitor.scala:641:117]
wire [3:0] _d_opcodes_clr_T = 4'h4; // @[Monitor.scala:680:48]
wire [3:0] _d_sizes_clr_T = 4'h4; // @[Monitor.scala:681:48]
wire [3:0] _c_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:749:123]
wire [3:0] _c_size_lookup_T_2 = 4'h4; // @[Monitor.scala:750:119]
wire [3:0] _d_opcodes_clr_T_6 = 4'h4; // @[Monitor.scala:790:48]
wire [3:0] _d_sizes_clr_T_6 = 4'h4; // @[Monitor.scala:791:48]
wire [2:0] _mask_sizeOH_T = io_in_a_bits_size_0; // @[Misc.scala:202:34]
wire [4:0] _source_ok_uncommonBits_T = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [4:0] _uncommonBits_T = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [4:0] _uncommonBits_T_1 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [4:0] _uncommonBits_T_2 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [4:0] _uncommonBits_T_3 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [4:0] _uncommonBits_T_4 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [4:0] _uncommonBits_T_5 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [4:0] _uncommonBits_T_6 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [4:0] _uncommonBits_T_7 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [4:0] _uncommonBits_T_8 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [4:0] _source_ok_uncommonBits_T_1 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire [4:0] source_ok_uncommonBits = _source_ok_uncommonBits_T; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_4 = source_ok_uncommonBits < 5'h14; // @[Parameters.scala:52:56, :57:20]
wire _source_ok_T_5 = _source_ok_T_4; // @[Parameters.scala:56:48, :57:20]
wire _source_ok_WIRE_0 = _source_ok_T_5; // @[Parameters.scala:1138:31]
wire [12:0] _GEN = 13'h3F << io_in_a_bits_size_0; // @[package.scala:243:71]
wire [12:0] _is_aligned_mask_T; // @[package.scala:243:71]
assign _is_aligned_mask_T = _GEN; // @[package.scala:243:71]
wire [12:0] _a_first_beats1_decode_T; // @[package.scala:243:71]
assign _a_first_beats1_decode_T = _GEN; // @[package.scala:243:71]
wire [12:0] _a_first_beats1_decode_T_3; // @[package.scala:243:71]
assign _a_first_beats1_decode_T_3 = _GEN; // @[package.scala:243:71]
wire [5:0] _is_aligned_mask_T_1 = _is_aligned_mask_T[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] is_aligned_mask = ~_is_aligned_mask_T_1; // @[package.scala:243:{46,76}]
wire [31:0] _is_aligned_T = {26'h0, io_in_a_bits_address_0[5:0] & is_aligned_mask}; // @[package.scala:243:46]
wire is_aligned = _is_aligned_T == 32'h0; // @[Edges.scala:21:{16,24}]
wire [1:0] mask_sizeOH_shiftAmount = _mask_sizeOH_T[1:0]; // @[OneHot.scala:64:49]
wire [3:0] _mask_sizeOH_T_1 = 4'h1 << mask_sizeOH_shiftAmount; // @[OneHot.scala:64:49, :65:12]
wire [2:0] _mask_sizeOH_T_2 = _mask_sizeOH_T_1[2:0]; // @[OneHot.scala:65:{12,27}]
wire [2:0] mask_sizeOH = {_mask_sizeOH_T_2[2:1], 1'h1}; // @[OneHot.scala:65:27]
wire mask_sub_sub_sub_0_1 = io_in_a_bits_size_0 > 3'h2; // @[Misc.scala:206:21]
wire mask_sub_sub_size = mask_sizeOH[2]; // @[Misc.scala:202:81, :209:26]
wire mask_sub_sub_bit = io_in_a_bits_address_0[2]; // @[Misc.scala:210:26]
wire mask_sub_sub_1_2 = mask_sub_sub_bit; // @[Misc.scala:210:26, :214:27]
wire mask_sub_sub_nbit = ~mask_sub_sub_bit; // @[Misc.scala:210:26, :211:20]
wire mask_sub_sub_0_2 = mask_sub_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_sub_sub_acc_T = mask_sub_sub_size & mask_sub_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_sub_0_1 = mask_sub_sub_sub_0_1 | _mask_sub_sub_acc_T; // @[Misc.scala:206:21, :215:{29,38}]
wire _mask_sub_sub_acc_T_1 = mask_sub_sub_size & mask_sub_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_sub_1_1 = mask_sub_sub_sub_0_1 | _mask_sub_sub_acc_T_1; // @[Misc.scala:206:21, :215:{29,38}]
wire mask_sub_size = mask_sizeOH[1]; // @[Misc.scala:202:81, :209:26]
wire mask_sub_bit = io_in_a_bits_address_0[1]; // @[Misc.scala:210:26]
wire mask_sub_nbit = ~mask_sub_bit; // @[Misc.scala:210:26, :211:20]
wire mask_sub_0_2 = mask_sub_sub_0_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_sub_acc_T = mask_sub_size & mask_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_0_1 = mask_sub_sub_0_1 | _mask_sub_acc_T; // @[Misc.scala:215:{29,38}]
wire mask_sub_1_2 = mask_sub_sub_0_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_sub_acc_T_1 = mask_sub_size & mask_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_1_1 = mask_sub_sub_0_1 | _mask_sub_acc_T_1; // @[Misc.scala:215:{29,38}]
wire mask_sub_2_2 = mask_sub_sub_1_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_sub_acc_T_2 = mask_sub_size & mask_sub_2_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_2_1 = mask_sub_sub_1_1 | _mask_sub_acc_T_2; // @[Misc.scala:215:{29,38}]
wire mask_sub_3_2 = mask_sub_sub_1_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_sub_acc_T_3 = mask_sub_size & mask_sub_3_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_3_1 = mask_sub_sub_1_1 | _mask_sub_acc_T_3; // @[Misc.scala:215:{29,38}]
wire mask_size = mask_sizeOH[0]; // @[Misc.scala:202:81, :209:26]
wire mask_bit = io_in_a_bits_address_0[0]; // @[Misc.scala:210:26]
wire mask_nbit = ~mask_bit; // @[Misc.scala:210:26, :211:20]
wire mask_eq = mask_sub_0_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T = mask_size & mask_eq; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc = mask_sub_0_1 | _mask_acc_T; // @[Misc.scala:215:{29,38}]
wire mask_eq_1 = mask_sub_0_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_1 = mask_size & mask_eq_1; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_1 = mask_sub_0_1 | _mask_acc_T_1; // @[Misc.scala:215:{29,38}]
wire mask_eq_2 = mask_sub_1_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T_2 = mask_size & mask_eq_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_2 = mask_sub_1_1 | _mask_acc_T_2; // @[Misc.scala:215:{29,38}]
wire mask_eq_3 = mask_sub_1_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_3 = mask_size & mask_eq_3; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_3 = mask_sub_1_1 | _mask_acc_T_3; // @[Misc.scala:215:{29,38}]
wire mask_eq_4 = mask_sub_2_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T_4 = mask_size & mask_eq_4; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_4 = mask_sub_2_1 | _mask_acc_T_4; // @[Misc.scala:215:{29,38}]
wire mask_eq_5 = mask_sub_2_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_5 = mask_size & mask_eq_5; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_5 = mask_sub_2_1 | _mask_acc_T_5; // @[Misc.scala:215:{29,38}]
wire mask_eq_6 = mask_sub_3_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T_6 = mask_size & mask_eq_6; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_6 = mask_sub_3_1 | _mask_acc_T_6; // @[Misc.scala:215:{29,38}]
wire mask_eq_7 = mask_sub_3_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_7 = mask_size & mask_eq_7; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_7 = mask_sub_3_1 | _mask_acc_T_7; // @[Misc.scala:215:{29,38}]
wire [1:0] mask_lo_lo = {mask_acc_1, mask_acc}; // @[Misc.scala:215:29, :222:10]
wire [1:0] mask_lo_hi = {mask_acc_3, mask_acc_2}; // @[Misc.scala:215:29, :222:10]
wire [3:0] mask_lo = {mask_lo_hi, mask_lo_lo}; // @[Misc.scala:222:10]
wire [1:0] mask_hi_lo = {mask_acc_5, mask_acc_4}; // @[Misc.scala:215:29, :222:10]
wire [1:0] mask_hi_hi = {mask_acc_7, mask_acc_6}; // @[Misc.scala:215:29, :222:10]
wire [3:0] mask_hi = {mask_hi_hi, mask_hi_lo}; // @[Misc.scala:222:10]
wire [7:0] mask = {mask_hi, mask_lo}; // @[Misc.scala:222:10]
wire [4:0] uncommonBits = _uncommonBits_T; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_1 = _uncommonBits_T_1; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_2 = _uncommonBits_T_2; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_3 = _uncommonBits_T_3; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_4 = _uncommonBits_T_4; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_5 = _uncommonBits_T_5; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_6 = _uncommonBits_T_6; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_7 = _uncommonBits_T_7; // @[Parameters.scala:52:{29,56}]
wire [4:0] uncommonBits_8 = _uncommonBits_T_8; // @[Parameters.scala:52:{29,56}]
wire [4:0] source_ok_uncommonBits_1 = _source_ok_uncommonBits_T_1; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_10 = source_ok_uncommonBits_1 < 5'h14; // @[Parameters.scala:52:56, :57:20]
wire _source_ok_T_11 = _source_ok_T_10; // @[Parameters.scala:56:48, :57:20]
wire _source_ok_WIRE_1_0 = _source_ok_T_11; // @[Parameters.scala:1138:31]
wire _T_732 = io_in_a_ready_0 & io_in_a_valid_0; // @[Decoupled.scala:51:35]
wire _a_first_T; // @[Decoupled.scala:51:35]
assign _a_first_T = _T_732; // @[Decoupled.scala:51:35]
wire _a_first_T_1; // @[Decoupled.scala:51:35]
assign _a_first_T_1 = _T_732; // @[Decoupled.scala:51:35]
wire [5:0] _a_first_beats1_decode_T_1 = _a_first_beats1_decode_T[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] _a_first_beats1_decode_T_2 = ~_a_first_beats1_decode_T_1; // @[package.scala:243:{46,76}]
wire [2:0] a_first_beats1_decode = _a_first_beats1_decode_T_2[5:3]; // @[package.scala:243:46]
wire _a_first_beats1_opdata_T = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7]
wire _a_first_beats1_opdata_T_1 = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7]
wire a_first_beats1_opdata = ~_a_first_beats1_opdata_T; // @[Edges.scala:92:{28,37}]
wire [2:0] a_first_beats1 = a_first_beats1_opdata ? a_first_beats1_decode : 3'h0; // @[Edges.scala:92:28, :220:59, :221:14]
reg [2:0] a_first_counter; // @[Edges.scala:229:27]
wire [3:0] _a_first_counter1_T = {1'h0, a_first_counter} - 4'h1; // @[Edges.scala:229:27, :230:28]
wire [2:0] a_first_counter1 = _a_first_counter1_T[2:0]; // @[Edges.scala:230:28]
wire a_first = a_first_counter == 3'h0; // @[Edges.scala:229:27, :231:25]
wire _a_first_last_T = a_first_counter == 3'h1; // @[Edges.scala:229:27, :232:25]
wire _a_first_last_T_1 = a_first_beats1 == 3'h0; // @[Edges.scala:221:14, :232:43]
wire a_first_last = _a_first_last_T | _a_first_last_T_1; // @[Edges.scala:232:{25,33,43}]
wire a_first_done = a_first_last & _a_first_T; // @[Decoupled.scala:51:35]
wire [2:0] _a_first_count_T = ~a_first_counter1; // @[Edges.scala:230:28, :234:27]
wire [2:0] a_first_count = a_first_beats1 & _a_first_count_T; // @[Edges.scala:221:14, :234:{25,27}]
wire [2:0] _a_first_counter_T = a_first ? a_first_beats1 : a_first_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
reg [2:0] opcode; // @[Monitor.scala:387:22]
reg [2:0] param; // @[Monitor.scala:388:22]
reg [2:0] size; // @[Monitor.scala:389:22]
reg [4:0] source; // @[Monitor.scala:390:22]
reg [31:0] address; // @[Monitor.scala:391:22]
wire _T_805 = io_in_d_ready_0 & io_in_d_valid_0; // @[Decoupled.scala:51:35]
wire _d_first_T; // @[Decoupled.scala:51:35]
assign _d_first_T = _T_805; // @[Decoupled.scala:51:35]
wire _d_first_T_1; // @[Decoupled.scala:51:35]
assign _d_first_T_1 = _T_805; // @[Decoupled.scala:51:35]
wire _d_first_T_2; // @[Decoupled.scala:51:35]
assign _d_first_T_2 = _T_805; // @[Decoupled.scala:51:35]
wire [12:0] _GEN_0 = 13'h3F << io_in_d_bits_size_0; // @[package.scala:243:71]
wire [12:0] _d_first_beats1_decode_T; // @[package.scala:243:71]
assign _d_first_beats1_decode_T = _GEN_0; // @[package.scala:243:71]
wire [12:0] _d_first_beats1_decode_T_3; // @[package.scala:243:71]
assign _d_first_beats1_decode_T_3 = _GEN_0; // @[package.scala:243:71]
wire [12:0] _d_first_beats1_decode_T_6; // @[package.scala:243:71]
assign _d_first_beats1_decode_T_6 = _GEN_0; // @[package.scala:243:71]
wire [5:0] _d_first_beats1_decode_T_1 = _d_first_beats1_decode_T[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] _d_first_beats1_decode_T_2 = ~_d_first_beats1_decode_T_1; // @[package.scala:243:{46,76}]
wire [2:0] d_first_beats1_decode = _d_first_beats1_decode_T_2[5:3]; // @[package.scala:243:46]
wire d_first_beats1_opdata = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7]
wire d_first_beats1_opdata_1 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7]
wire d_first_beats1_opdata_2 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7]
wire [2:0] d_first_beats1 = d_first_beats1_opdata ? d_first_beats1_decode : 3'h0; // @[Edges.scala:106:36, :220:59, :221:14]
reg [2:0] d_first_counter; // @[Edges.scala:229:27]
wire [3:0] _d_first_counter1_T = {1'h0, d_first_counter} - 4'h1; // @[Edges.scala:229:27, :230:28]
wire [2:0] d_first_counter1 = _d_first_counter1_T[2:0]; // @[Edges.scala:230:28]
wire d_first = d_first_counter == 3'h0; // @[Edges.scala:229:27, :231:25]
wire _d_first_last_T = d_first_counter == 3'h1; // @[Edges.scala:229:27, :232:25]
wire _d_first_last_T_1 = d_first_beats1 == 3'h0; // @[Edges.scala:221:14, :232:43]
wire d_first_last = _d_first_last_T | _d_first_last_T_1; // @[Edges.scala:232:{25,33,43}]
wire d_first_done = d_first_last & _d_first_T; // @[Decoupled.scala:51:35]
wire [2:0] _d_first_count_T = ~d_first_counter1; // @[Edges.scala:230:28, :234:27]
wire [2:0] d_first_count = d_first_beats1 & _d_first_count_T; // @[Edges.scala:221:14, :234:{25,27}]
wire [2:0] _d_first_counter_T = d_first ? d_first_beats1 : d_first_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
reg [2:0] opcode_1; // @[Monitor.scala:538:22]
reg [1:0] param_1; // @[Monitor.scala:539:22]
reg [2:0] size_1; // @[Monitor.scala:540:22]
reg [4:0] source_1; // @[Monitor.scala:541:22]
reg sink; // @[Monitor.scala:542:22]
reg denied; // @[Monitor.scala:543:22]
reg [19:0] inflight; // @[Monitor.scala:614:27]
reg [79:0] inflight_opcodes; // @[Monitor.scala:616:35]
reg [79:0] inflight_sizes; // @[Monitor.scala:618:33]
wire [5:0] _a_first_beats1_decode_T_4 = _a_first_beats1_decode_T_3[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] _a_first_beats1_decode_T_5 = ~_a_first_beats1_decode_T_4; // @[package.scala:243:{46,76}]
wire [2:0] a_first_beats1_decode_1 = _a_first_beats1_decode_T_5[5:3]; // @[package.scala:243:46]
wire a_first_beats1_opdata_1 = ~_a_first_beats1_opdata_T_1; // @[Edges.scala:92:{28,37}]
wire [2:0] a_first_beats1_1 = a_first_beats1_opdata_1 ? a_first_beats1_decode_1 : 3'h0; // @[Edges.scala:92:28, :220:59, :221:14]
reg [2:0] a_first_counter_1; // @[Edges.scala:229:27]
wire [3:0] _a_first_counter1_T_1 = {1'h0, a_first_counter_1} - 4'h1; // @[Edges.scala:229:27, :230:28]
wire [2:0] a_first_counter1_1 = _a_first_counter1_T_1[2:0]; // @[Edges.scala:230:28]
wire a_first_1 = a_first_counter_1 == 3'h0; // @[Edges.scala:229:27, :231:25]
wire _a_first_last_T_2 = a_first_counter_1 == 3'h1; // @[Edges.scala:229:27, :232:25]
wire _a_first_last_T_3 = a_first_beats1_1 == 3'h0; // @[Edges.scala:221:14, :232:43]
wire a_first_last_1 = _a_first_last_T_2 | _a_first_last_T_3; // @[Edges.scala:232:{25,33,43}]
wire a_first_done_1 = a_first_last_1 & _a_first_T_1; // @[Decoupled.scala:51:35]
wire [2:0] _a_first_count_T_1 = ~a_first_counter1_1; // @[Edges.scala:230:28, :234:27]
wire [2:0] a_first_count_1 = a_first_beats1_1 & _a_first_count_T_1; // @[Edges.scala:221:14, :234:{25,27}]
wire [2:0] _a_first_counter_T_1 = a_first_1 ? a_first_beats1_1 : a_first_counter1_1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
wire [5:0] _d_first_beats1_decode_T_4 = _d_first_beats1_decode_T_3[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] _d_first_beats1_decode_T_5 = ~_d_first_beats1_decode_T_4; // @[package.scala:243:{46,76}]
wire [2:0] d_first_beats1_decode_1 = _d_first_beats1_decode_T_5[5:3]; // @[package.scala:243:46]
wire [2:0] d_first_beats1_1 = d_first_beats1_opdata_1 ? d_first_beats1_decode_1 : 3'h0; // @[Edges.scala:106:36, :220:59, :221:14]
reg [2:0] d_first_counter_1; // @[Edges.scala:229:27]
wire [3:0] _d_first_counter1_T_1 = {1'h0, d_first_counter_1} - 4'h1; // @[Edges.scala:229:27, :230:28]
wire [2:0] d_first_counter1_1 = _d_first_counter1_T_1[2:0]; // @[Edges.scala:230:28]
wire d_first_1 = d_first_counter_1 == 3'h0; // @[Edges.scala:229:27, :231:25]
wire _d_first_last_T_2 = d_first_counter_1 == 3'h1; // @[Edges.scala:229:27, :232:25]
wire _d_first_last_T_3 = d_first_beats1_1 == 3'h0; // @[Edges.scala:221:14, :232:43]
wire d_first_last_1 = _d_first_last_T_2 | _d_first_last_T_3; // @[Edges.scala:232:{25,33,43}]
wire d_first_done_1 = d_first_last_1 & _d_first_T_1; // @[Decoupled.scala:51:35]
wire [2:0] _d_first_count_T_1 = ~d_first_counter1_1; // @[Edges.scala:230:28, :234:27]
wire [2:0] d_first_count_1 = d_first_beats1_1 & _d_first_count_T_1; // @[Edges.scala:221:14, :234:{25,27}]
wire [2:0] _d_first_counter_T_1 = d_first_1 ? d_first_beats1_1 : d_first_counter1_1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
wire [19:0] a_set; // @[Monitor.scala:626:34]
wire [19:0] a_set_wo_ready; // @[Monitor.scala:627:34]
wire [79:0] a_opcodes_set; // @[Monitor.scala:630:33]
wire [79:0] a_sizes_set; // @[Monitor.scala:632:31]
wire [2:0] a_opcode_lookup; // @[Monitor.scala:635:35]
wire [7:0] _GEN_1 = {1'h0, io_in_d_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :637:69]
wire [7:0] _a_opcode_lookup_T; // @[Monitor.scala:637:69]
assign _a_opcode_lookup_T = _GEN_1; // @[Monitor.scala:637:69]
wire [7:0] _a_size_lookup_T; // @[Monitor.scala:641:65]
assign _a_size_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :641:65]
wire [7:0] _d_opcodes_clr_T_4; // @[Monitor.scala:680:101]
assign _d_opcodes_clr_T_4 = _GEN_1; // @[Monitor.scala:637:69, :680:101]
wire [7:0] _d_sizes_clr_T_4; // @[Monitor.scala:681:99]
assign _d_sizes_clr_T_4 = _GEN_1; // @[Monitor.scala:637:69, :681:99]
wire [7:0] _c_opcode_lookup_T; // @[Monitor.scala:749:69]
assign _c_opcode_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :749:69]
wire [7:0] _c_size_lookup_T; // @[Monitor.scala:750:67]
assign _c_size_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :750:67]
wire [7:0] _d_opcodes_clr_T_10; // @[Monitor.scala:790:101]
assign _d_opcodes_clr_T_10 = _GEN_1; // @[Monitor.scala:637:69, :790:101]
wire [7:0] _d_sizes_clr_T_10; // @[Monitor.scala:791:99]
assign _d_sizes_clr_T_10 = _GEN_1; // @[Monitor.scala:637:69, :791:99]
wire [79:0] _a_opcode_lookup_T_1 = inflight_opcodes >> _a_opcode_lookup_T; // @[Monitor.scala:616:35, :637:{44,69}]
wire [79:0] _a_opcode_lookup_T_6 = {76'h0, _a_opcode_lookup_T_1[3:0]}; // @[Monitor.scala:637:{44,97}]
wire [79:0] _a_opcode_lookup_T_7 = {1'h0, _a_opcode_lookup_T_6[79:1]}; // @[Monitor.scala:637:{97,152}]
assign a_opcode_lookup = _a_opcode_lookup_T_7[2:0]; // @[Monitor.scala:635:35, :637:{21,152}]
wire [3:0] a_size_lookup; // @[Monitor.scala:639:33]
wire [79:0] _a_size_lookup_T_1 = inflight_sizes >> _a_size_lookup_T; // @[Monitor.scala:618:33, :641:{40,65}]
wire [79:0] _a_size_lookup_T_6 = {76'h0, _a_size_lookup_T_1[3:0]}; // @[Monitor.scala:641:{40,91}]
wire [79:0] _a_size_lookup_T_7 = {1'h0, _a_size_lookup_T_6[79:1]}; // @[Monitor.scala:641:{91,144}]
assign a_size_lookup = _a_size_lookup_T_7[3:0]; // @[Monitor.scala:639:33, :641:{19,144}]
wire [3:0] a_opcodes_set_interm; // @[Monitor.scala:646:40]
wire [3:0] a_sizes_set_interm; // @[Monitor.scala:648:38]
wire _same_cycle_resp_T = io_in_a_valid_0 & a_first_1; // @[Monitor.scala:36:7, :651:26, :684:44]
wire [31:0] _GEN_2 = 32'h1 << io_in_a_bits_source_0; // @[OneHot.scala:58:35]
wire [31:0] _a_set_wo_ready_T; // @[OneHot.scala:58:35]
assign _a_set_wo_ready_T = _GEN_2; // @[OneHot.scala:58:35]
wire [31:0] _a_set_T; // @[OneHot.scala:58:35]
assign _a_set_T = _GEN_2; // @[OneHot.scala:58:35]
assign a_set_wo_ready = _same_cycle_resp_T ? _a_set_wo_ready_T[19:0] : 20'h0; // @[OneHot.scala:58:35]
wire _T_658 = _T_732 & a_first_1; // @[Decoupled.scala:51:35]
assign a_set = _T_658 ? _a_set_T[19:0] : 20'h0; // @[OneHot.scala:58:35]
wire [3:0] _a_opcodes_set_interm_T = {io_in_a_bits_opcode_0, 1'h0}; // @[Monitor.scala:36:7, :657:53]
wire [3:0] _a_opcodes_set_interm_T_1 = {_a_opcodes_set_interm_T[3:1], 1'h1}; // @[Monitor.scala:657:{53,61}]
assign a_opcodes_set_interm = _T_658 ? _a_opcodes_set_interm_T_1 : 4'h0; // @[Monitor.scala:646:40, :655:{25,70}, :657:{28,61}]
wire [3:0] _a_sizes_set_interm_T = {io_in_a_bits_size_0, 1'h0}; // @[Monitor.scala:36:7, :658:51]
wire [3:0] _a_sizes_set_interm_T_1 = {_a_sizes_set_interm_T[3:1], 1'h1}; // @[Monitor.scala:658:{51,59}]
assign a_sizes_set_interm = _T_658 ? _a_sizes_set_interm_T_1 : 4'h0; // @[Monitor.scala:648:38, :655:{25,70}, :658:{28,59}]
wire [7:0] _GEN_3 = {1'h0, io_in_a_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :659:79]
wire [7:0] _a_opcodes_set_T; // @[Monitor.scala:659:79]
assign _a_opcodes_set_T = _GEN_3; // @[Monitor.scala:659:79]
wire [7:0] _a_sizes_set_T; // @[Monitor.scala:660:77]
assign _a_sizes_set_T = _GEN_3; // @[Monitor.scala:659:79, :660:77]
wire [258:0] _a_opcodes_set_T_1 = {255'h0, a_opcodes_set_interm} << _a_opcodes_set_T; // @[Monitor.scala:646:40, :659:{54,79}]
assign a_opcodes_set = _T_658 ? _a_opcodes_set_T_1[79:0] : 80'h0; // @[Monitor.scala:630:33, :655:{25,70}, :659:{28,54}]
wire [258:0] _a_sizes_set_T_1 = {255'h0, a_sizes_set_interm} << _a_sizes_set_T; // @[Monitor.scala:648:38, :659:54, :660:{52,77}]
assign a_sizes_set = _T_658 ? _a_sizes_set_T_1[79:0] : 80'h0; // @[Monitor.scala:632:31, :655:{25,70}, :660:{28,52}]
wire [19:0] d_clr; // @[Monitor.scala:664:34]
wire [19:0] d_clr_wo_ready; // @[Monitor.scala:665:34]
wire [79:0] d_opcodes_clr; // @[Monitor.scala:668:33]
wire [79:0] d_sizes_clr; // @[Monitor.scala:670:31]
wire _GEN_4 = io_in_d_bits_opcode_0 == 3'h6; // @[Monitor.scala:36:7, :673:46]
wire d_release_ack; // @[Monitor.scala:673:46]
assign d_release_ack = _GEN_4; // @[Monitor.scala:673:46]
wire d_release_ack_1; // @[Monitor.scala:783:46]
assign d_release_ack_1 = _GEN_4; // @[Monitor.scala:673:46, :783:46]
wire _T_704 = io_in_d_valid_0 & d_first_1; // @[Monitor.scala:36:7, :674:26]
wire [31:0] _GEN_5 = 32'h1 << io_in_d_bits_source_0; // @[OneHot.scala:58:35]
wire [31:0] _d_clr_wo_ready_T; // @[OneHot.scala:58:35]
assign _d_clr_wo_ready_T = _GEN_5; // @[OneHot.scala:58:35]
wire [31:0] _d_clr_T; // @[OneHot.scala:58:35]
assign _d_clr_T = _GEN_5; // @[OneHot.scala:58:35]
wire [31:0] _d_clr_wo_ready_T_1; // @[OneHot.scala:58:35]
assign _d_clr_wo_ready_T_1 = _GEN_5; // @[OneHot.scala:58:35]
wire [31:0] _d_clr_T_1; // @[OneHot.scala:58:35]
assign _d_clr_T_1 = _GEN_5; // @[OneHot.scala:58:35]
assign d_clr_wo_ready = _T_704 & ~d_release_ack ? _d_clr_wo_ready_T[19:0] : 20'h0; // @[OneHot.scala:58:35]
wire _T_673 = _T_805 & d_first_1 & ~d_release_ack; // @[Decoupled.scala:51:35]
assign d_clr = _T_673 ? _d_clr_T[19:0] : 20'h0; // @[OneHot.scala:58:35]
wire [270:0] _d_opcodes_clr_T_5 = 271'hF << _d_opcodes_clr_T_4; // @[Monitor.scala:680:{76,101}]
assign d_opcodes_clr = _T_673 ? _d_opcodes_clr_T_5[79:0] : 80'h0; // @[Monitor.scala:668:33, :678:{25,70,89}, :680:{21,76}]
wire [270:0] _d_sizes_clr_T_5 = 271'hF << _d_sizes_clr_T_4; // @[Monitor.scala:681:{74,99}]
assign d_sizes_clr = _T_673 ? _d_sizes_clr_T_5[79:0] : 80'h0; // @[Monitor.scala:670:31, :678:{25,70,89}, :681:{21,74}]
wire _same_cycle_resp_T_1 = _same_cycle_resp_T; // @[Monitor.scala:684:{44,55}]
wire _same_cycle_resp_T_2 = io_in_a_bits_source_0 == io_in_d_bits_source_0; // @[Monitor.scala:36:7, :684:113]
wire same_cycle_resp = _same_cycle_resp_T_1 & _same_cycle_resp_T_2; // @[Monitor.scala:684:{55,88,113}]
wire [19:0] _inflight_T = inflight | a_set; // @[Monitor.scala:614:27, :626:34, :705:27]
wire [19:0] _inflight_T_1 = ~d_clr; // @[Monitor.scala:664:34, :705:38]
wire [19:0] _inflight_T_2 = _inflight_T & _inflight_T_1; // @[Monitor.scala:705:{27,36,38}]
wire [79:0] _inflight_opcodes_T = inflight_opcodes | a_opcodes_set; // @[Monitor.scala:616:35, :630:33, :706:43]
wire [79:0] _inflight_opcodes_T_1 = ~d_opcodes_clr; // @[Monitor.scala:668:33, :706:62]
wire [79:0] _inflight_opcodes_T_2 = _inflight_opcodes_T & _inflight_opcodes_T_1; // @[Monitor.scala:706:{43,60,62}]
wire [79:0] _inflight_sizes_T = inflight_sizes | a_sizes_set; // @[Monitor.scala:618:33, :632:31, :707:39]
wire [79:0] _inflight_sizes_T_1 = ~d_sizes_clr; // @[Monitor.scala:670:31, :707:56]
wire [79:0] _inflight_sizes_T_2 = _inflight_sizes_T & _inflight_sizes_T_1; // @[Monitor.scala:707:{39,54,56}]
reg [31:0] watchdog; // @[Monitor.scala:709:27]
wire [32:0] _watchdog_T = {1'h0, watchdog} + 33'h1; // @[Monitor.scala:709:27, :714:26]
wire [31:0] _watchdog_T_1 = _watchdog_T[31:0]; // @[Monitor.scala:714:26]
reg [19:0] inflight_1; // @[Monitor.scala:726:35]
wire [19:0] _inflight_T_3 = inflight_1; // @[Monitor.scala:726:35, :814:35]
reg [79:0] inflight_opcodes_1; // @[Monitor.scala:727:35]
wire [79:0] _inflight_opcodes_T_3 = inflight_opcodes_1; // @[Monitor.scala:727:35, :815:43]
reg [79:0] inflight_sizes_1; // @[Monitor.scala:728:35]
wire [79:0] _inflight_sizes_T_3 = inflight_sizes_1; // @[Monitor.scala:728:35, :816:41]
wire [5:0] _d_first_beats1_decode_T_7 = _d_first_beats1_decode_T_6[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] _d_first_beats1_decode_T_8 = ~_d_first_beats1_decode_T_7; // @[package.scala:243:{46,76}]
wire [2:0] d_first_beats1_decode_2 = _d_first_beats1_decode_T_8[5:3]; // @[package.scala:243:46]
wire [2:0] d_first_beats1_2 = d_first_beats1_opdata_2 ? d_first_beats1_decode_2 : 3'h0; // @[Edges.scala:106:36, :220:59, :221:14]
reg [2:0] d_first_counter_2; // @[Edges.scala:229:27]
wire [3:0] _d_first_counter1_T_2 = {1'h0, d_first_counter_2} - 4'h1; // @[Edges.scala:229:27, :230:28]
wire [2:0] d_first_counter1_2 = _d_first_counter1_T_2[2:0]; // @[Edges.scala:230:28]
wire d_first_2 = d_first_counter_2 == 3'h0; // @[Edges.scala:229:27, :231:25]
wire _d_first_last_T_4 = d_first_counter_2 == 3'h1; // @[Edges.scala:229:27, :232:25]
wire _d_first_last_T_5 = d_first_beats1_2 == 3'h0; // @[Edges.scala:221:14, :232:43]
wire d_first_last_2 = _d_first_last_T_4 | _d_first_last_T_5; // @[Edges.scala:232:{25,33,43}]
wire d_first_done_2 = d_first_last_2 & _d_first_T_2; // @[Decoupled.scala:51:35]
wire [2:0] _d_first_count_T_2 = ~d_first_counter1_2; // @[Edges.scala:230:28, :234:27]
wire [2:0] d_first_count_2 = d_first_beats1_2 & _d_first_count_T_2; // @[Edges.scala:221:14, :234:{25,27}]
wire [2:0] _d_first_counter_T_2 = d_first_2 ? d_first_beats1_2 : d_first_counter1_2; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
wire [3:0] c_opcode_lookup; // @[Monitor.scala:747:35]
wire [3:0] c_size_lookup; // @[Monitor.scala:748:35]
wire [79:0] _c_opcode_lookup_T_1 = inflight_opcodes_1 >> _c_opcode_lookup_T; // @[Monitor.scala:727:35, :749:{44,69}]
wire [79:0] _c_opcode_lookup_T_6 = {76'h0, _c_opcode_lookup_T_1[3:0]}; // @[Monitor.scala:749:{44,97}]
wire [79:0] _c_opcode_lookup_T_7 = {1'h0, _c_opcode_lookup_T_6[79:1]}; // @[Monitor.scala:749:{97,152}]
assign c_opcode_lookup = _c_opcode_lookup_T_7[3:0]; // @[Monitor.scala:747:35, :749:{21,152}]
wire [79:0] _c_size_lookup_T_1 = inflight_sizes_1 >> _c_size_lookup_T; // @[Monitor.scala:728:35, :750:{42,67}]
wire [79:0] _c_size_lookup_T_6 = {76'h0, _c_size_lookup_T_1[3:0]}; // @[Monitor.scala:750:{42,93}]
wire [79:0] _c_size_lookup_T_7 = {1'h0, _c_size_lookup_T_6[79:1]}; // @[Monitor.scala:750:{93,146}]
assign c_size_lookup = _c_size_lookup_T_7[3:0]; // @[Monitor.scala:748:35, :750:{21,146}]
wire [19:0] d_clr_1; // @[Monitor.scala:774:34]
wire [19:0] d_clr_wo_ready_1; // @[Monitor.scala:775:34]
wire [79:0] d_opcodes_clr_1; // @[Monitor.scala:776:34]
wire [79:0] d_sizes_clr_1; // @[Monitor.scala:777:34]
wire _T_776 = io_in_d_valid_0 & d_first_2; // @[Monitor.scala:36:7, :784:26]
assign d_clr_wo_ready_1 = _T_776 & d_release_ack_1 ? _d_clr_wo_ready_T_1[19:0] : 20'h0; // @[OneHot.scala:58:35]
wire _T_758 = _T_805 & d_first_2 & d_release_ack_1; // @[Decoupled.scala:51:35]
assign d_clr_1 = _T_758 ? _d_clr_T_1[19:0] : 20'h0; // @[OneHot.scala:58:35]
wire [270:0] _d_opcodes_clr_T_11 = 271'hF << _d_opcodes_clr_T_10; // @[Monitor.scala:790:{76,101}]
assign d_opcodes_clr_1 = _T_758 ? _d_opcodes_clr_T_11[79:0] : 80'h0; // @[Monitor.scala:776:34, :788:{25,70,88}, :790:{21,76}]
wire [270:0] _d_sizes_clr_T_11 = 271'hF << _d_sizes_clr_T_10; // @[Monitor.scala:791:{74,99}]
assign d_sizes_clr_1 = _T_758 ? _d_sizes_clr_T_11[79:0] : 80'h0; // @[Monitor.scala:777:34, :788:{25,70,88}, :791:{21,74}]
wire _same_cycle_resp_T_8 = io_in_d_bits_source_0 == 5'h0; // @[Monitor.scala:36:7, :795:113]
wire [19:0] _inflight_T_4 = ~d_clr_1; // @[Monitor.scala:774:34, :814:46]
wire [19:0] _inflight_T_5 = _inflight_T_3 & _inflight_T_4; // @[Monitor.scala:814:{35,44,46}]
wire [79:0] _inflight_opcodes_T_4 = ~d_opcodes_clr_1; // @[Monitor.scala:776:34, :815:62]
wire [79:0] _inflight_opcodes_T_5 = _inflight_opcodes_T_3 & _inflight_opcodes_T_4; // @[Monitor.scala:815:{43,60,62}]
wire [79:0] _inflight_sizes_T_4 = ~d_sizes_clr_1; // @[Monitor.scala:777:34, :816:58]
wire [79:0] _inflight_sizes_T_5 = _inflight_sizes_T_3 & _inflight_sizes_T_4; // @[Monitor.scala:816:{41,56,58}]
reg [31:0] watchdog_1; // @[Monitor.scala:818:27] |
Generate the Verilog code corresponding to the following Chisel files.
File ShiftRegisterPriorityQueue.scala:
package compressacc
import chisel3._
import chisel3.util._
// TODO : support enq & deq at the same cycle
class PriorityQueueStageIO(keyWidth: Int, value: ValueInfo) extends Bundle {
val output_prev = KeyValue(keyWidth, value)
val output_nxt = KeyValue(keyWidth, value)
val input_prev = Flipped(KeyValue(keyWidth, value))
val input_nxt = Flipped(KeyValue(keyWidth, value))
val cmd = Flipped(Valid(UInt(1.W)))
val insert_here = Input(Bool())
val cur_input_keyval = Flipped(KeyValue(keyWidth, value))
val cur_output_keyval = KeyValue(keyWidth, value)
}
class PriorityQueueStage(keyWidth: Int, value: ValueInfo) extends Module {
val io = IO(new PriorityQueueStageIO(keyWidth, value))
dontTouch(io)
val CMD_DEQ = 0.U
val CMD_ENQ = 1.U
val MAX_VALUE = (1 << keyWidth) - 1
val key_reg = RegInit(MAX_VALUE.U(keyWidth.W))
val value_reg = Reg(value)
io.output_prev.key := key_reg
io.output_prev.value := value_reg
io.output_nxt.key := key_reg
io.output_nxt.value := value_reg
io.cur_output_keyval.key := key_reg
io.cur_output_keyval.value := value_reg
when (io.cmd.valid) {
switch (io.cmd.bits) {
is (CMD_DEQ) {
key_reg := io.input_nxt.key
value_reg := io.input_nxt.value
}
is (CMD_ENQ) {
when (io.insert_here) {
key_reg := io.cur_input_keyval.key
value_reg := io.cur_input_keyval.value
} .elsewhen (key_reg >= io.cur_input_keyval.key) {
key_reg := io.input_prev.key
value_reg := io.input_prev.value
} .otherwise {
// do nothing
}
}
}
}
}
object PriorityQueueStage {
def apply(keyWidth: Int, v: ValueInfo): PriorityQueueStage = new PriorityQueueStage(keyWidth, v)
}
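// Behaviour summary per stage (illustrative addition, not in the original source): on DEQ a
// stage copies its successor (input_nxt); on ENQ the stage flagged by insert_here latches the
// incoming key/value, stages whose key is >= the incoming key copy their predecessor
// (input_prev), and stages with a smaller key keep their current entry.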
// TODO
// - This design is not scalable, as the enqueued key/value is broadcast to all the stages
// - Add pipeline registers later
class PriorityQueueIO(queSize: Int, keyWidth: Int, value: ValueInfo) extends Bundle {
val cnt_bits = log2Ceil(queSize+1)
val counter = Output(UInt(cnt_bits.W))
val enq = Flipped(Decoupled(KeyValue(keyWidth, value)))
val deq = Decoupled(KeyValue(keyWidth, value))
}
class PriorityQueue(queSize: Int, keyWidth: Int, value: ValueInfo) extends Module {
val keyWidthInternal = keyWidth + 1
val CMD_DEQ = 0.U
val CMD_ENQ = 1.U
val io = IO(new PriorityQueueIO(queSize, keyWidthInternal, value))
dontTouch(io)
val MAX_VALUE = ((1 << keyWidthInternal) - 1).U
val cnt_bits = log2Ceil(queSize+1)
// do not consider cases where we are inserting more entries than the queSize
val counter = RegInit(0.U(cnt_bits.W))
io.counter := counter
val full = (counter === queSize.U)
val empty = (counter === 0.U)
io.deq.valid := !empty
io.enq.ready := !full
when (io.enq.fire) {
counter := counter + 1.U
}
when (io.deq.fire) {
counter := counter - 1.U
}
val cmd_valid = io.enq.valid || io.deq.ready
val cmd = Mux(io.enq.valid, CMD_ENQ, CMD_DEQ)
assert(!(io.enq.valid && io.deq.ready))
val stages = Seq.fill(queSize)(Module(new PriorityQueueStage(keyWidthInternal, value)))
for (i <- 0 until (queSize - 1)) {
stages(i+1).io.input_prev <> stages(i).io.output_nxt
stages(i).io.input_nxt <> stages(i+1).io.output_prev
}
stages(queSize-1).io.input_nxt.key := MAX_VALUE
// stages(queSize-1).io.input_nxt.value :=
stages(queSize-1).io.input_nxt.value.symbol := 0.U
// stages(queSize-1).io.input_nxt.value.child(0) := 0.U
// stages(queSize-1).io.input_nxt.value.child(1) := 0.U
stages(0).io.input_prev.key := io.enq.bits.key
stages(0).io.input_prev.value <> io.enq.bits.value
for (i <- 0 until queSize) {
stages(i).io.cmd.valid := cmd_valid
stages(i).io.cmd.bits := cmd
stages(i).io.cur_input_keyval <> io.enq.bits
}
val is_large_or_equal = WireInit(VecInit(Seq.fill(queSize)(false.B)))
for (i <- 0 until queSize) {
is_large_or_equal(i) := (stages(i).io.cur_output_keyval.key >= io.enq.bits.key)
}
val is_large_or_equal_cat = Wire(UInt(queSize.W))
is_large_or_equal_cat := Cat(is_large_or_equal.reverse)
val insert_here_idx = PriorityEncoder(is_large_or_equal_cat)
for (i <- 0 until queSize) {
when (i.U === insert_here_idx) {
stages(i).io.insert_here := true.B
} .otherwise {
stages(i).io.insert_here := false.B
}
}
io.deq.bits <> stages(0).io.output_prev
}
| module PriorityQueueStage_236( // @[ShiftRegisterPriorityQueue.scala:21:7]
input clock, // @[ShiftRegisterPriorityQueue.scala:21:7]
input reset, // @[ShiftRegisterPriorityQueue.scala:21:7]
output [30:0] io_output_prev_key, // @[ShiftRegisterPriorityQueue.scala:22:14]
output [9:0] io_output_prev_value_symbol, // @[ShiftRegisterPriorityQueue.scala:22:14]
output [30:0] io_output_nxt_key, // @[ShiftRegisterPriorityQueue.scala:22:14]
output [9:0] io_output_nxt_value_symbol, // @[ShiftRegisterPriorityQueue.scala:22:14]
input [30:0] io_input_prev_key, // @[ShiftRegisterPriorityQueue.scala:22:14]
input [9:0] io_input_prev_value_symbol, // @[ShiftRegisterPriorityQueue.scala:22:14]
input [30:0] io_input_nxt_key, // @[ShiftRegisterPriorityQueue.scala:22:14]
input [9:0] io_input_nxt_value_symbol, // @[ShiftRegisterPriorityQueue.scala:22:14]
input io_cmd_valid, // @[ShiftRegisterPriorityQueue.scala:22:14]
input io_cmd_bits, // @[ShiftRegisterPriorityQueue.scala:22:14]
input io_insert_here, // @[ShiftRegisterPriorityQueue.scala:22:14]
input [30:0] io_cur_input_keyval_key, // @[ShiftRegisterPriorityQueue.scala:22:14]
input [9:0] io_cur_input_keyval_value_symbol, // @[ShiftRegisterPriorityQueue.scala:22:14]
output [30:0] io_cur_output_keyval_key, // @[ShiftRegisterPriorityQueue.scala:22:14]
output [9:0] io_cur_output_keyval_value_symbol // @[ShiftRegisterPriorityQueue.scala:22:14]
);
wire [30:0] io_input_prev_key_0 = io_input_prev_key; // @[ShiftRegisterPriorityQueue.scala:21:7]
wire [9:0] io_input_prev_value_symbol_0 = io_input_prev_value_symbol; // @[ShiftRegisterPriorityQueue.scala:21:7]
wire [30:0] io_input_nxt_key_0 = io_input_nxt_key; // @[ShiftRegisterPriorityQueue.scala:21:7]
wire [9:0] io_input_nxt_value_symbol_0 = io_input_nxt_value_symbol; // @[ShiftRegisterPriorityQueue.scala:21:7]
wire io_cmd_valid_0 = io_cmd_valid; // @[ShiftRegisterPriorityQueue.scala:21:7]
wire io_cmd_bits_0 = io_cmd_bits; // @[ShiftRegisterPriorityQueue.scala:21:7]
wire io_insert_here_0 = io_insert_here; // @[ShiftRegisterPriorityQueue.scala:21:7]
wire [30:0] io_cur_input_keyval_key_0 = io_cur_input_keyval_key; // @[ShiftRegisterPriorityQueue.scala:21:7]
wire [9:0] io_cur_input_keyval_value_symbol_0 = io_cur_input_keyval_value_symbol; // @[ShiftRegisterPriorityQueue.scala:21:7]
wire [9:0] io_output_prev_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7]
wire [30:0] io_output_prev_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7]
wire [9:0] io_output_nxt_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7]
wire [30:0] io_output_nxt_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7]
wire [9:0] io_cur_output_keyval_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7]
wire [30:0] io_cur_output_keyval_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7]
reg [30:0] key_reg; // @[ShiftRegisterPriorityQueue.scala:30:24]
assign io_output_prev_key_0 = key_reg; // @[ShiftRegisterPriorityQueue.scala:21:7, :30:24]
assign io_output_nxt_key_0 = key_reg; // @[ShiftRegisterPriorityQueue.scala:21:7, :30:24]
assign io_cur_output_keyval_key_0 = key_reg; // @[ShiftRegisterPriorityQueue.scala:21:7, :30:24]
reg [9:0] value_reg_symbol; // @[ShiftRegisterPriorityQueue.scala:31:22]
assign io_output_prev_value_symbol_0 = value_reg_symbol; // @[ShiftRegisterPriorityQueue.scala:21:7, :31:22]
assign io_output_nxt_value_symbol_0 = value_reg_symbol; // @[ShiftRegisterPriorityQueue.scala:21:7, :31:22]
assign io_cur_output_keyval_value_symbol_0 = value_reg_symbol; // @[ShiftRegisterPriorityQueue.scala:21:7, :31:22]
wire _T_2 = key_reg >= io_cur_input_keyval_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7, :30:24, :52:30]
always @(posedge clock) begin // @[ShiftRegisterPriorityQueue.scala:21:7]
if (reset) // @[ShiftRegisterPriorityQueue.scala:21:7]
key_reg <= 31'h7FFFFFFF; // @[ShiftRegisterPriorityQueue.scala:30:24]
else if (io_cmd_valid_0) begin // @[ShiftRegisterPriorityQueue.scala:21:7]
if (io_cmd_bits_0) begin // @[ShiftRegisterPriorityQueue.scala:21:7]
if (io_insert_here_0) // @[ShiftRegisterPriorityQueue.scala:21:7]
key_reg <= io_cur_input_keyval_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7, :30:24]
else if (_T_2) // @[ShiftRegisterPriorityQueue.scala:52:30]
key_reg <= io_input_prev_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7, :30:24]
end
else // @[ShiftRegisterPriorityQueue.scala:21:7]
key_reg <= io_input_nxt_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7, :30:24]
end
if (io_cmd_valid_0) begin // @[ShiftRegisterPriorityQueue.scala:21:7]
if (io_cmd_bits_0) begin // @[ShiftRegisterPriorityQueue.scala:21:7]
if (io_insert_here_0) // @[ShiftRegisterPriorityQueue.scala:21:7]
value_reg_symbol <= io_cur_input_keyval_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7, :31:22]
else if (_T_2) // @[ShiftRegisterPriorityQueue.scala:52:30]
value_reg_symbol <= io_input_prev_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7, :31:22]
end
else // @[ShiftRegisterPriorityQueue.scala:21:7]
value_reg_symbol <= io_input_nxt_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7, :31:22]
end
end // @[ShiftRegisterPriorityQueue.scala:21:7]
assign io_output_prev_key = io_output_prev_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7]
assign io_output_prev_value_symbol = io_output_prev_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7]
assign io_output_nxt_key = io_output_nxt_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7]
assign io_output_nxt_value_symbol = io_output_nxt_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7]
assign io_cur_output_keyval_key = io_cur_output_keyval_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7]
assign io_cur_output_keyval_value_symbol = io_cur_output_keyval_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7]
endmodule |
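For intuition, here is a hypothetical software model (an illustrative sketch, not part of either source above) of the shift-register queue's steady-state behaviour: on ENQ every stage at or after the insertion point takes its predecessor's entry, and on DEQ every stage takes its successor's entry, with the reset key (MAX_VALUE) refilling the tail.
object PriorityQueueModel {
  // Assumes the queue is never enqueued while full, so the last slot always holds maxKey.
  def enq(q: Vector[Int], key: Int): Vector[Int] = {
    val idx = q.indexWhere(_ >= key)          // same condition the insert_here_idx encoder uses
    (q.take(idx) :+ key) ++ q.drop(idx).init  // stages after idx take their predecessor's entry
  }
  def deq(q: Vector[Int], maxKey: Int): (Int, Vector[Int]) =
    (q.head, q.tail :+ maxKey)                // head pops, every stage takes its successor
}
// e.g. PriorityQueueModel.enq(Vector(3, 7, 99, 99), 5) == Vector(3, 5, 7, 99)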
Generate the Verilog code corresponding to the following Chisel files.
File LoopMatmul.scala:
package gemmini
import chisel3._
import chisel3.util._
import chisel3.experimental._
import freechips.rocketchip.tile.RoCCCommand
import org.chipsalliance.cde.config.Parameters
import GemminiISA._
import LocalAddr._
import Util._
// LdA
class LoopMatmulLdAReq(val block_size: Int, val coreMaxAddrBits: Int, val iterator_bitwidth: Int, val max_addr: Int, val concurrent_loops: Int) extends Bundle {
val max_i = UInt(iterator_bitwidth.W)
val max_k = UInt(iterator_bitwidth.W)
val pad_i = UInt(log2Up(block_size).W)
val pad_k = UInt(log2Up(block_size).W)
val dram_addr = UInt(coreMaxAddrBits.W)
val dram_stride = UInt(coreMaxAddrBits.W)
val transpose = Bool()
val addr_start = UInt(log2Up(max_addr).W)
val loop_id = UInt(log2Up(concurrent_loops).W)
val is_resadd = Bool()
}
class LoopMatmulLdA(block_size: Int, coreMaxAddrBits: Int, iterator_bitwidth: Int, max_addr: Int, input_w: Int,
max_block_len: Int, concurrent_loops: Int, mvin_rs2_t: MvinRs2)
(implicit p: Parameters) extends Module {
val io = IO(new Bundle {
val req = Flipped(Decoupled(new LoopMatmulLdAReq(block_size, coreMaxAddrBits, iterator_bitwidth, max_addr, concurrent_loops)))
val cmd = Decoupled(Output(new RoCCCommand))
val i = Output(UInt(iterator_bitwidth.W))
val k = Output(UInt(iterator_bitwidth.W))
val idle = Output(Bool())
val rob_overloaded = Input(Bool())
val loop_id = Output(UInt(log2Up(concurrent_loops).W))
})
object State extends ChiselEnum {
val idle, ld = Value
}
import State._
val state = RegInit(idle)
val req = Reg(new LoopMatmulLdAReq(block_size, coreMaxAddrBits, iterator_bitwidth, max_addr, concurrent_loops))
val i = Reg(UInt(iterator_bitwidth.W))
val k = Reg(UInt(iterator_bitwidth.W))
val row_iterator = Mux(req.transpose, k, i)
val col_iterator = Mux(req.transpose, i, k)
val max_row_iterator = Mux(req.transpose, req.max_k, req.max_i)
val max_col_iterator = Mux(req.transpose, req.max_i, req.max_k)
val row_pad = Mux(req.transpose, req.pad_k, req.pad_i)
val col_pad = Mux(req.transpose, req.pad_i, req.pad_k)
val max_col_dim = Mux(req.transpose, req.max_i, req.max_k)
val max_blocks = Mux(max_col_dim <= max_block_len.U, max_col_dim, max_block_len.U)
val sp_addr_start = req.addr_start
val dram_offset = (row_iterator * req.dram_stride + col_iterator) * block_size.U * (input_w/8).U
val dram_addr = req.dram_addr + LoopMatmul.castDramOffset(dram_offset)
val sp_addr = sp_addr_start + (row_iterator * max_col_iterator + col_iterator) * block_size.U
val blocks = Mux(col_iterator + max_blocks <= max_col_iterator, max_blocks, max_col_iterator-col_iterator)
val cols = (blocks * block_size.U) - Mux(col_iterator + blocks >= max_col_iterator, col_pad, 0.U)
val rows = block_size.U - Mux(row_iterator === max_row_iterator-1.U, row_pad, 0.U)
val mvin_cmd = Wire(new RoCCCommand)
mvin_cmd := DontCare
mvin_cmd.inst.funct := LOAD_CMD
mvin_cmd.rs1 := dram_addr
val mvin_cmd_rs2 = Wire(mvin_rs2_t.cloneType)
mvin_cmd_rs2 := DontCare
mvin_cmd_rs2.num_rows := rows.asUInt
mvin_cmd_rs2.num_cols := cols.asUInt
mvin_cmd_rs2.local_addr := cast_to_sp_addr(mvin_cmd_rs2.local_addr, sp_addr)
mvin_cmd.rs2 := mvin_cmd_rs2.asUInt
when(req.is_resadd){
mvin_cmd_rs2.local_addr := cast_to_acc_addr(mvin_cmd_rs2.local_addr, sp_addr, accumulate = false.B, read_full = false.B)
}
io.req.ready := state === idle
io.i := i
io.k := k
io.idle := state === idle
io.cmd.valid := state =/= idle && !io.rob_overloaded && req.dram_addr =/= 0.U
io.cmd.bits := mvin_cmd
io.loop_id := req.loop_id
when(req.dram_addr === 0.U){
state := idle
}.elsewhen(io.cmd.fire) {
// The order here is k, j, i
val i_blocks = Mux(req.transpose, max_blocks, 1.U)
val k_blocks = Mux(req.transpose, 1.U, max_blocks)
val next_i = floorAdd(i, i_blocks, req.max_i)
val next_k = floorAdd(k, k_blocks, req.max_k, next_i === 0.U)
i := next_i
k := next_k
when (next_i === 0.U && next_k === 0.U) {
state := idle
}
}
when (io.req.fire) {
req := io.req.bits
state := ld
i := 0.U
k := 0.U
}
}
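// Addressing sketch for LdA (illustrative numbers, not from the original source): with
// block_size = 16, input_w = 8 bits and dram_stride = 1024 elements, the non-transposed tile
// at (i, k) = (2, 3) is fetched from dram_addr + (2*1024 + 3)*16*1 = dram_addr + 32816 bytes
// and lands in the scratchpad at addr_start + (2*max_k + 3)*16 rows.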
// LdB
class LoopMatmulLdBReq(val block_size: Int, val coreMaxAddrBits: Int, val iterator_bitwidth: Int, val max_addr: Int, val concurrent_loops: Int) extends Bundle {
val max_k = UInt(iterator_bitwidth.W)
val max_j = UInt(iterator_bitwidth.W)
val pad_k = UInt(log2Up(block_size).W)
val pad_j = UInt(log2Up(block_size).W)
val dram_addr = UInt(coreMaxAddrBits.W)
val dram_stride = UInt(coreMaxAddrBits.W)
val transpose = Bool()
val addr_end = UInt(log2Up(max_addr+1).W)
val loop_id = UInt(log2Up(concurrent_loops).W)
val is_resadd = Bool()
}
class LoopMatmulLdB(block_size: Int, coreMaxAddrBits: Int, iterator_bitwidth: Int, max_addr: Int, input_w: Int,
max_block_len: Int, concurrent_loops: Int, mvin_rs2_t: MvinRs2)
(implicit p: Parameters) extends Module {
val io = IO(new Bundle {
val req = Flipped(Decoupled(new LoopMatmulLdBReq(block_size, coreMaxAddrBits, iterator_bitwidth, max_addr, concurrent_loops)))
val cmd = Decoupled(Output(new RoCCCommand))
val k = Output(UInt(iterator_bitwidth.W))
val j = Output(UInt(iterator_bitwidth.W))
val idle = Output(Bool())
val rob_overloaded = Input(Bool())
val loop_id = Output(UInt(log2Up(concurrent_loops).W))
})
object State extends ChiselEnum {
val idle, ld = Value
}
import State._
val state = RegInit(idle)
val req = Reg(new LoopMatmulLdBReq(block_size, coreMaxAddrBits, iterator_bitwidth, max_addr, concurrent_loops))
val k = Reg(UInt(iterator_bitwidth.W))
val j = Reg(UInt(iterator_bitwidth.W))
val row_iterator = Mux(req.transpose, j, k)
val col_iterator = Mux(req.transpose, k, j)
val max_row_iterator = Mux(req.transpose, req.max_j, req.max_k)
val max_col_iterator = Mux(req.transpose, req.max_k, req.max_j)
val row_pad = Mux(req.transpose, req.pad_j, req.pad_k)
val col_pad = Mux(req.transpose, req.pad_k, req.pad_j)
val max_col_dim = Mux(req.transpose, req.max_k, req.max_j)
val max_blocks = Mux(max_col_dim <= max_block_len.U, max_col_dim, max_block_len.U)
val sp_addr_start = Mux(req.is_resadd, req.addr_end, req.addr_end - req.max_k * req.max_j * block_size.U)
val dram_offset = (row_iterator * req.dram_stride + col_iterator) * block_size.U * (input_w/8).U
val dram_addr = req.dram_addr + LoopMatmul.castDramOffset(dram_offset)
val sp_addr = sp_addr_start + (row_iterator * max_col_iterator + col_iterator) * block_size.U
val blocks = Mux(col_iterator + max_blocks <= max_col_iterator, max_blocks, max_col_iterator-col_iterator)
val cols = (blocks * block_size.U) - Mux(col_iterator + blocks >= max_col_iterator, col_pad, 0.U)
val rows = block_size.U - Mux(row_iterator === max_row_iterator-1.U, row_pad, 0.U)
val mvin_cmd = Wire(new RoCCCommand)
mvin_cmd := DontCare
mvin_cmd.inst.funct := LOAD2_CMD
mvin_cmd.rs1 := dram_addr
val mvin_cmd_rs2 = Wire(mvin_rs2_t.cloneType)
mvin_cmd_rs2 := DontCare
mvin_cmd_rs2.num_rows := rows.asUInt
mvin_cmd_rs2.num_cols := cols.asUInt
mvin_cmd_rs2.local_addr := cast_to_sp_addr(mvin_cmd_rs2.local_addr, sp_addr)
mvin_cmd.rs2 := mvin_cmd_rs2.asUInt
when (req.is_resadd){
mvin_cmd_rs2.local_addr := cast_to_acc_addr(mvin_cmd_rs2.local_addr, sp_addr, accumulate = true.B, read_full = false.B)
}
io.req.ready := state === idle
io.k := k
io.j := j
io.idle := state === idle
io.cmd.valid := state =/= idle && !io.rob_overloaded && req.dram_addr =/= 0.U
io.cmd.bits := mvin_cmd
io.loop_id := req.loop_id
when(req.dram_addr === 0.U){
state := idle
}.elsewhen(io.cmd.fire) {
// The order here is k, j, i
val j_blocks = Mux(req.transpose, 1.U, max_blocks)
val k_blocks = Mux(req.transpose, max_blocks, 1.U)
val next_j = floorAdd(j, j_blocks, req.max_j)
val next_k = floorAdd(k, k_blocks, req.max_k, next_j === 0.U)
j := next_j
k := next_k
when (next_j === 0.U && next_k === 0.U) {
state := idle
}
}
when (io.req.fire) {
req := io.req.bits
state := ld
j := 0.U
k := 0.U
}
}
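// Placement note (illustrative addition): LdB packs B tiles at the end of the scratchpad, so
// sp_addr_start = addr_end - max_k*max_j*block_size for a normal matmul; when is_resadd is set
// the same mover instead targets the accumulator starting at addr_end with accumulate = true,
// so the second operand is summed onto the first.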
// LdD
class LoopMatmulLdDReq(val block_size: Int, val coreMaxAddrBits: Int, val iterator_bitwidth: Int, val max_acc_addr: Int, val concurrent_loops: Int) extends Bundle {
val max_j = UInt(iterator_bitwidth.W)
val max_i = UInt(iterator_bitwidth.W)
val pad_j = UInt(log2Up(block_size).W)
val pad_i = UInt(log2Up(block_size).W)
val dram_addr = UInt(coreMaxAddrBits.W)
val dram_stride = UInt(coreMaxAddrBits.W)
val low_d = Bool()
val addr_start = UInt(log2Up(max_acc_addr).W)
val loop_id = UInt(log2Up(concurrent_loops).W)
}
class LoopMatmulLdD(block_size: Int, coreMaxAddrBits: Int, iterator_bitwidth: Int, max_acc_addr: Int, input_w: Int,
acc_w: Int, max_block_len: Int, max_block_len_acc: Int, concurrent_loops: Int, mvin_rs2_t: MvinRs2)
(implicit p: Parameters) extends Module {
val io = IO(new Bundle {
val req = Flipped(Decoupled(new LoopMatmulLdDReq(block_size, coreMaxAddrBits, iterator_bitwidth, max_acc_addr, concurrent_loops)))
val cmd = Decoupled(Output(new RoCCCommand))
val idle = Output(Bool())
val rob_overloaded = Input(Bool())
val loop_id = Output(UInt(log2Up(concurrent_loops).W))
})
object State extends ChiselEnum {
val idle, ld = Value
}
import State._
val state = RegInit(idle)
val req = Reg(new LoopMatmulLdDReq(block_size, coreMaxAddrBits, iterator_bitwidth, max_acc_addr, concurrent_loops))
val max_blocks = Mux(req.low_d, Mux(req.max_j <= max_block_len.U, req.max_j, max_block_len.U),
Mux(req.max_j <= max_block_len_acc.U, req.max_j, max_block_len_acc.U))
val j = Reg(UInt(iterator_bitwidth.W))
val i = Reg(UInt(iterator_bitwidth.W))
val acc_addr_start = req.addr_start
val dram_offset = Mux(req.low_d, (i * req.dram_stride + j) * block_size.U * (input_w/8).U,
(i * req.dram_stride + j) * block_size.U * (acc_w/8).U)
val dram_addr = req.dram_addr + LoopMatmul.castDramOffset(dram_offset)
val sp_addr = acc_addr_start + (i * req.max_j + j) * block_size.U
val blocks = Mux(j + max_blocks <= req.max_j, max_blocks, req.max_j-j)
val cols = (blocks * block_size.U) - Mux(j + blocks >= req.max_j, req.pad_j, 0.U)
val rows = block_size.U - Mux(i === req.max_i-1.U, req.pad_i, 0.U)
val mvin_cmd = Wire(new RoCCCommand)
mvin_cmd := DontCare
mvin_cmd.inst.funct := LOAD3_CMD
mvin_cmd.rs1 := dram_addr
val mvin_cmd_rs2 = Wire(mvin_rs2_t.cloneType)
mvin_cmd_rs2 := DontCare
mvin_cmd_rs2.num_rows := rows.asUInt
mvin_cmd_rs2.num_cols := cols.asUInt
mvin_cmd_rs2.local_addr := cast_to_acc_addr(mvin_cmd_rs2.local_addr, sp_addr, accumulate = false.B, read_full = false.B)
mvin_cmd.rs2 := mvin_cmd_rs2.asUInt
io.req.ready := state === idle
io.idle := state === idle
// The order here is k, j, i
io.cmd.valid := state =/= idle && !io.rob_overloaded && req.dram_addr =/= 0.U
io.cmd.bits := mvin_cmd
io.loop_id := req.loop_id
when (req.dram_addr === 0.U) {
state := idle
}.elsewhen (io.cmd.fire) {
// The order here is k, j, i
val next_i = floorAdd(i, 1.U, req.max_i)
val next_j = floorAdd(j, max_blocks, req.max_j, next_i === 0.U)
i := next_i
j := next_j
when (next_i === 0.U && next_j === 0.U) {
state := idle
}
}
when (io.req.fire) {
req := io.req.bits
state := ld
j := 0.U
i := 0.U
}
}
// Compute
class LoopMatmulExecuteReq(val block_size: Int, val coreMaxAddrBits: Int, val iterator_bitwidth: Int, val max_addr: Int, val max_acc_addr: Int, val concurrent_loops: Int) extends Bundle {
val max_j = UInt(iterator_bitwidth.W)
val max_k = UInt(iterator_bitwidth.W)
val max_i = UInt(iterator_bitwidth.W)
val pad_j = UInt(log2Up(block_size).W)
val pad_k = UInt(log2Up(block_size).W)
val pad_i = UInt(log2Up(block_size).W)
val a_tranpose = Bool()
val b_tranpose = Bool()
val accumulate = Bool()
val a_addr_start = UInt(log2Up(max_addr).W)
val b_addr_end = UInt(log2Up(max_addr+1).W)
val c_addr_start = UInt(log2Up(max_acc_addr).W)
val loop_id = UInt(log2Up(concurrent_loops).W)
val skip = Bool()
}
class LoopMatmulExecute(block_size: Int, coreMaxAddrBits: Int, iterator_bitwidth: Int, max_addr: Int, max_acc_addr: Int, concurrent_loops: Int,
preload_rs1_t: PreloadRs, preload_rs2_t: PreloadRs,
compute_rs1_t: ComputeRs, compute_rs2_t: ComputeRs)
(implicit p: Parameters) extends Module {
val io = IO(new Bundle {
val req = Flipped(Decoupled(new LoopMatmulExecuteReq(block_size, coreMaxAddrBits, iterator_bitwidth, max_addr, max_acc_addr, concurrent_loops)))
val cmd = Decoupled(Output(new RoCCCommand))
val k = Output(UInt(iterator_bitwidth.W))
val j = Output(UInt(iterator_bitwidth.W))
val i = Output(UInt(iterator_bitwidth.W))
val ld_ka = Input(UInt(iterator_bitwidth.W))
val ld_kb = Input(UInt(iterator_bitwidth.W))
val ld_j = Input(UInt(iterator_bitwidth.W))
val ld_i = Input(UInt(iterator_bitwidth.W))
val lda_completed = Input(Bool())
val ldb_completed = Input(Bool())
val ldd_completed = Input(Bool())
val idle = Output(Bool())
val rob_overloaded = Input(Bool())
val loop_id = Output(UInt(log2Up(concurrent_loops).W))
})
object State extends ChiselEnum {
val idle, pre, comp = Value
}
import State._
val state = RegInit(idle)
val req = Reg(new LoopMatmulExecuteReq(block_size, coreMaxAddrBits, iterator_bitwidth, max_addr, max_acc_addr, concurrent_loops))
val c_addr_start = /*(BigInt(1) << 31).U |*/ req.c_addr_start
val b_addr_start = req.b_addr_end - req.max_k * req.max_j * block_size.U
val k = Reg(UInt(iterator_bitwidth.W))
val j = Reg(UInt(iterator_bitwidth.W))
val i = Reg(UInt(iterator_bitwidth.W))
val a_row = Mux(req.a_tranpose, k, i)
val a_col = Mux(req.a_tranpose, i, k)
val b_row = Mux(req.b_tranpose, j, k)
val b_col = Mux(req.b_tranpose, k, j)
val a_max_col = Mux(req.a_tranpose, req.max_i, req.max_k)
val b_max_col = Mux(req.b_tranpose, req.max_k, req.max_j)
val a_addr = req.a_addr_start + (a_row * a_max_col + a_col) * block_size.U
val b_addr = b_addr_start + (b_row * b_max_col + b_col) * block_size.U
val c_addr = c_addr_start + (i * req.max_j + j) * block_size.U
val a_cols = block_size.U - Mux(k === req.max_k - 1.U, req.pad_k, 0.U)
val a_rows = block_size.U - Mux(i === req.max_i - 1.U, req.pad_i, 0.U)
val b_cols = block_size.U - Mux(j === req.max_j - 1.U, req.pad_j, 0.U)
val b_rows = block_size.U - Mux(k === req.max_k - 1.U, req.pad_k, 0.U)
val c_cols = block_size.U - Mux(j === req.max_j - 1.U, req.pad_j, 0.U)
val c_rows = block_size.U - Mux(i === req.max_i - 1.U, req.pad_i, 0.U)
val pre_cmd = Wire(new RoCCCommand)
pre_cmd := DontCare
pre_cmd.inst.funct := PRELOAD_CMD
val pre_cmd_rs1 = Wire(preload_rs1_t.cloneType)
pre_cmd_rs1 := DontCare
pre_cmd_rs1.num_rows := b_rows.asUInt
pre_cmd_rs1.num_cols := b_cols.asUInt
pre_cmd_rs1.local_addr := Mux(i === 0.U, cast_to_sp_addr(pre_cmd_rs1.local_addr, b_addr),
garbage_addr(pre_cmd_rs1.local_addr))
val pre_cmd_rs2 = Wire(preload_rs2_t.cloneType)
pre_cmd_rs2 := DontCare
pre_cmd_rs2.num_rows := c_rows.asUInt
pre_cmd_rs2.num_cols := c_cols.asUInt
pre_cmd_rs2.local_addr := cast_to_acc_addr(pre_cmd_rs2.local_addr, c_addr, accumulate = req.accumulate || k =/= 0.U, read_full = false.B)
pre_cmd.rs1 := pre_cmd_rs1.asUInt
pre_cmd.rs2 := pre_cmd_rs2.asUInt
val comp_cmd = Wire(new RoCCCommand())
comp_cmd := DontCare
comp_cmd.inst.funct := Mux(i === 0.U, COMPUTE_AND_FLIP_CMD, COMPUTE_AND_STAY_CMD)
val comp_cmd_rs1 = Wire(compute_rs1_t.cloneType)
comp_cmd_rs1 := DontCare
comp_cmd_rs1.num_rows := a_rows.asUInt
comp_cmd_rs1.num_cols := a_cols.asUInt
comp_cmd_rs1.local_addr := cast_to_sp_addr(comp_cmd_rs1.local_addr, a_addr)
val comp_cmd_rs2 = Wire(compute_rs2_t.cloneType)
comp_cmd_rs2 := DontCare
comp_cmd_rs2.num_rows := block_size.U
comp_cmd_rs2.num_cols := block_size.U
comp_cmd_rs2.local_addr := garbage_addr(comp_cmd_rs2.local_addr)
comp_cmd.rs1 := comp_cmd_rs1.asUInt
comp_cmd.rs2 := comp_cmd_rs2.asUInt
io.req.ready := state === idle
io.k := k
io.j := j
io.i := i
io.idle := state === idle
// The order here is k, j, i
val lda_ahead = io.lda_completed || io.ld_ka > k || (io.ld_ka === k && io.ld_i > i)
val ldb_ahead = io.ldb_completed || io.ld_kb > k || (io.ld_kb === k && io.ld_j > j)
val ldd_ahead = io.ldd_completed
val ld_ahead = lda_ahead && ldb_ahead && ldd_ahead
io.cmd.valid := state =/= idle && !io.rob_overloaded && ld_ahead && !req.skip
io.cmd.bits := Mux(state === pre, pre_cmd, comp_cmd)
io.loop_id := req.loop_id
when(req.skip) {
state := idle
}.elsewhen (io.cmd.fire) {
when (state === pre) {
state := comp
}.otherwise {
val next_i = floorAdd(i, 1.U, req.max_i)
val next_j = floorAdd(j, 1.U, req.max_j, next_i === 0.U)
val next_k = floorAdd(k, 1.U, req.max_k, next_j === 0.U && next_i === 0.U)
k := next_k
j := next_j
i := next_i
state := Mux(next_k === 0.U && next_j === 0.U && next_i === 0.U, idle, pre)
}
}
when (io.req.fire) {
req := io.req.bits
state := pre
j := 0.U
k := 0.U
i := 0.U
}
assert(!(state =/= idle && req.a_tranpose && req.b_tranpose))
}
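// Iteration-order sketch (illustrative only): with max_i = 2, max_j = 1, max_k = 2 the execute
// FSM visits (k, j, i) = (0,0,0), (0,0,1), (1,0,0), (1,0,1), issuing a PRELOAD followed by a
// COMPUTE for every tile; COMPUTE_AND_FLIP is used only when i == 0, which is also the only
// time a real B address (rather than a garbage address) is preloaded.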
// StC
class LoopMatmulStCReq(val block_size: Int, val coreMaxAddrBits: Int, val iterator_bitwidth: Int, val max_acc_addr: Int, val concurrent_loops: Int) extends Bundle {
val max_k = UInt(iterator_bitwidth.W)
val max_j = UInt(iterator_bitwidth.W)
val max_i = UInt(iterator_bitwidth.W)
val pad_j = UInt(log2Up(block_size).W)
val pad_i = UInt(log2Up(block_size).W)
val dram_addr = UInt(coreMaxAddrBits.W)
val dram_stride = UInt(coreMaxAddrBits.W)
val full_c = Bool()
val act = UInt(Activation.bitwidth.W)
val addr_start = UInt(log2Up(max_acc_addr).W)
val loop_id = UInt(log2Up(concurrent_loops).W)
val is_resadd = Bool()
}
class LoopMatmulStC(block_size: Int, coreMaxAddrBits: Int, iterator_bitwidth: Int, max_acc_addr: Int, input_w: Int, acc_w: Int, max_block_len: Int, concurrent_loops: Int, mvout_rs2_t: MvoutRs2)
(implicit p: Parameters) extends Module {
val io = IO(new Bundle {
val req = Flipped(Decoupled(new LoopMatmulStCReq(block_size, coreMaxAddrBits, iterator_bitwidth, max_acc_addr, concurrent_loops)))
val cmd = Decoupled(Output(new RoCCCommand))
val ex_k = Input(UInt(iterator_bitwidth.W))
val ex_j = Input(UInt(iterator_bitwidth.W))
val ex_i = Input(UInt(iterator_bitwidth.W))
val ex_completed = Input(Bool())
val j = Output(UInt(iterator_bitwidth.W))
val i = Output(UInt(iterator_bitwidth.W))
val idle = Output(Bool())
val rob_overloaded = Input(Bool())
val loop_id = Output(UInt(log2Up(concurrent_loops).W))
})
object State extends ChiselEnum {
val idle, st, ln_config, ln_st = Value
}
import State._
val state = RegInit(idle)
val req = Reg(new LoopMatmulStCReq(block_size, coreMaxAddrBits, iterator_bitwidth, max_acc_addr, concurrent_loops))
val max_blocks = Mux(req.full_c, 1.U, Mux(req.max_j <= max_block_len.U, req.max_j, max_block_len.U))
// Non-normalization-related iterators and calculations
val j = Reg(UInt(iterator_bitwidth.W))
val i = Reg(UInt(iterator_bitwidth.W))
val acc_addr_start = /*(BigInt(1) << 31).U | (req.full_c << 29.U).asUInt |*/ req.addr_start
val dram_offset = Mux(req.full_c, (i * req.dram_stride + j) * block_size.U * (acc_w/8).U,
(i * req.dram_stride + j) * block_size.U * (input_w/8).U)
val dram_addr = req.dram_addr + LoopMatmul.castDramOffset(dram_offset)
val sp_addr = acc_addr_start + (i * req.max_j + j) * block_size.U
val blocks = Mux(j + max_blocks <= req.max_j, max_blocks, req.max_j-j)
val cols = (blocks * block_size.U) - Mux(j + blocks >= req.max_j, req.pad_j, 0.U)
val rows = block_size.U - Mux(i === req.max_i-1.U, req.pad_i, 0.U)
val mvout_cmd = Wire(new RoCCCommand)
mvout_cmd := DontCare
mvout_cmd.inst.funct := STORE_CMD
mvout_cmd.rs1 := dram_addr
val mvout_cmd_rs2 = Wire(mvout_rs2_t.cloneType)
mvout_cmd_rs2 := DontCare
mvout_cmd_rs2.num_rows := rows.asUInt
mvout_cmd_rs2.num_cols := cols.asUInt
mvout_cmd_rs2.local_addr := cast_to_acc_addr(mvout_cmd_rs2.local_addr, sp_addr, accumulate = false.B, read_full = req.full_c)
mvout_cmd.rs2 := mvout_cmd_rs2.asUInt
// Layernorm iterators and calculations
val ln_row = Reg(UInt(iterator_bitwidth.W))
val ln_cmd = Reg(UInt(iterator_bitwidth.W))
val ln_stat_id = Reg(UInt(iterator_bitwidth.W))
val NORM_STAT_IDS = 2 // TODO magic number
val ln_norm_cmds = VecInit(VecInit(NormCmd.SUM, NormCmd.MEAN), VecInit(NormCmd.VARIANCE, NormCmd.INV_STDDEV),
VecInit(NormCmd.RESET, NormCmd.RESET))
val sm_norm_cmds = VecInit(VecInit(NormCmd.MAX, NormCmd.MAX), VecInit(NormCmd.SUM_EXP, NormCmd.INV_SUM_EXP),
VecInit(NormCmd.RESET, NormCmd.RESET))
val ln_stat_ids = Mux(rows -& ln_row > NORM_STAT_IDS.U, NORM_STAT_IDS.U, rows -& ln_row)
val ln_r = ln_row +& ln_stat_id
val ln_sp_addr = acc_addr_start +& (i * req.max_j +& j) * block_size.U +& ln_r
val ln_norm_cmd = Mux(j +& max_blocks >= req.max_j,
Mux(req.act === Activation.LAYERNORM, ln_norm_cmds(ln_cmd)(1), sm_norm_cmds(ln_cmd)(1)),
Mux(req.act === Activation.LAYERNORM, ln_norm_cmds(ln_cmd)(0), sm_norm_cmds(ln_cmd)(0)))
// TODO we assume for now that full_C and layernorm aren't true at the same time
val ln_dram_offset = ((i * req.dram_stride +& j) * block_size.U +& ln_r * req.dram_stride) * (input_w/8).U
val ln_dram_addr = req.dram_addr + LoopMatmul.castDramOffset(ln_dram_offset)
val ln_config_norm_rs1 = Wire(new GemminiISA.ConfigNormRs1)
ln_config_norm_rs1 := DontCare
ln_config_norm_rs1.set_stats_id_only := 1.U
ln_config_norm_rs1.cmd_type := CONFIG_NORM
ln_config_norm_rs1.norm_stats_id := ln_stat_id
val ln_config_norm = Wire(new RoCCCommand)
ln_config_norm := DontCare
ln_config_norm.inst.funct := CONFIG_CMD
ln_config_norm.rs1 := ln_config_norm_rs1.asUInt
ln_config_norm.rs2 := DontCare
val ln_mvout_cmd = Wire(new RoCCCommand)
ln_mvout_cmd := DontCare
ln_mvout_cmd.inst.funct := STORE_CMD
ln_mvout_cmd.rs1 := ln_dram_addr
val ln_mvout_cmd_rs2 = Wire(mvout_rs2_t.cloneType)
ln_mvout_cmd_rs2 := DontCare
ln_mvout_cmd_rs2.num_rows := 1.U
ln_mvout_cmd_rs2.num_cols := cols.asUInt
ln_mvout_cmd_rs2.local_addr := cast_to_acc_addr(ln_mvout_cmd_rs2.local_addr, ln_sp_addr, accumulate = false.B, read_full = req.full_c)
ln_mvout_cmd_rs2.local_addr.norm_cmd := ln_norm_cmd
ln_mvout_cmd.rs2 := ln_mvout_cmd_rs2.asUInt
io.req.ready := state === idle
io.j := j
io.i := i
io.idle := state === idle
// The order here is k, j, i when not doing LAYERNORM or SOFTMAX
val ex_ahead = WireInit(io.ex_completed ||
((req.act =/= Activation.LAYERNORM) && (req.act =/= Activation.SOFTMAX) &&
(io.ex_k === req.max_k - 1.U &&
(io.ex_j >= j + blocks ||
((io.ex_j === j + blocks - 1.U) && io.ex_i > i)))))
when(req.is_resadd){
ex_ahead := io.ex_completed || (io.ex_i > i || (io.ex_i === i && io.ex_j >= j + blocks))
}
io.cmd.valid := state =/= idle && !io.rob_overloaded && ex_ahead && req.dram_addr =/= 0.U
io.cmd.bits := MuxCase(mvout_cmd, Seq(
(state === ln_config) -> ln_config_norm,
(state === ln_st) -> ln_mvout_cmd,
))
io.loop_id := req.loop_id
when (req.dram_addr === 0.U) {
state := idle
}.elsewhen (io.cmd.fire && state === st) {
// The order here is k, j, i
val next_i = floorAdd(i, 1.U, req.max_i)
val next_j = floorAdd(j, max_blocks, req.max_j, next_i === 0.U)
i := next_i
j := next_j
when (next_i === 0.U && next_j === 0.U) {
state := idle
}
}.elsewhen (io.cmd.fire && state === ln_config) {
state := ln_st
}.elsewhen (io.cmd.fire && state === ln_st) {
val next_j = floorAdd(j, max_blocks, req.max_j)
val next_stat_id = floorAdd(ln_stat_id, 1.U, ln_stat_ids, next_j === 0.U)
val next_cmd = floorAdd(ln_cmd, 1.U, ln_norm_cmds.size.U, next_j === 0.U && next_stat_id === 0.U)
val next_row = floorAdd(ln_row, NORM_STAT_IDS.U, rows, next_j === 0.U && next_stat_id === 0.U && next_cmd === 0.U)
val next_i = floorAdd(i, 1.U, req.max_i,
next_j === 0.U && next_stat_id === 0.U && next_cmd === 0.U && next_row === 0.U)
j := next_j
ln_stat_id := next_stat_id
ln_cmd := next_cmd
ln_row := next_row
i := next_i
when (next_i === 0.U && next_row === 0.U && next_cmd === 0.U && next_stat_id === 0.U && next_j === 0.U) {
state := idle
}.elsewhen (next_j === 0.U) {
state := ln_config
}
}
when (io.req.fire) {
req := io.req.bits
state := Mux((io.req.bits.act === Activation.LAYERNORM) || (io.req.bits.act === Activation.SOFTMAX), ln_config, st)
j := 0.U
i := 0.U
ln_row := 0.U
ln_cmd := 0.U
ln_stat_id := 0.U
}
}
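// Store-addressing sketch (illustrative numbers): with block_size = 16, input_w = 8 and
// acc_w = 32, a plain (non-full_c) store of tile (i, j) starts at
// c_dram_addr + (i*c_dram_stride + j)*16 bytes, whereas full_c stores move 4-byte accumulator
// elements, use offsets of (i*c_dram_stride + j)*64 bytes, and force max_blocks to 1.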
// Combined loop
class LoopMatmulState(val iterator_bitwidth: Int, val coreMaxAddrBits: Int, val max_addr: Int, val max_acc_addr: Int) extends Bundle {
val max_k = UInt(iterator_bitwidth.W)
val max_j = UInt(iterator_bitwidth.W)
val max_i = UInt(iterator_bitwidth.W)
val pad_k = UInt(iterator_bitwidth.W)
val pad_j = UInt(iterator_bitwidth.W)
val pad_i = UInt(iterator_bitwidth.W)
val a_dram_addr = UInt(coreMaxAddrBits.W)
val b_dram_addr = UInt(coreMaxAddrBits.W)
val d_dram_addr = UInt(coreMaxAddrBits.W)
val c_dram_addr = UInt(coreMaxAddrBits.W)
val a_dram_stride = UInt(coreMaxAddrBits.W)
val b_dram_stride = UInt(coreMaxAddrBits.W)
val d_dram_stride = UInt(coreMaxAddrBits.W)
val c_dram_stride = UInt(coreMaxAddrBits.W)
val a_transpose = Bool()
val b_transpose = Bool()
val act = UInt(Activation.bitwidth.W)
val low_d = Bool()
val full_c = Bool()
val ex_accumulate = Bool()
val a_ex_spad_id = UInt(2.W)
val b_ex_spad_id = UInt(2.W)
val configured = Bool()
val running = Bool()
val lda_started = Bool()
val ldb_started = Bool()
val ex_started = Bool()
val ldd_started = Bool()
val st_started = Bool()
val lda_completed = Bool()
val ldb_completed = Bool()
val ex_completed = Bool()
val ldd_completed = Bool()
val st_completed = Bool()
def all_completed(dummy: Int=0): Bool = lda_completed && ldb_completed && ldd_completed && ex_completed && st_completed
val a_addr_start = UInt(log2Up(max_addr).W)
val b_addr_end = UInt(log2Up(max_addr+1).W)
val resadd_addr_start = UInt(log2Up(max_acc_addr).W)
def reset(): Unit = {
configured := false.B
running := false.B
lda_started := false.B
ldb_started := false.B
ex_started := false.B
ldd_started := false.B
st_started := false.B
lda_completed := false.B
ldb_completed := false.B
ex_completed := false.B
ldd_completed := false.B
st_completed := false.B
//is_resadd := false.B
}
}
class LoopMatmul(block_size: Int, coreMaxAddrBits: Int, reservation_station_size: Int, max_lds: Int, max_exs: Int, max_sts: Int,
max_addr: Int, max_acc_addr: Int, input_w: Int, acc_w: Int, dma_max_bytes: Int,
mvin_rs2_t: MvinRs2, preload_rs1_t: PreloadRs, preload_rs2_t: PreloadRs,
compute_rs1_t: ComputeRs, compute_rs2_t: ComputeRs, mvout_rs2_t: MvoutRs2)
(implicit p: Parameters) extends Module {
val iterator_bitwidth = 16
val max_block_len = (dma_max_bytes / (block_size * input_w / 8)) max 1
val max_block_len_acc = (dma_max_bytes / (block_size * acc_w / 8)) max 1
val io = IO(new Bundle {
val in = Flipped(Decoupled(new GemminiCmd(reservation_station_size)))
val out = Decoupled(new GemminiCmd(reservation_station_size))
val ld_completed = Input(UInt(log2Up(reservation_station_size+1).W))
val st_completed = Input(UInt(log2Up(reservation_station_size+1).W))
val ex_completed = Input(UInt(log2Up(reservation_station_size+1).W))
val busy = Output(Bool())
})
// Create states
val concurrent_loops = 2
val loops = Reg(Vec(concurrent_loops, new LoopMatmulState(iterator_bitwidth, coreMaxAddrBits, max_addr, max_acc_addr)))
val head_loop_id = Reg(UInt(log2Up(concurrent_loops).W))
val tail_loop_id = (~head_loop_id).asUInt // This is the loop that we always try to configure if available
val head_loop = loops(head_loop_id)
val tail_loop = loops(tail_loop_id)
val loop_configured = loops.map(_.configured).reduce(_ || _)
val loop_being_configured_id = Mux(head_loop.configured, tail_loop_id, head_loop_id)
val loop_being_configured = loops(loop_being_configured_id)
val is_resadd = RegInit(false.B)
val max_all_addr = if(max_addr > max_acc_addr) max_addr else max_acc_addr
// Create inner modules
val ldA = Module(new LoopMatmulLdA(block_size, coreMaxAddrBits, iterator_bitwidth, max_all_addr, input_w, max_block_len, concurrent_loops, mvin_rs2_t))
val ldB = Module(new LoopMatmulLdB(block_size, coreMaxAddrBits, iterator_bitwidth, max_all_addr, input_w, max_block_len, concurrent_loops, mvin_rs2_t))
val ldD = Module(new LoopMatmulLdD(block_size, coreMaxAddrBits, iterator_bitwidth, max_acc_addr, input_w, acc_w, max_block_len, max_block_len_acc, concurrent_loops, mvin_rs2_t))
val ex = Module(new LoopMatmulExecute(block_size, coreMaxAddrBits, iterator_bitwidth, max_addr, max_acc_addr, concurrent_loops, preload_rs1_t, preload_rs2_t, compute_rs1_t, compute_rs2_t))
val stC = Module(new LoopMatmulStC(block_size, coreMaxAddrBits, iterator_bitwidth, max_acc_addr, input_w, acc_w, max_block_len, concurrent_loops, mvout_rs2_t))
// Create command queue
val cmd = Queue(io.in)
io.busy := cmd.valid || loop_configured
// Create ld arbiters
val ldab_arb = Module(new WeightedArbiter(new RoCCCommand(), maxWeightA=255, staticWeightAEnabled=true)) // TODO magic numbers
ldab_arb.io.inA <> ldA.io.cmd
ldab_arb.io.inB <> ldB.io.cmd
val ab_loads_on_same_loop = ldA.io.loop_id === ldB.io.loop_id
val forceA = !ab_loads_on_same_loop && ldA.io.loop_id === head_loop_id
val forceB = !ab_loads_on_same_loop && ldB.io.loop_id === head_loop_id
ldab_arb.io.forceA := Mux(is_resadd, ab_loads_on_same_loop && !ldA.io.idle, forceA)
ldab_arb.io.forceB := Mux(is_resadd, forceB || ldA.io.idle, forceB)
ldab_arb.io.weightA := 0.U
ldab_arb.io.inA_idle := ldA.io.idle
ldab_arb.io.inB_idle := ldB.io.idle
ldab_arb.io.inA_k := ldA.io.k
ldab_arb.io.inA_i := ldA.io.i
ldab_arb.io.inB_k := ldB.io.k
ldab_arb.io.inB_j := ldB.io.j
// Create global arbiter
val arb = Module(new Arbiter(new RoCCCommand(), 4))
arb.io.in(0) <> stC.io.cmd
arb.io.in(1) <> ex.io.cmd
arb.io.in(2) <> ldD.io.cmd
arb.io.in(3) <> ldab_arb.io.out
val unrolled_cmd = arb.io.out
// Create reservation station utilization counters
val ld_utilization = RegInit(0.U(log2Up(max_lds+1).W))
val st_utilization = RegInit(0.U(log2Up(max_sts+1).W))
val ex_utilization = RegInit(0.U(log2Up(max_exs+1).W))
ld_utilization := ld_utilization +& (ldA.io.cmd.fire || ldB.io.cmd.fire || ldD.io.cmd.fire) -& io.ld_completed
st_utilization := st_utilization +& stC.io.cmd.fire -& io.st_completed
ex_utilization := ex_utilization +& ex.io.cmd.fire -& io.ex_completed
assert(ld_utilization >= io.ld_completed, "ld utilization underflow")
assert(st_utilization >= io.st_completed, "st utilization underflow")
assert(ex_utilization >= io.ex_completed, "ex utilization underflow")
// Wire up unrolled command output
val is_loop_run_cmd = cmd.bits.cmd.inst.funct === LOOP_WS
val is_loop_config_cmd = cmd.bits.cmd.inst.funct >= LOOP_WS_CONFIG_BOUNDS && cmd.bits.cmd.inst.funct <= LOOP_WS_CONFIG_STRIDES_DC
val is_loop_cmd = is_loop_run_cmd || is_loop_config_cmd
io.out.bits.cmd := Mux(loop_configured, unrolled_cmd.bits, cmd.bits.cmd)
io.out.bits.cmd.status := cmd.bits.cmd.status // TODO This is not guaranteed to be the correct fix! We must fix this
io.out.bits.rob_id := DontCare
io.out.bits.from_matmul_fsm := Mux(loop_configured, true.B, cmd.bits.from_matmul_fsm)
io.out.bits.from_conv_fsm := Mux(loop_configured, false.B, cmd.bits.from_conv_fsm)
io.out.valid := Mux(loop_configured, unrolled_cmd.valid, cmd.valid && !is_loop_config_cmd && !is_loop_run_cmd)
cmd.ready := Mux(is_loop_cmd, !loop_being_configured.configured, !loop_configured && io.out.ready)
arb.io.out.ready := io.out.ready
// Wire up overloaded signals
ldA.io.rob_overloaded := ld_utilization >= max_lds.U
ldB.io.rob_overloaded := ld_utilization >= max_lds.U
ex.io.rob_overloaded := ex_utilization >= max_exs.U
ldD.io.rob_overloaded := ld_utilization >= max_lds.U
stC.io.rob_overloaded := st_utilization >= max_sts.U
// Wire up iterator inputs
ex.io.lda_completed := (ldA.io.loop_id =/= ex.io.loop_id) || ldA.io.idle
ex.io.ldb_completed := (ldB.io.loop_id =/= ex.io.loop_id) || ldB.io.idle
ex.io.ldd_completed := (ldD.io.loop_id =/= ex.io.loop_id) || ldD.io.idle
ex.io.ld_ka := ldA.io.k
ex.io.ld_kb := ldB.io.k
ex.io.ld_j := ldB.io.j
ex.io.ld_i := ldA.io.i
stC.io.ex_completed := (ex.io.loop_id =/= stC.io.loop_id) || ex.io.idle
stC.io.ex_k := ex.io.k
stC.io.ex_j := ex.io.j
stC.io.ex_i := ex.io.i
// when loop matmul is used as resadd unroller
// skip ex
// track ldB instead of ex
when(is_resadd){
stC.io.ex_completed := (ldA.io.loop_id =/= stC.io.loop_id || ldA.io.idle) && (ldB.io.loop_id =/= stC.io.loop_id || ldB.io.idle)
stC.io.ex_k := 0.U // req.max_k shall be 1
stC.io.ex_j := ldB.io.j
stC.io.ex_i := ldB.io.k
//ldB.io.rob_overloaded := ld_utilization >= max_lds.U || !((ldA.io.loop_id =/= ldB.io.loop_id) || ldA.io.idle)
}
val loops_configured = RegInit(0.U(16.W))
dontTouch(loops_configured)
// Create config registers
when(cmd.valid && is_loop_cmd && !loop_being_configured.configured) {
switch (cmd.bits.cmd.inst.funct) {
is (LOOP_WS_CONFIG_BOUNDS) {
loop_being_configured.max_k := cmd.bits.cmd.rs2(iterator_bitwidth * 3 - 1, iterator_bitwidth * 2)
loop_being_configured.max_j := cmd.bits.cmd.rs2(iterator_bitwidth * 2 - 1, iterator_bitwidth)
loop_being_configured.max_i := cmd.bits.cmd.rs2(iterator_bitwidth-1, 0)
loop_being_configured.pad_k := cmd.bits.cmd.rs1(iterator_bitwidth * 3 - 1, iterator_bitwidth * 2)
loop_being_configured.pad_j := cmd.bits.cmd.rs1(iterator_bitwidth * 2 - 1, iterator_bitwidth)
loop_being_configured.pad_i := cmd.bits.cmd.rs1(iterator_bitwidth-1, 0)
}
is (LOOP_WS_CONFIG_ADDRS_AB) {
loop_being_configured.a_dram_addr := cmd.bits.cmd.rs1
loop_being_configured.b_dram_addr := cmd.bits.cmd.rs2
}
is (LOOP_WS_CONFIG_ADDRS_DC) {
loop_being_configured.d_dram_addr := cmd.bits.cmd.rs1
loop_being_configured.c_dram_addr := cmd.bits.cmd.rs2
}
is (LOOP_WS_CONFIG_STRIDES_AB) {
loop_being_configured.a_dram_stride := cmd.bits.cmd.rs1
loop_being_configured.b_dram_stride := cmd.bits.cmd.rs2
}
is (LOOP_WS_CONFIG_STRIDES_DC) {
loop_being_configured.d_dram_stride := cmd.bits.cmd.rs1
loop_being_configured.c_dram_stride := cmd.bits.cmd.rs2
}
is (LOOP_WS) {
loop_being_configured.ex_accumulate := cmd.bits.cmd.rs1(0)
loop_being_configured.full_c := cmd.bits.cmd.rs1(1)
loop_being_configured.low_d := cmd.bits.cmd.rs1(2)
loop_being_configured.act := cmd.bits.cmd.rs1(8+Activation.bitwidth-1, 8) // TODO magic numbers
loop_being_configured.a_ex_spad_id := cmd.bits.cmd.rs1(19, 18)
loop_being_configured.b_ex_spad_id := cmd.bits.cmd.rs1(17, 16)
loop_being_configured.a_transpose := cmd.bits.cmd.rs2(0)
loop_being_configured.b_transpose := cmd.bits.cmd.rs2(1)
is_resadd := cmd.bits.cmd.rs2(2)
loop_being_configured.configured := true.B
loops_configured := loops_configured + 1.U
}
}
}
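// Encoding sketch (illustrative only): LOOP_WS_CONFIG_BOUNDS packs three 16-bit fields per
// register, rs2 = {max_k, max_j, max_i} and rs1 = {pad_k, pad_j, pad_i} with the i field in
// bits [15:0]; e.g. max_i = 8, max_j = 4, max_k = 2 is encoded as rs2 = (2 << 32) | (4 << 16) | 8.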
// Wire up request signals
val ld_d_addr_start = RegInit(0.U(log2Up(max_acc_addr).W))
val ex_c_addr_start = RegInit(0.U(log2Up(max_acc_addr).W))
val st_c_addr_start = RegInit(0.U(log2Up(max_acc_addr).W))
val loop_requesting_ldA_id = Mux(head_loop.lda_started, tail_loop_id, head_loop_id)
val loop_requesting_ldA = loops(loop_requesting_ldA_id)
ldA.io.req.bits.max_k := Mux(is_resadd, loop_requesting_ldA.max_j, loop_requesting_ldA.max_k)
ldA.io.req.bits.max_i := loop_requesting_ldA.max_i
ldA.io.req.bits.pad_k := Mux(is_resadd, loop_requesting_ldA.pad_j, loop_requesting_ldA.pad_k)
ldA.io.req.bits.pad_i := loop_requesting_ldA.pad_i
ldA.io.req.bits.dram_addr := loop_requesting_ldA.a_dram_addr
ldA.io.req.bits.dram_stride := loop_requesting_ldA.a_dram_stride
ldA.io.req.bits.transpose := loop_requesting_ldA.a_transpose
ldA.io.req.bits.addr_start := Mux(loop_requesting_ldA.a_ex_spad_id === 0.U, loop_requesting_ldA.a_addr_start, (loop_requesting_ldA.a_ex_spad_id - 1.U) * (max_addr / concurrent_loops).U)
ldA.io.req.bits.loop_id := loop_requesting_ldA_id
ldA.io.req.bits.is_resadd := is_resadd
ldA.io.req.valid := !loop_requesting_ldA.lda_started && loop_requesting_ldA.configured
when (ldA.io.req.fire) {
loop_requesting_ldA.running := true.B
loop_requesting_ldA.lda_started := true.B
}
val loop_requesting_ldB_id = Mux(head_loop.ldb_started, tail_loop_id, head_loop_id)
val loop_requesting_ldB = loops(loop_requesting_ldB_id)
ldB.io.req.bits.max_j := loop_requesting_ldB.max_j
ldB.io.req.bits.max_k := Mux(is_resadd, loop_requesting_ldB.max_i, loop_requesting_ldB.max_k)
ldB.io.req.bits.pad_j := loop_requesting_ldB.pad_j
ldB.io.req.bits.pad_k := Mux(is_resadd, loop_requesting_ldB.pad_i, loop_requesting_ldB.pad_k)
ldB.io.req.bits.dram_addr := loop_requesting_ldB.b_dram_addr
ldB.io.req.bits.dram_stride := loop_requesting_ldB.b_dram_stride
ldB.io.req.bits.transpose := loop_requesting_ldB.b_transpose
ldB.io.req.bits.addr_end := Mux(loop_requesting_ldB.b_ex_spad_id === 0.U, loop_requesting_ldB.b_addr_end, (loop_requesting_ldB.b_ex_spad_id) * (max_addr / concurrent_loops).U)
ldB.io.req.bits.loop_id := loop_requesting_ldB_id
ldB.io.req.bits.is_resadd := is_resadd
ldB.io.req.valid := !loop_requesting_ldB.ldb_started && loop_requesting_ldB.configured
when (ldB.io.req.fire) {
loop_requesting_ldB.running := true.B
loop_requesting_ldB.ldb_started := true.B
}
val loop_requesting_ex_id = Mux(head_loop.ex_started, tail_loop_id, head_loop_id)
val loop_requesting_ex = loops(loop_requesting_ex_id)
ex.io.req.bits.max_j := loop_requesting_ex.max_j
ex.io.req.bits.max_k := loop_requesting_ex.max_k
ex.io.req.bits.max_i := loop_requesting_ex.max_i
ex.io.req.bits.pad_j := loop_requesting_ex.pad_j
ex.io.req.bits.pad_k := loop_requesting_ex.pad_k
ex.io.req.bits.pad_i := loop_requesting_ex.pad_i
ex.io.req.bits.accumulate := loop_requesting_ex.ex_accumulate
ex.io.req.bits.a_addr_start := Mux(loop_requesting_ex.a_ex_spad_id === 0.U, loop_requesting_ex.a_addr_start, (loop_requesting_ex.a_ex_spad_id - 1.U) * (max_addr / concurrent_loops).U)
ex.io.req.bits.b_addr_end := Mux(loop_requesting_ex.b_ex_spad_id === 0.U, loop_requesting_ex.b_addr_end, (loop_requesting_ex.b_ex_spad_id) * (max_addr / concurrent_loops).U)
ex.io.req.bits.a_tranpose := loop_requesting_ex.a_transpose
ex.io.req.bits.b_tranpose := loop_requesting_ex.b_transpose
ex.io.req.bits.c_addr_start := ex_c_addr_start
ex.io.req.bits.loop_id := loop_requesting_ex_id
ex.io.req.bits.skip := is_resadd
ex.io.req.valid := !loop_requesting_ex.ex_started && loop_requesting_ex.lda_started &&
loop_requesting_ex.ldb_started && loop_requesting_ex.ldd_started && loop_requesting_ex.configured
when (ex.io.req.fire) {
loop_requesting_ex.running := true.B
loop_requesting_ex.ex_started := true.B
when (loop_requesting_ex.c_dram_addr =/= 0.U) {
ex_c_addr_start := floorAdd(ex_c_addr_start, (max_acc_addr / concurrent_loops).U, max_acc_addr.U)
}
}
val loop_requesting_ldD_id = Mux(head_loop.ldd_started, tail_loop_id, head_loop_id)
val loop_requesting_ldD = loops(loop_requesting_ldD_id)
ldD.io.req.bits.max_j := loop_requesting_ldD.max_j
ldD.io.req.bits.max_i := loop_requesting_ldD.max_i
ldD.io.req.bits.pad_j := loop_requesting_ldD.pad_j
ldD.io.req.bits.pad_i := loop_requesting_ldD.pad_i
ldD.io.req.bits.dram_addr := loop_requesting_ldD.d_dram_addr
ldD.io.req.bits.dram_stride := loop_requesting_ldD.d_dram_stride
ldD.io.req.bits.low_d := loop_requesting_ldD.low_d
ldD.io.req.bits.addr_start := ld_d_addr_start
ldD.io.req.bits.loop_id := loop_requesting_ldD_id
ldD.io.req.valid := !loop_requesting_ldD.ldd_started && loop_requesting_ldD.configured
when (ldD.io.req.fire) {
loop_requesting_ldD.running := true.B
loop_requesting_ldD.ldd_started := true.B
when (loop_requesting_ldD.c_dram_addr =/= 0.U) {
ld_d_addr_start := floorAdd(ld_d_addr_start, (max_acc_addr / concurrent_loops).U, max_acc_addr.U)
}
}
val loop_requesting_st_id = Mux(head_loop.st_started, tail_loop_id, head_loop_id)
val loop_requesting_st = loops(loop_requesting_st_id)
stC.io.req.bits.max_k := Mux(is_resadd, 1.U, loop_requesting_st.max_k)
stC.io.req.bits.max_j := loop_requesting_st.max_j
stC.io.req.bits.max_i := loop_requesting_st.max_i
stC.io.req.bits.pad_j := loop_requesting_st.pad_j
stC.io.req.bits.pad_i := loop_requesting_st.pad_i
stC.io.req.bits.dram_addr := loop_requesting_st.c_dram_addr
stC.io.req.bits.dram_stride := loop_requesting_st.c_dram_stride
stC.io.req.bits.full_c := loop_requesting_st.full_c
stC.io.req.bits.act := loop_requesting_st.act
stC.io.req.bits.addr_start := st_c_addr_start
stC.io.req.bits.loop_id := loop_requesting_st_id
stC.io.req.bits.is_resadd := is_resadd
stC.io.req.valid := !loop_requesting_st.st_started && loop_requesting_st.ex_started && loop_requesting_st.configured
when (stC.io.req.fire) {
loop_requesting_st.running := true.B
loop_requesting_st.st_started := true.B
when (loop_requesting_st.c_dram_addr =/= 0.U) {
st_c_addr_start := floorAdd(st_c_addr_start, (max_acc_addr / concurrent_loops).U, max_acc_addr.U)
}
}
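// Residual-add mode: the A and B loads and the C store are redirected to resadd_addr_start in the
// accumulator, and the store no longer waits for the execute stage (which is skipped via ex.io.req.bits.skip).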
when(is_resadd){
ldA.io.req.bits.addr_start := loop_requesting_ldA.resadd_addr_start
ldB.io.req.bits.addr_end := loop_requesting_ldB.resadd_addr_start
stC.io.req.bits.addr_start := loop_requesting_st.resadd_addr_start
stC.io.req.valid := !loop_requesting_st.st_started && loop_requesting_st.configured
}
// Handle completed signals
when (ldA.io.idle && loops(ldA.io.loop_id).running && loops(ldA.io.loop_id).lda_started) {
loops(ldA.io.loop_id).lda_completed := true.B
}
when (ldB.io.idle && loops(ldB.io.loop_id).running && loops(ldB.io.loop_id).ldb_started) {
loops(ldB.io.loop_id).ldb_completed := true.B
}
when (ex.io.idle && loops(ex.io.loop_id).running && loops(ex.io.loop_id).ex_started) {
loops(ex.io.loop_id).ex_completed := true.B
}
when (ldD.io.idle && loops(ldD.io.loop_id).running && loops(ldD.io.loop_id).ldd_started) {
loops(ldD.io.loop_id).ldd_completed := true.B
}
when (stC.io.idle && loops(stC.io.loop_id).running && loops(stC.io.loop_id).st_started) {
loops(stC.io.loop_id).st_completed := true.B
}
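// Once every stage of the head loop has finished, retire it and promote the other loop slot
// (head_loop_id is a single bit, so negating it flips between the two concurrent loops).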
when (head_loop.running && head_loop.all_completed()) {
head_loop.reset()
head_loop_id := ~head_loop_id
}
// Resets
when (reset.asBool) {
loops.zipWithIndex.foreach { case (l, i) =>
l.reset()
l.a_addr_start := (i * (max_addr / concurrent_loops)).U
l.b_addr_end := ((i+1) * (max_addr / concurrent_loops)).U
l.resadd_addr_start := (i * (max_acc_addr / concurrent_loops)).U
}
}
}
object LoopMatmul {
def apply(in: DecoupledIO[GemminiCmd], ld_completed: UInt, st_completed: UInt, ex_completed: UInt,
block_size: Int, coreMaxAddrBits: Int, rob_size: Int, max_lds: Int, max_exs: Int, max_sts: Int,
max_addr: Int, max_acc_addr: Int, input_w: Int, acc_w: Int, dma_max_bytes: Int,
mvin_rs2_t: MvinRs2, preload_rs1_t: PreloadRs, preload_rs2_t: PreloadRs,
compute_rs1_t: ComputeRs, compute_rs2_t: ComputeRs, mvout_rs2_t: MvoutRs2)
(implicit p: Parameters): (DecoupledIO[GemminiCmd], Bool) = {
val mod = Module(new LoopMatmul(block_size, coreMaxAddrBits, rob_size, max_lds, max_exs, max_sts,
max_addr, max_acc_addr, input_w, acc_w, dma_max_bytes,
mvin_rs2_t, preload_rs1_t, preload_rs2_t, compute_rs1_t, compute_rs2_t, mvout_rs2_t))
mod.io.in <> in
mod.io.ld_completed := ld_completed
mod.io.st_completed := st_completed
mod.io.ex_completed := ex_completed
(mod.io.out, mod.io.busy)
}
def castDramOffset(dram_offset: UInt): UInt = {
// Mask DRAM offsets down to their low 32 bits
dram_offset & "hFFFFFFFF".U
}
}
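// A minimal usage sketch, not part of the original file: castDramOffset simply masks its argument with
// 0xFFFFFFFF, so only the low 32 bits of a DRAM offset survive. The object and parameter names below are
// hypothetical.
object CastDramOffsetSketch {
  import chisel3._
  def low32(dram_offset: UInt): UInt =
    LoopMatmul.castDramOffset(dram_offset) // equivalent to dram_offset & "hFFFFFFFF".U
}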
File LocalAddr.scala:
package gemmini
import chisel3._
import chisel3.util._
class LocalAddr(sp_banks: Int, sp_bank_entries: Int, acc_banks: Int, acc_bank_entries: Int) extends Bundle {
private val localAddrBits = 32 // TODO magic number
private val spAddrBits = log2Ceil(sp_banks * sp_bank_entries)
private val accAddrBits = log2Ceil(acc_banks * acc_bank_entries)
private val maxAddrBits = spAddrBits max accAddrBits
private val spBankBits = log2Up(sp_banks)
private val spBankRowBits = log2Up(sp_bank_entries)
private val accBankBits = log2Up(acc_banks)
val accBankRowBits = log2Up(acc_bank_entries)
val spRows = sp_banks * sp_bank_entries
val is_acc_addr = Bool()
val accumulate = Bool()
val read_full_acc_row = Bool()
val norm_cmd = NormCmd()
private val metadata_w = is_acc_addr.getWidth + accumulate.getWidth + read_full_acc_row.getWidth + norm_cmd.getWidth
assert(maxAddrBits + metadata_w < 32)
val garbage = UInt(((localAddrBits - maxAddrBits - metadata_w - 1) max 0).W)
val garbage_bit = if (localAddrBits - maxAddrBits >= metadata_w + 1) UInt(1.W) else UInt(0.W)
val data = UInt(maxAddrBits.W)
def sp_bank(dummy: Int = 0) = if (spAddrBits == spBankRowBits) 0.U else data(spAddrBits - 1, spBankRowBits)
def sp_row(dummy: Int = 0) = data(spBankRowBits - 1, 0)
def acc_bank(dummy: Int = 0) = if (accAddrBits == accBankRowBits) 0.U else data(accAddrBits - 1, accBankRowBits)
def acc_row(dummy: Int = 0) = data(accBankRowBits - 1, 0)
def full_sp_addr(dummy: Int = 0) = data(spAddrBits - 1, 0)
def full_acc_addr(dummy: Int = 0) = data(accAddrBits - 1, 0)
def is_same_address(other: LocalAddr): Bool = is_acc_addr === other.is_acc_addr && data === other.data
def is_same_address(other: UInt): Bool = is_same_address(other.asTypeOf(this))
def is_garbage(dummy: Int = 0) = is_acc_addr && accumulate && read_full_acc_row && data.andR &&
(if (garbage_bit.getWidth > 0) garbage_bit.asBool else true.B)
def +(other: UInt) = {
require(isPow2(sp_bank_entries)) // TODO remove this requirement
require(isPow2(acc_bank_entries)) // TODO remove this requirement
val result = WireInit(this)
result.data := data + other
result
}
def <=(other: LocalAddr) =
is_acc_addr === other.is_acc_addr &&
Mux(is_acc_addr, full_acc_addr() <= other.full_acc_addr(), full_sp_addr() <= other.full_sp_addr())
def <(other: LocalAddr) =
is_acc_addr === other.is_acc_addr &&
Mux(is_acc_addr, full_acc_addr() < other.full_acc_addr(), full_sp_addr() < other.full_sp_addr())
def >(other: LocalAddr) =
is_acc_addr === other.is_acc_addr &&
Mux(is_acc_addr, full_acc_addr() > other.full_acc_addr(), full_sp_addr() > other.full_sp_addr())
def add_with_overflow(other: UInt): Tuple2[LocalAddr, Bool] = {
require(isPow2(sp_bank_entries)) // TODO remove this requirement
require(isPow2(acc_bank_entries)) // TODO remove this requirement
val sum = data +& other
val overflow = Mux(is_acc_addr, sum(accAddrBits), sum(spAddrBits))
val result = WireInit(this)
result.data := sum(maxAddrBits - 1, 0)
(result, overflow)
}
// This function can only be used with non-accumulator addresses. Returns both the new address and an underflow flag
def floorSub(other: UInt, floor: UInt): (LocalAddr, Bool) = {
require(isPow2(sp_bank_entries)) // TODO remove this requirement
require(isPow2(acc_bank_entries)) // TODO remove this requirement
val underflow = data < (floor +& other)
val result = WireInit(this)
result.data := Mux(underflow, floor, data - other)
(result, underflow)
}
def make_this_garbage(dummy: Int = 0): Unit = {
is_acc_addr := true.B
accumulate := true.B
read_full_acc_row := true.B
garbage_bit := 1.U
data := ~(0.U(maxAddrBits.W))
}
}
object LocalAddr {
def cast_to_local_addr[T <: Data](local_addr_t: LocalAddr, t: T): LocalAddr = {
// This convenience function is basically the same as calling "asTypeOf(local_addr_t)", except that it also
// casts unnecessary garbage bits to 0, which may help reduce multiplier/adder bitwidths
val result = WireInit(t.asTypeOf(local_addr_t))
if (result.garbage_bit.getWidth > 0) result.garbage := 0.U
result
}
def cast_to_sp_addr[T <: Data](local_addr_t: LocalAddr, t: T): LocalAddr = {
// This function is a wrapper around cast_to_local_addr, but it assumes that the input will not be the garbage
// address
val result = WireInit(cast_to_local_addr(local_addr_t, t))
result.is_acc_addr := false.B
result.accumulate := false.B
result.read_full_acc_row := false.B
// assert(!result.garbage_bit, "cast_to_sp_addr doesn't work on garbage addresses")
result
}
def cast_to_acc_addr[T <: Data](local_addr_t: LocalAddr, t: T, accumulate: Bool, read_full: Bool): LocalAddr = {
// This function is a wrapper around cast_to_local_addr, but it assumes that the input will not be the garbage
// address
val result = WireInit(cast_to_local_addr(local_addr_t, t))
result.is_acc_addr := true.B
result.accumulate := accumulate
result.read_full_acc_row := read_full
// assert(!result.garbage_bit, "cast_to_acc_addr doesn't work on garbage addresses")
result
}
def garbage_addr(local_addr_t: LocalAddr): LocalAddr = {
val result = Wire(chiselTypeOf(local_addr_t))
result := DontCare
result.make_this_garbage()
result
}
}
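// A small usage sketch, not part of the original source: cast_to_acc_addr reinterprets raw bits as an
// accumulator-space LocalAddr, while garbage_addr builds the all-ones "garbage" address recognized by
// is_garbage(). The object and parameter names below are hypothetical.
object LocalAddrSketch {
  import chisel3._
  def accDest(local_addr_t: LocalAddr, raw: UInt, acc: Bool): LocalAddr =
    LocalAddr.cast_to_acc_addr(local_addr_t, raw, accumulate = acc, read_full = false.B)
  def noDest(local_addr_t: LocalAddr): LocalAddr =
    LocalAddr.garbage_addr(local_addr_t)
}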
File Util.scala:
package gemmini
import chisel3._
import chisel3.util._
object Util {
def wrappingAdd(u: UInt, n: UInt, max_plus_one: Int): UInt = {
val max = max_plus_one - 1
if (max == 0) {
0.U
} else {
assert(n <= max.U, "cannot wrapAdd when n is larger than max")
Mux(u >= max.U - n + 1.U && n =/= 0.U, n - (max.U - u) - 1.U, u + n)
}
}
def wrappingAdd(u: UInt, n: UInt, max_plus_one: UInt, en: Bool = true.B): UInt = {
val max = max_plus_one - 1.U
assert(n <= max || max === 0.U, "cannot wrapAdd when n is larger than max, unless max is 0")
/*
Mux(!en, u,
Mux (max === 0.U, 0.U,
Mux(u >= max - n + 1.U && n =/= 0.U, n - (max - u) - 1.U, u + n)))
*/
MuxCase(u + n, Seq(
(!en) -> u,
(max === 0.U) -> 0.U,
(u >= max - n + 1.U && n =/= 0.U) -> (n - (max - u) - 1.U)
))
}
def satAdd(u: UInt, v: UInt, max: UInt): UInt = {
Mux(u +& v > max, max, u + v)
}
def floorAdd(u: UInt, n: UInt, max_plus_one: UInt, en: Bool = true.B): UInt = {
val max = max_plus_one - 1.U
MuxCase(u + n, Seq(
(!en) -> u,
((u +& n) > max) -> 0.U
))
}
def sFloorAdd(s: SInt, n: UInt, max_plus_one: SInt, min: SInt, en: Bool = true.B): SInt = {
val max = max_plus_one - 1.S
MuxCase(s + n.zext, Seq(
(!en) -> s,
((s +& n.zext) > max) -> min
))
}
def wrappingSub(u: UInt, n: UInt, max_plus_one: Int): UInt = {
val max = max_plus_one - 1
assert(n <= max.U, "cannot wrapSub when n is larger than max")
Mux(u < n, max.U - (n-u) + 1.U, u - n)
}
def ceilingDivide(numer: Int, denom: Int): Int = {
if (numer % denom == 0) { numer / denom }
else { numer / denom + 1}
}
def closestLowerPowerOf2(u: UInt): UInt = {
// TODO figure out a more efficient way of doing this. Is this many muxes really necessary?
val exp = u.asBools.zipWithIndex.map { case (b, i) =>
Mux(b, i.U, 0.U)
}.reduce((acc, u) => Mux(acc > u, acc, u))
(1.U << exp).asUInt
}
def closestAlignedLowerPowerOf2(u: UInt, addr: UInt, stride: UInt, rowBytes: Int): UInt = {
val lgRowBytes = log2Ceil(rowBytes)
// TODO figure out a more efficient way of doing this. Is this many muxes really necessary?
val exp = u.asBools.zipWithIndex.map { case (b, i) =>
Mux(b && addr(i + lgRowBytes - 1, 0) === 0.U && stride(i + lgRowBytes - 1, 0) === 0.U, i.U, 0.U)
}.reduce((acc, u) => Mux(acc > u, acc, u))
(1.U << exp).asUInt
}
// This function will return "next" with a 0-cycle delay when the "enable" signal is high. It's like a queue with
// the "pipe" and "flow" parameters set to "true"
def RegEnableThru[T <: Data](next: T, enable: Bool): T = {
val buf = RegEnable(next, enable)
Mux(enable, next, buf)
}
def RegEnableThru[T <: Data](next: T, init: T, enable: Bool): T = {
val buf = RegEnable(next, init, enable)
Mux(enable, next, buf)
}
def maxOf(u1: UInt, u2: UInt): UInt = {
Mux(u1 > u2, u1, u2)
}
def maxOf[T <: Data](x: T, y: T)(implicit ev: Arithmetic[T]): T = {
import ev._
Mux(x > y, x, y)
}
def minOf(u1: UInt, u2: UInt): UInt = {
Mux(u1 < u2, u1, u2)
}
def accumulateTree[T <: Data](xs: Seq[T])(implicit ev: Arithmetic[T]): T = {
import ev._
assert(xs.nonEmpty, "can't accumulate 0 elements")
if (xs.length == 1) {
xs.head
} else {
val upperRowLen = 1 << log2Ceil(xs.length)
val upperRow = xs.padTo(upperRowLen, xs.head.zero)
val pairs = upperRow.grouped(2)
val lowerRow = pairs.map { case Seq(a, b) => a + b }
accumulateTree(lowerRow.toSeq)
}
}
// An undirected Valid bundle
class UDValid[T <: Data](t: T) extends Bundle {
val valid = Bool()
val bits = t.cloneType
def push(b: T): Unit = {
valid := true.B
bits := b
}
def pop(dummy: Int = 0): T = {
valid := false.B
bits
}
}
object UDValid {
def apply[T <: Data](t: T): UDValid[T] = new UDValid(t)
}
// creates a Reg and the next-state Wire, and returns both
def regwire(bits: Int) = {
val wire = Wire(UInt(bits.W))
val reg = RegNext(wire)
wire := reg // default wire to read from reg
(reg, wire)
}
}
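// An illustrative sketch, not part of the original Util.scala: floorAdd(u, n, max_plus_one) returns u + n
// but snaps back to 0 once the sum would exceed max_plus_one - 1. This is the idiom the loop controller
// above uses to rotate its per-loop addr_start pointers. The names below are hypothetical.
object FloorAddSketch {
  import chisel3._
  import Util.floorAdd
  def nextAddrStart(cur: UInt, step: UInt, maxPlusOne: UInt): UInt =
    floorAdd(cur, step, maxPlusOne) // wraps to 0.U instead of running past maxPlusOne - 1
}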
| module LoopMatmulStC( // @[LoopMatmul.scala:514:7]
input clock, // @[LoopMatmul.scala:514:7]
input reset, // @[LoopMatmul.scala:514:7]
output io_req_ready, // @[LoopMatmul.scala:516:14]
input io_req_valid, // @[LoopMatmul.scala:516:14]
input [15:0] io_req_bits_max_k, // @[LoopMatmul.scala:516:14]
input [15:0] io_req_bits_max_j, // @[LoopMatmul.scala:516:14]
input [15:0] io_req_bits_max_i, // @[LoopMatmul.scala:516:14]
input [3:0] io_req_bits_pad_j, // @[LoopMatmul.scala:516:14]
input [3:0] io_req_bits_pad_i, // @[LoopMatmul.scala:516:14]
input [39:0] io_req_bits_dram_addr, // @[LoopMatmul.scala:516:14]
input [39:0] io_req_bits_dram_stride, // @[LoopMatmul.scala:516:14]
input io_req_bits_full_c, // @[LoopMatmul.scala:516:14]
input [2:0] io_req_bits_act, // @[LoopMatmul.scala:516:14]
input [9:0] io_req_bits_addr_start, // @[LoopMatmul.scala:516:14]
input io_req_bits_loop_id, // @[LoopMatmul.scala:516:14]
input io_req_bits_is_resadd, // @[LoopMatmul.scala:516:14]
input io_cmd_ready, // @[LoopMatmul.scala:516:14]
output io_cmd_valid, // @[LoopMatmul.scala:516:14]
output [6:0] io_cmd_bits_inst_funct, // @[LoopMatmul.scala:516:14]
output [63:0] io_cmd_bits_rs1, // @[LoopMatmul.scala:516:14]
output [63:0] io_cmd_bits_rs2, // @[LoopMatmul.scala:516:14]
input [15:0] io_ex_k, // @[LoopMatmul.scala:516:14]
input [15:0] io_ex_j, // @[LoopMatmul.scala:516:14]
input [15:0] io_ex_i, // @[LoopMatmul.scala:516:14]
input io_ex_completed, // @[LoopMatmul.scala:516:14]
output io_idle, // @[LoopMatmul.scala:516:14]
input io_rob_overloaded, // @[LoopMatmul.scala:516:14]
output io_loop_id // @[LoopMatmul.scala:516:14]
);
wire _ln_mvout_cmd_rs2_local_addr_result_result_WIRE_is_acc_addr; // @[LocalAddr.scala:108:37]
wire _ln_mvout_cmd_rs2_local_addr_result_result_WIRE_accumulate; // @[LocalAddr.scala:108:37]
wire _ln_mvout_cmd_rs2_local_addr_result_result_WIRE_read_full_acc_row; // @[LocalAddr.scala:108:37]
wire [2:0] _ln_mvout_cmd_rs2_local_addr_result_result_WIRE_norm_cmd; // @[LocalAddr.scala:108:37]
wire _ln_mvout_cmd_rs2_local_addr_result_result_WIRE_garbage_bit; // @[LocalAddr.scala:108:37]
wire [13:0] _ln_mvout_cmd_rs2_local_addr_result_result_WIRE_data; // @[LocalAddr.scala:108:37]
wire _mvout_cmd_rs2_local_addr_result_result_WIRE_is_acc_addr; // @[LocalAddr.scala:108:37]
wire _mvout_cmd_rs2_local_addr_result_result_WIRE_accumulate; // @[LocalAddr.scala:108:37]
wire _mvout_cmd_rs2_local_addr_result_result_WIRE_read_full_acc_row; // @[LocalAddr.scala:108:37]
wire [2:0] _mvout_cmd_rs2_local_addr_result_result_WIRE_norm_cmd; // @[LocalAddr.scala:108:37]
wire _mvout_cmd_rs2_local_addr_result_result_WIRE_garbage_bit; // @[LocalAddr.scala:108:37]
wire [13:0] _mvout_cmd_rs2_local_addr_result_result_WIRE_data; // @[LocalAddr.scala:108:37]
wire [6:0] mvout_cmd_rs2_num_cols; // @[LoopMatmul.scala:563:27]
wire [2:0] mvout_cmd_rs2_local_addr_norm_cmd; // @[LoopMatmul.scala:563:27]
wire io_req_valid_0 = io_req_valid; // @[LoopMatmul.scala:514:7]
wire [15:0] io_req_bits_max_k_0 = io_req_bits_max_k; // @[LoopMatmul.scala:514:7]
wire [15:0] io_req_bits_max_j_0 = io_req_bits_max_j; // @[LoopMatmul.scala:514:7]
wire [15:0] io_req_bits_max_i_0 = io_req_bits_max_i; // @[LoopMatmul.scala:514:7]
wire [3:0] io_req_bits_pad_j_0 = io_req_bits_pad_j; // @[LoopMatmul.scala:514:7]
wire [3:0] io_req_bits_pad_i_0 = io_req_bits_pad_i; // @[LoopMatmul.scala:514:7]
wire [39:0] io_req_bits_dram_addr_0 = io_req_bits_dram_addr; // @[LoopMatmul.scala:514:7]
wire [39:0] io_req_bits_dram_stride_0 = io_req_bits_dram_stride; // @[LoopMatmul.scala:514:7]
wire io_req_bits_full_c_0 = io_req_bits_full_c; // @[LoopMatmul.scala:514:7]
wire [2:0] io_req_bits_act_0 = io_req_bits_act; // @[LoopMatmul.scala:514:7]
wire [9:0] io_req_bits_addr_start_0 = io_req_bits_addr_start; // @[LoopMatmul.scala:514:7]
wire io_req_bits_loop_id_0 = io_req_bits_loop_id; // @[LoopMatmul.scala:514:7]
wire io_req_bits_is_resadd_0 = io_req_bits_is_resadd; // @[LoopMatmul.scala:514:7]
wire io_cmd_ready_0 = io_cmd_ready; // @[LoopMatmul.scala:514:7]
wire [15:0] io_ex_k_0 = io_ex_k; // @[LoopMatmul.scala:514:7]
wire [15:0] io_ex_j_0 = io_ex_j; // @[LoopMatmul.scala:514:7]
wire [15:0] io_ex_i_0 = io_ex_i; // @[LoopMatmul.scala:514:7]
wire io_ex_completed_0 = io_ex_completed; // @[LoopMatmul.scala:514:7]
wire io_rob_overloaded_0 = io_rob_overloaded; // @[LoopMatmul.scala:514:7]
wire [3:0][2:0] _GEN = '{3'h5, 3'h0, 3'h6, 3'h5};
wire [3:0][2:0] _GEN_0 = '{3'h1, 3'h0, 3'h3, 3'h1};
wire [3:0][2:0] _GEN_1 = '{3'h5, 3'h0, 3'h7, 3'h5};
wire [3:0][2:0] _GEN_2 = '{3'h2, 3'h0, 3'h4, 3'h2};
wire [2:0] _ln_norm_cmds_WIRE_0 = 3'h1; // @[LoopMatmul.scala:577:37]
wire [2:0] ln_norm_cmds_0_0 = 3'h1; // @[LoopMatmul.scala:577:29]
wire [2:0] _ln_norm_cmds_WIRE_1_0 = 3'h3; // @[LoopMatmul.scala:577:73]
wire [2:0] ln_norm_cmds_1_0 = 3'h3; // @[LoopMatmul.scala:577:29]
wire [2:0] _ln_norm_cmds_WIRE_1_1 = 3'h4; // @[LoopMatmul.scala:577:73]
wire [2:0] ln_norm_cmds_1_1 = 3'h4; // @[LoopMatmul.scala:577:29]
wire [2:0] _sm_norm_cmds_WIRE_0 = 3'h5; // @[LoopMatmul.scala:580:37]
wire [2:0] _sm_norm_cmds_WIRE_1 = 3'h5; // @[LoopMatmul.scala:580:37]
wire [2:0] sm_norm_cmds_0_0 = 3'h5; // @[LoopMatmul.scala:580:29]
wire [2:0] sm_norm_cmds_0_1 = 3'h5; // @[LoopMatmul.scala:580:29]
wire [2:0] _sm_norm_cmds_WIRE_1_0 = 3'h6; // @[LoopMatmul.scala:580:72]
wire [2:0] sm_norm_cmds_1_0 = 3'h6; // @[LoopMatmul.scala:580:29]
wire [2:0] _sm_norm_cmds_WIRE_1_1 = 3'h7; // @[LoopMatmul.scala:580:72]
wire [2:0] sm_norm_cmds_1_1 = 3'h7; // @[LoopMatmul.scala:580:29]
wire [2:0] _ln_norm_cmds_WIRE_2_0 = 3'h0; // @[LoopMatmul.scala:578:12]
wire [2:0] _ln_norm_cmds_WIRE_2_1 = 3'h0; // @[LoopMatmul.scala:578:12]
wire [2:0] ln_norm_cmds_2_0 = 3'h0; // @[LoopMatmul.scala:577:29]
wire [2:0] ln_norm_cmds_2_1 = 3'h0; // @[LoopMatmul.scala:577:29]
wire [2:0] _sm_norm_cmds_WIRE_2_0 = 3'h0; // @[LoopMatmul.scala:581:12]
wire [2:0] _sm_norm_cmds_WIRE_2_1 = 3'h0; // @[LoopMatmul.scala:581:12]
wire [2:0] sm_norm_cmds_2_0 = 3'h0; // @[LoopMatmul.scala:580:29]
wire [2:0] sm_norm_cmds_2_1 = 3'h0; // @[LoopMatmul.scala:580:29]
wire [12:0] ln_config_norm_rs1__spacer1 = 13'h0; // @[LoopMatmul.scala:596:32]
wire [5:0] ln_config_norm_rs1__spacer0 = 6'h0; // @[LoopMatmul.scala:596:32]
wire [1:0] ln_config_norm_rs1_cmd_type = 2'h3; // @[LoopMatmul.scala:596:32]
wire [63:0] ln_config_norm_rs2 = 64'h0; // @[LoopMatmul.scala:602:28]
wire [7:0] ln_config_norm_rs1_lo_lo = 8'h3; // @[LoopMatmul.scala:605:44]
wire [1:0] ln_config_norm_rs1_hi_lo = 2'h1; // @[LoopMatmul.scala:605:44, :646:36]
wire [44:0] ln_config_norm_rs1_hi_hi = 45'h0; // @[LoopMatmul.scala:605:44]
wire [46:0] ln_config_norm_rs1_hi = 47'h1; // @[LoopMatmul.scala:605:44]
wire [4:0] ln_mvout_cmd_rs2_num_rows = 5'h1; // @[LoopMatmul.scala:613:30]
wire [8:0] mvout_cmd_rs2__spacer1 = 9'h0; // @[LoopMatmul.scala:563:27]
wire [8:0] ln_mvout_cmd_rs2__spacer1 = 9'h0; // @[LoopMatmul.scala:613:30]
wire mvout_cmd_rs2_local_addr_is_acc_addr = 1'h1; // @[LoopMatmul.scala:563:27]
wire mvout_cmd_rs2_local_addr_result_is_acc_addr = 1'h1; // @[LocalAddr.scala:129:26]
wire ln_config_norm_rs1_set_stats_id_only = 1'h1; // @[LoopMatmul.scala:596:32]
wire ln_mvout_cmd_rs2_local_addr_is_acc_addr = 1'h1; // @[LoopMatmul.scala:613:30]
wire ln_mvout_cmd_rs2_local_addr_result_is_acc_addr = 1'h1; // @[LocalAddr.scala:129:26]
wire [10:0] mvout_cmd_rs2__spacer2 = 11'h0; // @[LoopMatmul.scala:563:27]
wire [10:0] mvout_cmd_rs2_local_addr_garbage = 11'h0; // @[LoopMatmul.scala:563:27]
wire [10:0] mvout_cmd_rs2_local_addr_result_result_garbage = 11'h0; // @[LocalAddr.scala:108:26]
wire [10:0] mvout_cmd_rs2_local_addr_result_garbage = 11'h0; // @[LocalAddr.scala:129:26]
wire [10:0] ln_mvout_cmd_rs2__spacer2 = 11'h0; // @[LoopMatmul.scala:613:30]
wire [10:0] ln_mvout_cmd_rs2_local_addr_garbage = 11'h0; // @[LoopMatmul.scala:613:30]
wire [10:0] ln_mvout_cmd_rs2_local_addr_result_result_garbage = 11'h0; // @[LocalAddr.scala:108:26]
wire [10:0] ln_mvout_cmd_rs2_local_addr_result_garbage = 11'h0; // @[LocalAddr.scala:129:26]
wire [15:0] ln_mvout_cmd_rs2_hi_hi_1 = 16'h1; // @[LoopMatmul.scala:619:40]
wire [24:0] ln_mvout_cmd_rs2_hi_1 = 25'h200; // @[LoopMatmul.scala:619:40]
wire [6:0] mvout_cmd_inst_funct = 7'h3; // @[LoopMatmul.scala:558:23]
wire [6:0] ln_mvout_cmd_inst_funct = 7'h3; // @[LoopMatmul.scala:608:26]
wire [6:0] _io_cmd_bits_T_2_inst_funct = 7'h3; // @[Mux.scala:126:16]
wire [2:0] _ln_norm_cmds_WIRE_1 = 3'h2; // @[LoopMatmul.scala:577:37, :589:17]
wire [2:0] ln_norm_cmds_0_1 = 3'h2; // @[LoopMatmul.scala:577:29, :589:17]
wire [2:0] _next_cmd_max_T = 3'h2; // @[Util.scala:39:28]
wire [1:0] mvout_cmd_rs2_hi_hi = 2'h2; // @[LoopMatmul.scala:568:34]
wire [1:0] ln_mvout_cmd_rs2_hi_hi = 2'h2; // @[LoopMatmul.scala:619:40]
wire [1:0] next_cmd_max = 2'h2; // @[Util.scala:39:28]
wire [7:0] io_cmd_bits_status_zero1 = 8'h0; // @[LoopMatmul.scala:514:7]
wire [7:0] mvout_cmd_status_zero1 = 8'h0; // @[LoopMatmul.scala:558:23]
wire [7:0] ln_config_norm_status_zero1 = 8'h0; // @[LoopMatmul.scala:602:28]
wire [7:0] ln_mvout_cmd_status_zero1 = 8'h0; // @[LoopMatmul.scala:608:26]
wire [7:0] _io_cmd_bits_T_2_status_zero1 = 8'h0; // @[Mux.scala:126:16]
wire [7:0] _io_cmd_bits_T_3_status_zero1 = 8'h0; // @[Mux.scala:126:16]
wire [22:0] io_cmd_bits_status_zero2 = 23'h0; // @[LoopMatmul.scala:514:7]
wire [22:0] mvout_cmd_status_zero2 = 23'h0; // @[LoopMatmul.scala:558:23]
wire [22:0] ln_config_norm_status_zero2 = 23'h0; // @[LoopMatmul.scala:602:28]
wire [22:0] ln_mvout_cmd_status_zero2 = 23'h0; // @[LoopMatmul.scala:608:26]
wire [22:0] _io_cmd_bits_T_2_status_zero2 = 23'h0; // @[Mux.scala:126:16]
wire [22:0] _io_cmd_bits_T_3_status_zero2 = 23'h0; // @[Mux.scala:126:16]
wire [1:0] io_cmd_bits_status_dprv = 2'h0; // @[LoopMatmul.scala:514:7]
wire [1:0] io_cmd_bits_status_prv = 2'h0; // @[LoopMatmul.scala:514:7]
wire [1:0] io_cmd_bits_status_sxl = 2'h0; // @[LoopMatmul.scala:514:7]
wire [1:0] io_cmd_bits_status_uxl = 2'h0; // @[LoopMatmul.scala:514:7]
wire [1:0] io_cmd_bits_status_xs = 2'h0; // @[LoopMatmul.scala:514:7]
wire [1:0] io_cmd_bits_status_fs = 2'h0; // @[LoopMatmul.scala:514:7]
wire [1:0] io_cmd_bits_status_mpp = 2'h0; // @[LoopMatmul.scala:514:7]
wire [1:0] io_cmd_bits_status_vs = 2'h0; // @[LoopMatmul.scala:514:7]
wire [1:0] mvout_cmd_status_dprv = 2'h0; // @[LoopMatmul.scala:558:23]
wire [1:0] mvout_cmd_status_prv = 2'h0; // @[LoopMatmul.scala:558:23]
wire [1:0] mvout_cmd_status_sxl = 2'h0; // @[LoopMatmul.scala:558:23]
wire [1:0] mvout_cmd_status_uxl = 2'h0; // @[LoopMatmul.scala:558:23]
wire [1:0] mvout_cmd_status_xs = 2'h0; // @[LoopMatmul.scala:558:23]
wire [1:0] mvout_cmd_status_fs = 2'h0; // @[LoopMatmul.scala:558:23]
wire [1:0] mvout_cmd_status_mpp = 2'h0; // @[LoopMatmul.scala:558:23]
wire [1:0] mvout_cmd_status_vs = 2'h0; // @[LoopMatmul.scala:558:23]
wire [1:0] ln_config_norm_status_dprv = 2'h0; // @[LoopMatmul.scala:602:28]
wire [1:0] ln_config_norm_status_prv = 2'h0; // @[LoopMatmul.scala:602:28]
wire [1:0] ln_config_norm_status_sxl = 2'h0; // @[LoopMatmul.scala:602:28]
wire [1:0] ln_config_norm_status_uxl = 2'h0; // @[LoopMatmul.scala:602:28]
wire [1:0] ln_config_norm_status_xs = 2'h0; // @[LoopMatmul.scala:602:28]
wire [1:0] ln_config_norm_status_fs = 2'h0; // @[LoopMatmul.scala:602:28]
wire [1:0] ln_config_norm_status_mpp = 2'h0; // @[LoopMatmul.scala:602:28]
wire [1:0] ln_config_norm_status_vs = 2'h0; // @[LoopMatmul.scala:602:28]
wire [1:0] ln_mvout_cmd_status_dprv = 2'h0; // @[LoopMatmul.scala:608:26]
wire [1:0] ln_mvout_cmd_status_prv = 2'h0; // @[LoopMatmul.scala:608:26]
wire [1:0] ln_mvout_cmd_status_sxl = 2'h0; // @[LoopMatmul.scala:608:26]
wire [1:0] ln_mvout_cmd_status_uxl = 2'h0; // @[LoopMatmul.scala:608:26]
wire [1:0] ln_mvout_cmd_status_xs = 2'h0; // @[LoopMatmul.scala:608:26]
wire [1:0] ln_mvout_cmd_status_fs = 2'h0; // @[LoopMatmul.scala:608:26]
wire [1:0] ln_mvout_cmd_status_mpp = 2'h0; // @[LoopMatmul.scala:608:26]
wire [1:0] ln_mvout_cmd_status_vs = 2'h0; // @[LoopMatmul.scala:608:26]
wire [1:0] _io_cmd_bits_T_2_status_dprv = 2'h0; // @[Mux.scala:126:16]
wire [1:0] _io_cmd_bits_T_2_status_prv = 2'h0; // @[Mux.scala:126:16]
wire [1:0] _io_cmd_bits_T_2_status_sxl = 2'h0; // @[Mux.scala:126:16]
wire [1:0] _io_cmd_bits_T_2_status_uxl = 2'h0; // @[Mux.scala:126:16]
wire [1:0] _io_cmd_bits_T_2_status_xs = 2'h0; // @[Mux.scala:126:16]
wire [1:0] _io_cmd_bits_T_2_status_fs = 2'h0; // @[Mux.scala:126:16]
wire [1:0] _io_cmd_bits_T_2_status_mpp = 2'h0; // @[Mux.scala:126:16]
wire [1:0] _io_cmd_bits_T_2_status_vs = 2'h0; // @[Mux.scala:126:16]
wire [1:0] _io_cmd_bits_T_3_status_dprv = 2'h0; // @[Mux.scala:126:16]
wire [1:0] _io_cmd_bits_T_3_status_prv = 2'h0; // @[Mux.scala:126:16]
wire [1:0] _io_cmd_bits_T_3_status_sxl = 2'h0; // @[Mux.scala:126:16]
wire [1:0] _io_cmd_bits_T_3_status_uxl = 2'h0; // @[Mux.scala:126:16]
wire [1:0] _io_cmd_bits_T_3_status_xs = 2'h0; // @[Mux.scala:126:16]
wire [1:0] _io_cmd_bits_T_3_status_fs = 2'h0; // @[Mux.scala:126:16]
wire [1:0] _io_cmd_bits_T_3_status_mpp = 2'h0; // @[Mux.scala:126:16]
wire [1:0] _io_cmd_bits_T_3_status_vs = 2'h0; // @[Mux.scala:126:16]
wire [31:0] io_cmd_bits_status_isa = 32'h0; // @[LoopMatmul.scala:514:7]
wire [31:0] mvout_cmd_status_isa = 32'h0; // @[LoopMatmul.scala:558:23]
wire [31:0] ln_config_norm_rs1_q_const = 32'h0; // @[LoopMatmul.scala:596:32]
wire [31:0] ln_config_norm_status_isa = 32'h0; // @[LoopMatmul.scala:602:28]
wire [31:0] ln_mvout_cmd_status_isa = 32'h0; // @[LoopMatmul.scala:608:26]
wire [31:0] _io_cmd_bits_T_2_status_isa = 32'h0; // @[Mux.scala:126:16]
wire [31:0] _io_cmd_bits_T_3_status_isa = 32'h0; // @[Mux.scala:126:16]
wire [6:0] io_cmd_bits_inst_opcode = 7'h0; // @[LoopMatmul.scala:514:7]
wire [6:0] mvout_cmd_inst_opcode = 7'h0; // @[LoopMatmul.scala:558:23]
wire [6:0] ln_config_norm_inst_funct = 7'h0; // @[LoopMatmul.scala:602:28]
wire [6:0] ln_config_norm_inst_opcode = 7'h0; // @[LoopMatmul.scala:602:28]
wire [6:0] ln_mvout_cmd_inst_opcode = 7'h0; // @[LoopMatmul.scala:608:26]
wire [6:0] _io_cmd_bits_T_2_inst_opcode = 7'h0; // @[Mux.scala:126:16]
wire [6:0] _io_cmd_bits_T_3_inst_opcode = 7'h0; // @[Mux.scala:126:16]
wire io_cmd_bits_inst_xd = 1'h0; // @[LoopMatmul.scala:514:7]
wire io_cmd_bits_inst_xs1 = 1'h0; // @[LoopMatmul.scala:514:7]
wire io_cmd_bits_inst_xs2 = 1'h0; // @[LoopMatmul.scala:514:7]
wire io_cmd_bits_status_debug = 1'h0; // @[LoopMatmul.scala:514:7]
wire io_cmd_bits_status_cease = 1'h0; // @[LoopMatmul.scala:514:7]
wire io_cmd_bits_status_wfi = 1'h0; // @[LoopMatmul.scala:514:7]
wire io_cmd_bits_status_dv = 1'h0; // @[LoopMatmul.scala:514:7]
wire io_cmd_bits_status_v = 1'h0; // @[LoopMatmul.scala:514:7]
wire io_cmd_bits_status_sd = 1'h0; // @[LoopMatmul.scala:514:7]
wire io_cmd_bits_status_mpv = 1'h0; // @[LoopMatmul.scala:514:7]
wire io_cmd_bits_status_gva = 1'h0; // @[LoopMatmul.scala:514:7]
wire io_cmd_bits_status_mbe = 1'h0; // @[LoopMatmul.scala:514:7]
wire io_cmd_bits_status_sbe = 1'h0; // @[LoopMatmul.scala:514:7]
wire io_cmd_bits_status_sd_rv32 = 1'h0; // @[LoopMatmul.scala:514:7]
wire io_cmd_bits_status_tsr = 1'h0; // @[LoopMatmul.scala:514:7]
wire io_cmd_bits_status_tw = 1'h0; // @[LoopMatmul.scala:514:7]
wire io_cmd_bits_status_tvm = 1'h0; // @[LoopMatmul.scala:514:7]
wire io_cmd_bits_status_mxr = 1'h0; // @[LoopMatmul.scala:514:7]
wire io_cmd_bits_status_sum = 1'h0; // @[LoopMatmul.scala:514:7]
wire io_cmd_bits_status_mprv = 1'h0; // @[LoopMatmul.scala:514:7]
wire io_cmd_bits_status_spp = 1'h0; // @[LoopMatmul.scala:514:7]
wire io_cmd_bits_status_mpie = 1'h0; // @[LoopMatmul.scala:514:7]
wire io_cmd_bits_status_ube = 1'h0; // @[LoopMatmul.scala:514:7]
wire io_cmd_bits_status_spie = 1'h0; // @[LoopMatmul.scala:514:7]
wire io_cmd_bits_status_upie = 1'h0; // @[LoopMatmul.scala:514:7]
wire io_cmd_bits_status_mie = 1'h0; // @[LoopMatmul.scala:514:7]
wire io_cmd_bits_status_hie = 1'h0; // @[LoopMatmul.scala:514:7]
wire io_cmd_bits_status_sie = 1'h0; // @[LoopMatmul.scala:514:7]
wire io_cmd_bits_status_uie = 1'h0; // @[LoopMatmul.scala:514:7]
wire mvout_cmd_inst_xd = 1'h0; // @[LoopMatmul.scala:558:23]
wire mvout_cmd_inst_xs1 = 1'h0; // @[LoopMatmul.scala:558:23]
wire mvout_cmd_inst_xs2 = 1'h0; // @[LoopMatmul.scala:558:23]
wire mvout_cmd_status_debug = 1'h0; // @[LoopMatmul.scala:558:23]
wire mvout_cmd_status_cease = 1'h0; // @[LoopMatmul.scala:558:23]
wire mvout_cmd_status_wfi = 1'h0; // @[LoopMatmul.scala:558:23]
wire mvout_cmd_status_dv = 1'h0; // @[LoopMatmul.scala:558:23]
wire mvout_cmd_status_v = 1'h0; // @[LoopMatmul.scala:558:23]
wire mvout_cmd_status_sd = 1'h0; // @[LoopMatmul.scala:558:23]
wire mvout_cmd_status_mpv = 1'h0; // @[LoopMatmul.scala:558:23]
wire mvout_cmd_status_gva = 1'h0; // @[LoopMatmul.scala:558:23]
wire mvout_cmd_status_mbe = 1'h0; // @[LoopMatmul.scala:558:23]
wire mvout_cmd_status_sbe = 1'h0; // @[LoopMatmul.scala:558:23]
wire mvout_cmd_status_sd_rv32 = 1'h0; // @[LoopMatmul.scala:558:23]
wire mvout_cmd_status_tsr = 1'h0; // @[LoopMatmul.scala:558:23]
wire mvout_cmd_status_tw = 1'h0; // @[LoopMatmul.scala:558:23]
wire mvout_cmd_status_tvm = 1'h0; // @[LoopMatmul.scala:558:23]
wire mvout_cmd_status_mxr = 1'h0; // @[LoopMatmul.scala:558:23]
wire mvout_cmd_status_sum = 1'h0; // @[LoopMatmul.scala:558:23]
wire mvout_cmd_status_mprv = 1'h0; // @[LoopMatmul.scala:558:23]
wire mvout_cmd_status_spp = 1'h0; // @[LoopMatmul.scala:558:23]
wire mvout_cmd_status_mpie = 1'h0; // @[LoopMatmul.scala:558:23]
wire mvout_cmd_status_ube = 1'h0; // @[LoopMatmul.scala:558:23]
wire mvout_cmd_status_spie = 1'h0; // @[LoopMatmul.scala:558:23]
wire mvout_cmd_status_upie = 1'h0; // @[LoopMatmul.scala:558:23]
wire mvout_cmd_status_mie = 1'h0; // @[LoopMatmul.scala:558:23]
wire mvout_cmd_status_hie = 1'h0; // @[LoopMatmul.scala:558:23]
wire mvout_cmd_status_sie = 1'h0; // @[LoopMatmul.scala:558:23]
wire mvout_cmd_status_uie = 1'h0; // @[LoopMatmul.scala:558:23]
wire mvout_cmd_rs2_local_addr_accumulate = 1'h0; // @[LoopMatmul.scala:563:27]
wire mvout_cmd_rs2_local_addr_result_accumulate = 1'h0; // @[LocalAddr.scala:129:26]
wire ln_config_norm_rs1_q_const_type = 1'h0; // @[LoopMatmul.scala:596:32]
wire ln_config_norm_rs1_act_msb = 1'h0; // @[LoopMatmul.scala:596:32]
wire ln_config_norm_inst_xd = 1'h0; // @[LoopMatmul.scala:602:28]
wire ln_config_norm_inst_xs1 = 1'h0; // @[LoopMatmul.scala:602:28]
wire ln_config_norm_inst_xs2 = 1'h0; // @[LoopMatmul.scala:602:28]
wire ln_config_norm_status_debug = 1'h0; // @[LoopMatmul.scala:602:28]
wire ln_config_norm_status_cease = 1'h0; // @[LoopMatmul.scala:602:28]
wire ln_config_norm_status_wfi = 1'h0; // @[LoopMatmul.scala:602:28]
wire ln_config_norm_status_dv = 1'h0; // @[LoopMatmul.scala:602:28]
wire ln_config_norm_status_v = 1'h0; // @[LoopMatmul.scala:602:28]
wire ln_config_norm_status_sd = 1'h0; // @[LoopMatmul.scala:602:28]
wire ln_config_norm_status_mpv = 1'h0; // @[LoopMatmul.scala:602:28]
wire ln_config_norm_status_gva = 1'h0; // @[LoopMatmul.scala:602:28]
wire ln_config_norm_status_mbe = 1'h0; // @[LoopMatmul.scala:602:28]
wire ln_config_norm_status_sbe = 1'h0; // @[LoopMatmul.scala:602:28]
wire ln_config_norm_status_sd_rv32 = 1'h0; // @[LoopMatmul.scala:602:28]
wire ln_config_norm_status_tsr = 1'h0; // @[LoopMatmul.scala:602:28]
wire ln_config_norm_status_tw = 1'h0; // @[LoopMatmul.scala:602:28]
wire ln_config_norm_status_tvm = 1'h0; // @[LoopMatmul.scala:602:28]
wire ln_config_norm_status_mxr = 1'h0; // @[LoopMatmul.scala:602:28]
wire ln_config_norm_status_sum = 1'h0; // @[LoopMatmul.scala:602:28]
wire ln_config_norm_status_mprv = 1'h0; // @[LoopMatmul.scala:602:28]
wire ln_config_norm_status_spp = 1'h0; // @[LoopMatmul.scala:602:28]
wire ln_config_norm_status_mpie = 1'h0; // @[LoopMatmul.scala:602:28]
wire ln_config_norm_status_ube = 1'h0; // @[LoopMatmul.scala:602:28]
wire ln_config_norm_status_spie = 1'h0; // @[LoopMatmul.scala:602:28]
wire ln_config_norm_status_upie = 1'h0; // @[LoopMatmul.scala:602:28]
wire ln_config_norm_status_mie = 1'h0; // @[LoopMatmul.scala:602:28]
wire ln_config_norm_status_hie = 1'h0; // @[LoopMatmul.scala:602:28]
wire ln_config_norm_status_sie = 1'h0; // @[LoopMatmul.scala:602:28]
wire ln_config_norm_status_uie = 1'h0; // @[LoopMatmul.scala:602:28]
wire ln_mvout_cmd_inst_xd = 1'h0; // @[LoopMatmul.scala:608:26]
wire ln_mvout_cmd_inst_xs1 = 1'h0; // @[LoopMatmul.scala:608:26]
wire ln_mvout_cmd_inst_xs2 = 1'h0; // @[LoopMatmul.scala:608:26]
wire ln_mvout_cmd_status_debug = 1'h0; // @[LoopMatmul.scala:608:26]
wire ln_mvout_cmd_status_cease = 1'h0; // @[LoopMatmul.scala:608:26]
wire ln_mvout_cmd_status_wfi = 1'h0; // @[LoopMatmul.scala:608:26]
wire ln_mvout_cmd_status_dv = 1'h0; // @[LoopMatmul.scala:608:26]
wire ln_mvout_cmd_status_v = 1'h0; // @[LoopMatmul.scala:608:26]
wire ln_mvout_cmd_status_sd = 1'h0; // @[LoopMatmul.scala:608:26]
wire ln_mvout_cmd_status_mpv = 1'h0; // @[LoopMatmul.scala:608:26]
wire ln_mvout_cmd_status_gva = 1'h0; // @[LoopMatmul.scala:608:26]
wire ln_mvout_cmd_status_mbe = 1'h0; // @[LoopMatmul.scala:608:26]
wire ln_mvout_cmd_status_sbe = 1'h0; // @[LoopMatmul.scala:608:26]
wire ln_mvout_cmd_status_sd_rv32 = 1'h0; // @[LoopMatmul.scala:608:26]
wire ln_mvout_cmd_status_tsr = 1'h0; // @[LoopMatmul.scala:608:26]
wire ln_mvout_cmd_status_tw = 1'h0; // @[LoopMatmul.scala:608:26]
wire ln_mvout_cmd_status_tvm = 1'h0; // @[LoopMatmul.scala:608:26]
wire ln_mvout_cmd_status_mxr = 1'h0; // @[LoopMatmul.scala:608:26]
wire ln_mvout_cmd_status_sum = 1'h0; // @[LoopMatmul.scala:608:26]
wire ln_mvout_cmd_status_mprv = 1'h0; // @[LoopMatmul.scala:608:26]
wire ln_mvout_cmd_status_spp = 1'h0; // @[LoopMatmul.scala:608:26]
wire ln_mvout_cmd_status_mpie = 1'h0; // @[LoopMatmul.scala:608:26]
wire ln_mvout_cmd_status_ube = 1'h0; // @[LoopMatmul.scala:608:26]
wire ln_mvout_cmd_status_spie = 1'h0; // @[LoopMatmul.scala:608:26]
wire ln_mvout_cmd_status_upie = 1'h0; // @[LoopMatmul.scala:608:26]
wire ln_mvout_cmd_status_mie = 1'h0; // @[LoopMatmul.scala:608:26]
wire ln_mvout_cmd_status_hie = 1'h0; // @[LoopMatmul.scala:608:26]
wire ln_mvout_cmd_status_sie = 1'h0; // @[LoopMatmul.scala:608:26]
wire ln_mvout_cmd_status_uie = 1'h0; // @[LoopMatmul.scala:608:26]
wire ln_mvout_cmd_rs2_local_addr_accumulate = 1'h0; // @[LoopMatmul.scala:613:30]
wire ln_mvout_cmd_rs2_local_addr_result_accumulate = 1'h0; // @[LocalAddr.scala:129:26]
wire _io_cmd_bits_T_2_inst_xd = 1'h0; // @[Mux.scala:126:16]
wire _io_cmd_bits_T_2_inst_xs1 = 1'h0; // @[Mux.scala:126:16]
wire _io_cmd_bits_T_2_inst_xs2 = 1'h0; // @[Mux.scala:126:16]
wire _io_cmd_bits_T_2_status_debug = 1'h0; // @[Mux.scala:126:16]
wire _io_cmd_bits_T_2_status_cease = 1'h0; // @[Mux.scala:126:16]
wire _io_cmd_bits_T_2_status_wfi = 1'h0; // @[Mux.scala:126:16]
wire _io_cmd_bits_T_2_status_dv = 1'h0; // @[Mux.scala:126:16]
wire _io_cmd_bits_T_2_status_v = 1'h0; // @[Mux.scala:126:16]
wire _io_cmd_bits_T_2_status_sd = 1'h0; // @[Mux.scala:126:16]
wire _io_cmd_bits_T_2_status_mpv = 1'h0; // @[Mux.scala:126:16]
wire _io_cmd_bits_T_2_status_gva = 1'h0; // @[Mux.scala:126:16]
wire _io_cmd_bits_T_2_status_mbe = 1'h0; // @[Mux.scala:126:16]
wire _io_cmd_bits_T_2_status_sbe = 1'h0; // @[Mux.scala:126:16]
wire _io_cmd_bits_T_2_status_sd_rv32 = 1'h0; // @[Mux.scala:126:16]
wire _io_cmd_bits_T_2_status_tsr = 1'h0; // @[Mux.scala:126:16]
wire _io_cmd_bits_T_2_status_tw = 1'h0; // @[Mux.scala:126:16]
wire _io_cmd_bits_T_2_status_tvm = 1'h0; // @[Mux.scala:126:16]
wire _io_cmd_bits_T_2_status_mxr = 1'h0; // @[Mux.scala:126:16]
wire _io_cmd_bits_T_2_status_sum = 1'h0; // @[Mux.scala:126:16]
wire _io_cmd_bits_T_2_status_mprv = 1'h0; // @[Mux.scala:126:16]
wire _io_cmd_bits_T_2_status_spp = 1'h0; // @[Mux.scala:126:16]
wire _io_cmd_bits_T_2_status_mpie = 1'h0; // @[Mux.scala:126:16]
wire _io_cmd_bits_T_2_status_ube = 1'h0; // @[Mux.scala:126:16]
wire _io_cmd_bits_T_2_status_spie = 1'h0; // @[Mux.scala:126:16]
wire _io_cmd_bits_T_2_status_upie = 1'h0; // @[Mux.scala:126:16]
wire _io_cmd_bits_T_2_status_mie = 1'h0; // @[Mux.scala:126:16]
wire _io_cmd_bits_T_2_status_hie = 1'h0; // @[Mux.scala:126:16]
wire _io_cmd_bits_T_2_status_sie = 1'h0; // @[Mux.scala:126:16]
wire _io_cmd_bits_T_2_status_uie = 1'h0; // @[Mux.scala:126:16]
wire _io_cmd_bits_T_3_inst_xd = 1'h0; // @[Mux.scala:126:16]
wire _io_cmd_bits_T_3_inst_xs1 = 1'h0; // @[Mux.scala:126:16]
wire _io_cmd_bits_T_3_inst_xs2 = 1'h0; // @[Mux.scala:126:16]
wire _io_cmd_bits_T_3_status_debug = 1'h0; // @[Mux.scala:126:16]
wire _io_cmd_bits_T_3_status_cease = 1'h0; // @[Mux.scala:126:16]
wire _io_cmd_bits_T_3_status_wfi = 1'h0; // @[Mux.scala:126:16]
wire _io_cmd_bits_T_3_status_dv = 1'h0; // @[Mux.scala:126:16]
wire _io_cmd_bits_T_3_status_v = 1'h0; // @[Mux.scala:126:16]
wire _io_cmd_bits_T_3_status_sd = 1'h0; // @[Mux.scala:126:16]
wire _io_cmd_bits_T_3_status_mpv = 1'h0; // @[Mux.scala:126:16]
wire _io_cmd_bits_T_3_status_gva = 1'h0; // @[Mux.scala:126:16]
wire _io_cmd_bits_T_3_status_mbe = 1'h0; // @[Mux.scala:126:16]
wire _io_cmd_bits_T_3_status_sbe = 1'h0; // @[Mux.scala:126:16]
wire _io_cmd_bits_T_3_status_sd_rv32 = 1'h0; // @[Mux.scala:126:16]
wire _io_cmd_bits_T_3_status_tsr = 1'h0; // @[Mux.scala:126:16]
wire _io_cmd_bits_T_3_status_tw = 1'h0; // @[Mux.scala:126:16]
wire _io_cmd_bits_T_3_status_tvm = 1'h0; // @[Mux.scala:126:16]
wire _io_cmd_bits_T_3_status_mxr = 1'h0; // @[Mux.scala:126:16]
wire _io_cmd_bits_T_3_status_sum = 1'h0; // @[Mux.scala:126:16]
wire _io_cmd_bits_T_3_status_mprv = 1'h0; // @[Mux.scala:126:16]
wire _io_cmd_bits_T_3_status_spp = 1'h0; // @[Mux.scala:126:16]
wire _io_cmd_bits_T_3_status_mpie = 1'h0; // @[Mux.scala:126:16]
wire _io_cmd_bits_T_3_status_ube = 1'h0; // @[Mux.scala:126:16]
wire _io_cmd_bits_T_3_status_spie = 1'h0; // @[Mux.scala:126:16]
wire _io_cmd_bits_T_3_status_upie = 1'h0; // @[Mux.scala:126:16]
wire _io_cmd_bits_T_3_status_mie = 1'h0; // @[Mux.scala:126:16]
wire _io_cmd_bits_T_3_status_hie = 1'h0; // @[Mux.scala:126:16]
wire _io_cmd_bits_T_3_status_sie = 1'h0; // @[Mux.scala:126:16]
wire _io_cmd_bits_T_3_status_uie = 1'h0; // @[Mux.scala:126:16]
wire _next_i_T_2 = 1'h0; // @[Util.scala:42:8]
wire _next_j_T_9 = 1'h0; // @[Util.scala:42:8]
wire [4:0] io_cmd_bits_inst_rs2 = 5'h0; // @[LoopMatmul.scala:514:7]
wire [4:0] io_cmd_bits_inst_rs1 = 5'h0; // @[LoopMatmul.scala:514:7]
wire [4:0] io_cmd_bits_inst_rd = 5'h0; // @[LoopMatmul.scala:514:7]
wire [4:0] mvout_cmd_inst_rs2 = 5'h0; // @[LoopMatmul.scala:558:23]
wire [4:0] mvout_cmd_inst_rs1 = 5'h0; // @[LoopMatmul.scala:558:23]
wire [4:0] mvout_cmd_inst_rd = 5'h0; // @[LoopMatmul.scala:558:23]
wire [4:0] ln_config_norm_inst_rs2 = 5'h0; // @[LoopMatmul.scala:602:28]
wire [4:0] ln_config_norm_inst_rs1 = 5'h0; // @[LoopMatmul.scala:602:28]
wire [4:0] ln_config_norm_inst_rd = 5'h0; // @[LoopMatmul.scala:602:28]
wire [4:0] ln_mvout_cmd_inst_rs2 = 5'h0; // @[LoopMatmul.scala:608:26]
wire [4:0] ln_mvout_cmd_inst_rs1 = 5'h0; // @[LoopMatmul.scala:608:26]
wire [4:0] ln_mvout_cmd_inst_rd = 5'h0; // @[LoopMatmul.scala:608:26]
wire _io_req_ready_T; // @[LoopMatmul.scala:621:25]
wire [4:0] _io_cmd_bits_T_2_inst_rs2 = 5'h0; // @[Mux.scala:126:16]
wire [4:0] _io_cmd_bits_T_2_inst_rs1 = 5'h0; // @[Mux.scala:126:16]
wire [4:0] _io_cmd_bits_T_2_inst_rd = 5'h0; // @[Mux.scala:126:16]
wire [4:0] _io_cmd_bits_T_3_inst_rs2 = 5'h0; // @[Mux.scala:126:16]
wire [4:0] _io_cmd_bits_T_3_inst_rs1 = 5'h0; // @[Mux.scala:126:16]
wire [4:0] _io_cmd_bits_T_3_inst_rd = 5'h0; // @[Mux.scala:126:16]
wire _io_cmd_valid_T_5; // @[LoopMatmul.scala:636:68]
wire [6:0] _io_cmd_bits_T_3_inst_funct; // @[Mux.scala:126:16]
wire [63:0] _io_cmd_bits_T_3_rs1; // @[Mux.scala:126:16]
wire [63:0] _io_cmd_bits_T_3_rs2; // @[Mux.scala:126:16]
wire _io_idle_T; // @[LoopMatmul.scala:624:20]
wire io_req_ready_0; // @[LoopMatmul.scala:514:7]
wire [6:0] io_cmd_bits_inst_funct_0; // @[LoopMatmul.scala:514:7]
wire [63:0] io_cmd_bits_rs1_0; // @[LoopMatmul.scala:514:7]
wire [63:0] io_cmd_bits_rs2_0; // @[LoopMatmul.scala:514:7]
wire io_cmd_valid_0; // @[LoopMatmul.scala:514:7]
wire [15:0] io_j; // @[LoopMatmul.scala:514:7]
wire [15:0] io_i; // @[LoopMatmul.scala:514:7]
wire io_idle_0; // @[LoopMatmul.scala:514:7]
wire io_loop_id_0; // @[LoopMatmul.scala:514:7]
reg [1:0] state; // @[LoopMatmul.scala:538:22]
reg [15:0] req_max_k; // @[LoopMatmul.scala:540:16]
reg [15:0] req_max_j; // @[LoopMatmul.scala:540:16]
reg [15:0] req_max_i; // @[LoopMatmul.scala:540:16]
reg [3:0] req_pad_j; // @[LoopMatmul.scala:540:16]
reg [3:0] req_pad_i; // @[LoopMatmul.scala:540:16]
reg [39:0] req_dram_addr; // @[LoopMatmul.scala:540:16]
reg [39:0] req_dram_stride; // @[LoopMatmul.scala:540:16]
reg req_full_c; // @[LoopMatmul.scala:540:16]
wire mvout_cmd_rs2_local_addr_result_read_full_acc_row = req_full_c; // @[LoopMatmul.scala:540:16]
wire ln_mvout_cmd_rs2_local_addr_result_read_full_acc_row = req_full_c; // @[LoopMatmul.scala:540:16]
reg [2:0] req_act; // @[LoopMatmul.scala:540:16]
reg [9:0] req_addr_start; // @[LoopMatmul.scala:540:16]
reg req_loop_id; // @[LoopMatmul.scala:540:16]
assign io_loop_id_0 = req_loop_id; // @[LoopMatmul.scala:514:7, :540:16]
reg req_is_resadd; // @[LoopMatmul.scala:540:16]
wire _max_blocks_T = req_max_j < 16'h5; // @[LoopMatmul.scala:540:16, :542:55]
wire [15:0] _max_blocks_T_1 = _max_blocks_T ? req_max_j : 16'h4; // @[LoopMatmul.scala:540:16, :542:{44,55}]
wire [15:0] max_blocks = req_full_c ? 16'h1 : _max_blocks_T_1; // @[LoopMatmul.scala:540:16, :542:{23,44}]
reg [15:0] j; // @[LoopMatmul.scala:545:14]
assign io_j = j; // @[LoopMatmul.scala:514:7, :545:14]
reg [15:0] i; // @[LoopMatmul.scala:546:14]
assign io_i = i; // @[LoopMatmul.scala:514:7, :546:14]
wire [55:0] _GEN_3 = {40'h0, i} * {16'h0, req_dram_stride}; // @[LoopMatmul.scala:540:16, :546:14, :550:40]
wire [55:0] _dram_offset_T; // @[LoopMatmul.scala:550:40]
assign _dram_offset_T = _GEN_3; // @[LoopMatmul.scala:550:40]
wire [55:0] _dram_offset_T_5; // @[LoopMatmul.scala:551:8]
assign _dram_offset_T_5 = _GEN_3; // @[LoopMatmul.scala:550:40, :551:8]
wire [55:0] _ln_dram_offset_T; // @[LoopMatmul.scala:593:28]
assign _ln_dram_offset_T = _GEN_3; // @[LoopMatmul.scala:550:40, :593:28]
wire [56:0] _GEN_4 = {41'h0, j}; // @[LoopMatmul.scala:545:14, :550:58]
wire [56:0] _dram_offset_T_1 = {1'h0, _dram_offset_T} + _GEN_4; // @[LoopMatmul.scala:550:{40,58}]
wire [55:0] _dram_offset_T_2 = _dram_offset_T_1[55:0]; // @[LoopMatmul.scala:550:58]
wire [60:0] _dram_offset_T_3 = {1'h0, _dram_offset_T_2, 4'h0}; // @[LoopMatmul.scala:550:{58,63}]
wire [63:0] _dram_offset_T_4 = {1'h0, _dram_offset_T_3, 2'h0}; // @[LoopMatmul.scala:550:{63,78}]
wire [56:0] _dram_offset_T_6 = {1'h0, _dram_offset_T_5} + _GEN_4; // @[LoopMatmul.scala:550:58, :551:{8,26}]
wire [55:0] _dram_offset_T_7 = _dram_offset_T_6[55:0]; // @[LoopMatmul.scala:551:26]
wire [60:0] _dram_offset_T_8 = {1'h0, _dram_offset_T_7, 4'h0}; // @[LoopMatmul.scala:551:{26,31}]
wire [61:0] _dram_offset_T_9 = {1'h0, _dram_offset_T_8}; // @[LoopMatmul.scala:551:{31,46}]
wire [63:0] dram_offset = req_full_c ? _dram_offset_T_4 : {2'h0, _dram_offset_T_9}; // @[LoopMatmul.scala:540:16, :550:{24,78}, :551:46]
wire [63:0] _dram_addr_T = {32'h0, dram_offset[31:0]}; // @[LoopMatmul.scala:550:24, :1139:17]
wire [64:0] _GEN_5 = {25'h0, req_dram_addr}; // @[LoopMatmul.scala:540:16, :552:33]
wire [64:0] _dram_addr_T_1 = _GEN_5 + {1'h0, _dram_addr_T}; // @[LoopMatmul.scala:552:33, :1139:17]
wire [63:0] dram_addr = _dram_addr_T_1[63:0]; // @[LoopMatmul.scala:552:33]
wire [63:0] mvout_cmd_rs1 = dram_addr; // @[LoopMatmul.scala:552:33, :558:23]
wire [31:0] _GEN_6 = {16'h0, i} * {16'h0, req_max_j}; // @[LoopMatmul.scala:540:16, :546:14, :553:37]
wire [31:0] _sp_addr_T; // @[LoopMatmul.scala:553:37]
assign _sp_addr_T = _GEN_6; // @[LoopMatmul.scala:553:37]
wire [31:0] _ln_sp_addr_T; // @[LoopMatmul.scala:587:41]
assign _ln_sp_addr_T = _GEN_6; // @[LoopMatmul.scala:553:37, :587:41]
wire [32:0] _GEN_7 = {17'h0, j}; // @[LoopMatmul.scala:545:14, :553:49]
wire [32:0] _sp_addr_T_1 = {1'h0, _sp_addr_T} + _GEN_7; // @[LoopMatmul.scala:553:{37,49}]
wire [31:0] _sp_addr_T_2 = _sp_addr_T_1[31:0]; // @[LoopMatmul.scala:553:49]
wire [36:0] _sp_addr_T_3 = {1'h0, _sp_addr_T_2, 4'h0}; // @[LoopMatmul.scala:553:{49,54}]
wire [37:0] _sp_addr_T_4 = {28'h0, req_addr_start} + {1'h0, _sp_addr_T_3}; // @[LoopMatmul.scala:540:16, :553:{32,54}]
wire [36:0] sp_addr = _sp_addr_T_4[36:0]; // @[LoopMatmul.scala:553:32]
wire [16:0] _GEN_8 = {1'h0, j}; // @[LoopMatmul.scala:545:14, :554:22]
wire [16:0] _GEN_9 = _GEN_8 + {1'h0, max_blocks}; // @[LoopMatmul.scala:542:23, :554:22]
wire [16:0] _blocks_T; // @[LoopMatmul.scala:554:22]
assign _blocks_T = _GEN_9; // @[LoopMatmul.scala:554:22]
wire [16:0] _ln_norm_cmd_T; // @[LoopMatmul.scala:588:27]
assign _ln_norm_cmd_T = _GEN_9; // @[LoopMatmul.scala:554:22, :588:27]
wire [16:0] _next_j_T_1; // @[Util.scala:41:15]
assign _next_j_T_1 = _GEN_9; // @[Util.scala:41:15]
wire [16:0] _next_j_T_4; // @[Util.scala:43:11]
assign _next_j_T_4 = _GEN_9; // @[Util.scala:43:11]
wire [16:0] _next_j_T_7; // @[Util.scala:41:15]
assign _next_j_T_7 = _GEN_9; // @[Util.scala:41:15]
wire [16:0] _next_j_T_10; // @[Util.scala:43:11]
assign _next_j_T_10 = _GEN_9; // @[Util.scala:43:11]
wire [15:0] _blocks_T_1 = _blocks_T[15:0]; // @[LoopMatmul.scala:554:22]
wire _blocks_T_2 = _blocks_T_1 <= req_max_j; // @[LoopMatmul.scala:540:16, :554:{22,35}]
wire [16:0] _GEN_10 = {1'h0, req_max_j}; // @[LoopMatmul.scala:540:16, :554:70]
wire [16:0] _blocks_T_3 = _GEN_10 - _GEN_8; // @[LoopMatmul.scala:554:{22,70}]
wire [15:0] _blocks_T_4 = _blocks_T_3[15:0]; // @[LoopMatmul.scala:554:70]
wire [15:0] blocks = _blocks_T_2 ? max_blocks : _blocks_T_4; // @[LoopMatmul.scala:542:23, :554:{19,35,70}]
wire [20:0] _cols_T = {1'h0, blocks, 4'h0}; // @[LoopMatmul.scala:554:19, :555:22]
wire [16:0] _GEN_11 = _GEN_8 + {1'h0, blocks}; // @[LoopMatmul.scala:554:{19,22}, :555:46]
wire [16:0] _cols_T_1; // @[LoopMatmul.scala:555:46]
assign _cols_T_1 = _GEN_11; // @[LoopMatmul.scala:555:46]
wire [16:0] _ex_ahead_T_6; // @[LoopMatmul.scala:630:23]
assign _ex_ahead_T_6 = _GEN_11; // @[LoopMatmul.scala:555:46, :630:23]
wire [16:0] _ex_ahead_T_9; // @[LoopMatmul.scala:631:27]
assign _ex_ahead_T_9 = _GEN_11; // @[LoopMatmul.scala:555:46, :631:27]
wire [16:0] _ex_ahead_T_22; // @[LoopMatmul.scala:633:83]
assign _ex_ahead_T_22 = _GEN_11; // @[LoopMatmul.scala:555:46, :633:83]
wire [15:0] _cols_T_2 = _cols_T_1[15:0]; // @[LoopMatmul.scala:555:46]
wire _cols_T_3 = _cols_T_2 >= req_max_j; // @[LoopMatmul.scala:540:16, :555:{46,55}]
wire [3:0] _cols_T_4 = _cols_T_3 ? req_pad_j : 4'h0; // @[LoopMatmul.scala:540:16, :555:{43,55}]
wire [21:0] _cols_T_5 = {1'h0, _cols_T} - {18'h0, _cols_T_4}; // @[LoopMatmul.scala:555:{22,38,43}]
wire [20:0] cols = _cols_T_5[20:0]; // @[LoopMatmul.scala:555:38]
wire [16:0] _GEN_12 = {1'h0, req_max_i} - 17'h1; // @[LoopMatmul.scala:540:16, :556:48]
wire [16:0] _rows_T; // @[LoopMatmul.scala:556:48]
assign _rows_T = _GEN_12; // @[LoopMatmul.scala:556:48]
wire [16:0] _next_i_max_T; // @[Util.scala:39:28]
assign _next_i_max_T = _GEN_12; // @[Util.scala:39:28]
wire [16:0] _next_i_max_T_1; // @[Util.scala:39:28]
assign _next_i_max_T_1 = _GEN_12; // @[Util.scala:39:28]
wire [15:0] _rows_T_1 = _rows_T[15:0]; // @[LoopMatmul.scala:556:48]
wire _rows_T_2 = i == _rows_T_1; // @[LoopMatmul.scala:546:14, :556:{35,48}]
wire [3:0] _rows_T_3 = _rows_T_2 ? req_pad_i : 4'h0; // @[LoopMatmul.scala:540:16, :556:{32,35}]
wire [5:0] _rows_T_4 = 6'h10 - {2'h0, _rows_T_3}; // @[LoopMatmul.scala:556:{27,32}]
wire [4:0] rows = _rows_T_4[4:0]; // @[LoopMatmul.scala:556:27]
wire [4:0] mvout_cmd_rs2_num_rows = rows; // @[LoopMatmul.scala:556:27, :563:27]
wire [63:0] _mvout_cmd_rs2_T_2; // @[LoopMatmul.scala:568:34]
wire [63:0] mvout_cmd_rs2; // @[LoopMatmul.scala:558:23]
wire [6:0] mvout_cmd_rs2_lo_hi_1 = mvout_cmd_rs2_num_cols; // @[LoopMatmul.scala:563:27, :568:34]
wire [2:0] mvout_cmd_rs2_local_addr_result_norm_cmd; // @[LocalAddr.scala:129:26]
wire [2:0] _mvout_cmd_rs2_T = mvout_cmd_rs2_local_addr_norm_cmd; // @[LoopMatmul.scala:563:27, :568:34]
wire mvout_cmd_rs2_local_addr_result_garbage_bit; // @[LocalAddr.scala:129:26]
wire [13:0] mvout_cmd_rs2_local_addr_result_data; // @[LocalAddr.scala:129:26]
wire mvout_cmd_rs2_local_addr_read_full_acc_row; // @[LoopMatmul.scala:563:27]
wire mvout_cmd_rs2_local_addr_garbage_bit; // @[LoopMatmul.scala:563:27]
wire [13:0] mvout_cmd_rs2_local_addr_data; // @[LoopMatmul.scala:563:27]
assign mvout_cmd_rs2_num_cols = cols[6:0]; // @[LoopMatmul.scala:555:38, :563:27, :566:26]
wire _mvout_cmd_rs2_local_addr_result_result_T_6; // @[LocalAddr.scala:108:37]
wire [6:0] ln_mvout_cmd_rs2_num_cols = cols[6:0]; // @[LoopMatmul.scala:555:38, :566:26, :613:30]
wire _mvout_cmd_rs2_local_addr_result_result_T_5; // @[LocalAddr.scala:108:37]
wire mvout_cmd_rs2_local_addr_result_result_is_acc_addr = _mvout_cmd_rs2_local_addr_result_result_WIRE_is_acc_addr; // @[LocalAddr.scala:108:{26,37}]
wire _mvout_cmd_rs2_local_addr_result_result_T_4; // @[LocalAddr.scala:108:37]
wire mvout_cmd_rs2_local_addr_result_result_accumulate = _mvout_cmd_rs2_local_addr_result_result_WIRE_accumulate; // @[LocalAddr.scala:108:{26,37}]
wire [2:0] _mvout_cmd_rs2_local_addr_result_result_WIRE_3; // @[LocalAddr.scala:108:37]
wire mvout_cmd_rs2_local_addr_result_result_read_full_acc_row = _mvout_cmd_rs2_local_addr_result_result_WIRE_read_full_acc_row; // @[LocalAddr.scala:108:{26,37}]
wire [10:0] _mvout_cmd_rs2_local_addr_result_result_T_2; // @[LocalAddr.scala:108:37]
wire [2:0] mvout_cmd_rs2_local_addr_result_result_norm_cmd = _mvout_cmd_rs2_local_addr_result_result_WIRE_norm_cmd; // @[LocalAddr.scala:108:{26,37}]
wire _mvout_cmd_rs2_local_addr_result_result_T_1; // @[LocalAddr.scala:108:37]
wire [13:0] _mvout_cmd_rs2_local_addr_result_result_T; // @[LocalAddr.scala:108:37]
wire mvout_cmd_rs2_local_addr_result_result_garbage_bit = _mvout_cmd_rs2_local_addr_result_result_WIRE_garbage_bit; // @[LocalAddr.scala:108:{26,37}]
wire [13:0] mvout_cmd_rs2_local_addr_result_result_data = _mvout_cmd_rs2_local_addr_result_result_WIRE_data; // @[LocalAddr.scala:108:{26,37}]
wire [31:0] _mvout_cmd_rs2_local_addr_result_result_WIRE_1 = sp_addr[31:0]; // @[LoopMatmul.scala:553:32]
assign _mvout_cmd_rs2_local_addr_result_result_T = _mvout_cmd_rs2_local_addr_result_result_WIRE_1[13:0]; // @[LocalAddr.scala:108:37]
assign _mvout_cmd_rs2_local_addr_result_result_WIRE_data = _mvout_cmd_rs2_local_addr_result_result_T; // @[LocalAddr.scala:108:37]
assign _mvout_cmd_rs2_local_addr_result_result_T_1 = _mvout_cmd_rs2_local_addr_result_result_WIRE_1[14]; // @[LocalAddr.scala:108:37]
assign _mvout_cmd_rs2_local_addr_result_result_WIRE_garbage_bit = _mvout_cmd_rs2_local_addr_result_result_T_1; // @[LocalAddr.scala:108:37]
assign _mvout_cmd_rs2_local_addr_result_result_T_2 = _mvout_cmd_rs2_local_addr_result_result_WIRE_1[25:15]; // @[LocalAddr.scala:108:37]
wire [10:0] _mvout_cmd_rs2_local_addr_result_result_WIRE_garbage = _mvout_cmd_rs2_local_addr_result_result_T_2; // @[LocalAddr.scala:108:37]
wire [2:0] _mvout_cmd_rs2_local_addr_result_result_T_3 = _mvout_cmd_rs2_local_addr_result_result_WIRE_1[28:26]; // @[LocalAddr.scala:108:37]
wire [2:0] _mvout_cmd_rs2_local_addr_result_result_WIRE_2 = _mvout_cmd_rs2_local_addr_result_result_T_3; // @[LocalAddr.scala:108:37]
assign _mvout_cmd_rs2_local_addr_result_result_WIRE_3 = _mvout_cmd_rs2_local_addr_result_result_WIRE_2; // @[LocalAddr.scala:108:37]
assign _mvout_cmd_rs2_local_addr_result_result_WIRE_norm_cmd = _mvout_cmd_rs2_local_addr_result_result_WIRE_3; // @[LocalAddr.scala:108:37]
assign _mvout_cmd_rs2_local_addr_result_result_T_4 = _mvout_cmd_rs2_local_addr_result_result_WIRE_1[29]; // @[LocalAddr.scala:108:37]
assign _mvout_cmd_rs2_local_addr_result_result_WIRE_read_full_acc_row = _mvout_cmd_rs2_local_addr_result_result_T_4; // @[LocalAddr.scala:108:37]
assign _mvout_cmd_rs2_local_addr_result_result_T_5 = _mvout_cmd_rs2_local_addr_result_result_WIRE_1[30]; // @[LocalAddr.scala:108:37]
assign _mvout_cmd_rs2_local_addr_result_result_WIRE_accumulate = _mvout_cmd_rs2_local_addr_result_result_T_5; // @[LocalAddr.scala:108:37]
assign _mvout_cmd_rs2_local_addr_result_result_T_6 = _mvout_cmd_rs2_local_addr_result_result_WIRE_1[31]; // @[LocalAddr.scala:108:37]
assign _mvout_cmd_rs2_local_addr_result_result_WIRE_is_acc_addr = _mvout_cmd_rs2_local_addr_result_result_T_6; // @[LocalAddr.scala:108:37]
assign mvout_cmd_rs2_local_addr_result_norm_cmd = mvout_cmd_rs2_local_addr_result_result_norm_cmd; // @[LocalAddr.scala:108:26, :129:26]
assign mvout_cmd_rs2_local_addr_result_garbage_bit = mvout_cmd_rs2_local_addr_result_result_garbage_bit; // @[LocalAddr.scala:108:26, :129:26]
assign mvout_cmd_rs2_local_addr_result_data = mvout_cmd_rs2_local_addr_result_result_data; // @[LocalAddr.scala:108:26, :129:26]
assign mvout_cmd_rs2_local_addr_read_full_acc_row = mvout_cmd_rs2_local_addr_result_read_full_acc_row; // @[LoopMatmul.scala:563:27]
assign mvout_cmd_rs2_local_addr_norm_cmd = mvout_cmd_rs2_local_addr_result_norm_cmd; // @[LoopMatmul.scala:563:27]
assign mvout_cmd_rs2_local_addr_garbage_bit = mvout_cmd_rs2_local_addr_result_garbage_bit; // @[LoopMatmul.scala:563:27]
assign mvout_cmd_rs2_local_addr_data = mvout_cmd_rs2_local_addr_result_data; // @[LoopMatmul.scala:563:27]
wire [11:0] mvout_cmd_rs2_lo_hi = {11'h0, mvout_cmd_rs2_local_addr_garbage_bit}; // @[LoopMatmul.scala:563:27, :568:34]
wire [25:0] mvout_cmd_rs2_lo = {mvout_cmd_rs2_lo_hi, mvout_cmd_rs2_local_addr_data}; // @[LoopMatmul.scala:563:27, :568:34]
wire [3:0] mvout_cmd_rs2_hi_lo = {mvout_cmd_rs2_local_addr_read_full_acc_row, _mvout_cmd_rs2_T}; // @[LoopMatmul.scala:563:27, :568:34]
wire [5:0] mvout_cmd_rs2_hi = {2'h2, mvout_cmd_rs2_hi_lo}; // @[LoopMatmul.scala:568:34]
wire [31:0] _mvout_cmd_rs2_T_1 = {mvout_cmd_rs2_hi, mvout_cmd_rs2_lo}; // @[LoopMatmul.scala:568:34]
wire [38:0] mvout_cmd_rs2_lo_1 = {mvout_cmd_rs2_lo_hi_1, _mvout_cmd_rs2_T_1}; // @[LoopMatmul.scala:568:34]
wire [15:0] mvout_cmd_rs2_hi_hi_1 = {11'h0, mvout_cmd_rs2_num_rows}; // @[LoopMatmul.scala:563:27, :568:34]
wire [24:0] mvout_cmd_rs2_hi_1 = {mvout_cmd_rs2_hi_hi_1, 9'h0}; // @[LoopMatmul.scala:568:34]
assign _mvout_cmd_rs2_T_2 = {mvout_cmd_rs2_hi_1, mvout_cmd_rs2_lo_1}; // @[LoopMatmul.scala:568:34]
assign mvout_cmd_rs2 = _mvout_cmd_rs2_T_2; // @[LoopMatmul.scala:558:23, :568:34]
reg [15:0] ln_row; // @[LoopMatmul.scala:571:19]
reg [15:0] ln_cmd; // @[LoopMatmul.scala:572:19]
reg [15:0] ln_stat_id; // @[LoopMatmul.scala:573:23]
wire [16:0] _GEN_13 = {1'h0, ln_row}; // @[LoopMatmul.scala:571:19, :583:30]
wire [16:0] _GEN_14 = {12'h0, rows} - _GEN_13; // @[LoopMatmul.scala:556:27, :583:30]
wire [16:0] _ln_stat_ids_T; // @[LoopMatmul.scala:583:30]
assign _ln_stat_ids_T = _GEN_14; // @[LoopMatmul.scala:583:30]
wire [16:0] _ln_stat_ids_T_3; // @[LoopMatmul.scala:583:81]
assign _ln_stat_ids_T_3 = _GEN_14; // @[LoopMatmul.scala:583:{30,81}]
wire [16:0] _ln_stat_ids_T_1 = _ln_stat_ids_T; // @[LoopMatmul.scala:583:30]
wire _ln_stat_ids_T_2 = _ln_stat_ids_T_1 > 17'h2; // @[LoopMatmul.scala:583:{30,40}]
wire [16:0] _ln_stat_ids_T_4 = _ln_stat_ids_T_3; // @[LoopMatmul.scala:583:81]
wire [16:0] ln_stat_ids = _ln_stat_ids_T_2 ? 17'h2 : _ln_stat_ids_T_4; // @[LoopMatmul.scala:583:{24,40,81}]
wire [16:0] _GEN_15 = {1'h0, ln_stat_id}; // @[LoopMatmul.scala:573:23, :585:21]
wire [16:0] ln_r = _GEN_13 + _GEN_15; // @[LoopMatmul.scala:583:30, :585:21]
wire [32:0] _ln_sp_addr_T_1 = {1'h0, _ln_sp_addr_T} + _GEN_7; // @[LoopMatmul.scala:553:49, :587:{41,53}]
wire [37:0] _ln_sp_addr_T_2 = {1'h0, _ln_sp_addr_T_1, 4'h0}; // @[LoopMatmul.scala:587:{53,59}]
wire [38:0] _ln_sp_addr_T_3 = {29'h0, req_addr_start} + {1'h0, _ln_sp_addr_T_2}; // @[LoopMatmul.scala:540:16, :587:{35,59}]
wire [39:0] ln_sp_addr = {1'h0, _ln_sp_addr_T_3} + {23'h0, ln_r}; // @[LoopMatmul.scala:585:21, :587:{35,74}]
wire _ln_norm_cmd_T_1 = _ln_norm_cmd_T >= _GEN_10; // @[LoopMatmul.scala:554:70, :588:{27,41}]
wire _GEN_16 = req_act == 3'h2; // @[LoopMatmul.scala:540:16, :589:17]
wire _ln_norm_cmd_T_2; // @[LoopMatmul.scala:589:17]
assign _ln_norm_cmd_T_2 = _GEN_16; // @[LoopMatmul.scala:589:17]
wire _ln_norm_cmd_T_6; // @[LoopMatmul.scala:590:17]
assign _ln_norm_cmd_T_6 = _GEN_16; // @[LoopMatmul.scala:589:17, :590:17]
wire [1:0] _ln_norm_cmd_T_3 = ln_cmd[1:0]; // @[LoopMatmul.scala:572:19]
wire [1:0] _ln_norm_cmd_T_4 = ln_cmd[1:0]; // @[LoopMatmul.scala:572:19]
wire [1:0] _ln_norm_cmd_T_7 = ln_cmd[1:0]; // @[LoopMatmul.scala:572:19]
wire [1:0] _ln_norm_cmd_T_8 = ln_cmd[1:0]; // @[LoopMatmul.scala:572:19]
wire [2:0] _ln_norm_cmd_T_5 = _ln_norm_cmd_T_2 ? _GEN_2[_ln_norm_cmd_T_3] : _GEN_1[_ln_norm_cmd_T_4]; // @[LoopMatmul.scala:589:{8,17}]
wire [2:0] _ln_norm_cmd_T_9 = _ln_norm_cmd_T_6 ? _GEN_0[_ln_norm_cmd_T_7] : _GEN[_ln_norm_cmd_T_8]; // @[LoopMatmul.scala:590:{8,17}]
wire [2:0] ln_norm_cmd = _ln_norm_cmd_T_1 ? _ln_norm_cmd_T_5 : _ln_norm_cmd_T_9; // @[LoopMatmul.scala:588:{24,41}, :589:8, :590:8]
wire [2:0] ln_mvout_cmd_rs2_local_addr_norm_cmd = ln_norm_cmd; // @[LoopMatmul.scala:588:24, :613:30]
wire [56:0] _ln_dram_offset_T_1 = {1'h0, _ln_dram_offset_T} + _GEN_4; // @[LoopMatmul.scala:550:58, :593:{28,46}]
wire [61:0] _ln_dram_offset_T_2 = {1'h0, _ln_dram_offset_T_1, 4'h0}; // @[LoopMatmul.scala:593:{46,52}]
wire [56:0] _ln_dram_offset_T_3 = {40'h0, ln_r} * {17'h0, req_dram_stride}; // @[LoopMatmul.scala:540:16, :550:40, :553:49, :585:21, :593:75]
wire [62:0] _ln_dram_offset_T_4 = {1'h0, _ln_dram_offset_T_2} + {6'h0, _ln_dram_offset_T_3}; // @[LoopMatmul.scala:593:{52,67,75}]
wire [63:0] ln_dram_offset = {1'h0, _ln_dram_offset_T_4}; // @[LoopMatmul.scala:593:{67,94}]
wire [63:0] _ln_dram_addr_T = {32'h0, ln_dram_offset[31:0]}; // @[LoopMatmul.scala:593:94, :1139:17]
wire [64:0] _ln_dram_addr_T_1 = _GEN_5 + {1'h0, _ln_dram_addr_T}; // @[LoopMatmul.scala:552:33, :594:36, :1139:17]
wire [63:0] ln_dram_addr = _ln_dram_addr_T_1[63:0]; // @[LoopMatmul.scala:594:36]
wire [63:0] ln_mvout_cmd_rs1 = ln_dram_addr; // @[LoopMatmul.scala:594:36, :608:26]
wire [7:0] ln_config_norm_rs1_norm_stats_id; // @[LoopMatmul.scala:596:32]
assign ln_config_norm_rs1_norm_stats_id = ln_stat_id[7:0]; // @[LoopMatmul.scala:573:23, :596:32, :600:36]
wire [63:0] _ln_config_norm_rs1_T; // @[LoopMatmul.scala:605:44]
wire [63:0] ln_config_norm_rs1; // @[LoopMatmul.scala:602:28]
wire [8:0] ln_config_norm_rs1_lo_hi = {1'h0, ln_config_norm_rs1_norm_stats_id}; // @[LoopMatmul.scala:596:32, :605:44]
wire [16:0] ln_config_norm_rs1_lo = {ln_config_norm_rs1_lo_hi, 8'h3}; // @[LoopMatmul.scala:605:44]
assign _ln_config_norm_rs1_T = {47'h1, ln_config_norm_rs1_lo}; // @[LoopMatmul.scala:605:44]
assign ln_config_norm_rs1 = _ln_config_norm_rs1_T; // @[LoopMatmul.scala:602:28, :605:44]
wire [63:0] _ln_mvout_cmd_rs2_T_2; // @[LoopMatmul.scala:619:40]
wire [63:0] ln_mvout_cmd_rs2; // @[LoopMatmul.scala:608:26]
wire [6:0] ln_mvout_cmd_rs2_lo_hi_1 = ln_mvout_cmd_rs2_num_cols; // @[LoopMatmul.scala:613:30, :619:40]
wire [2:0] _ln_mvout_cmd_rs2_T = ln_mvout_cmd_rs2_local_addr_norm_cmd; // @[LoopMatmul.scala:613:30, :619:40]
wire ln_mvout_cmd_rs2_local_addr_result_garbage_bit; // @[LocalAddr.scala:129:26]
wire [13:0] ln_mvout_cmd_rs2_local_addr_result_data; // @[LocalAddr.scala:129:26]
wire ln_mvout_cmd_rs2_local_addr_read_full_acc_row; // @[LoopMatmul.scala:613:30]
wire ln_mvout_cmd_rs2_local_addr_garbage_bit; // @[LoopMatmul.scala:613:30]
wire [13:0] ln_mvout_cmd_rs2_local_addr_data; // @[LoopMatmul.scala:613:30]
wire _ln_mvout_cmd_rs2_local_addr_result_result_T_6; // @[LocalAddr.scala:108:37]
wire _ln_mvout_cmd_rs2_local_addr_result_result_T_5; // @[LocalAddr.scala:108:37]
wire ln_mvout_cmd_rs2_local_addr_result_result_is_acc_addr = _ln_mvout_cmd_rs2_local_addr_result_result_WIRE_is_acc_addr; // @[LocalAddr.scala:108:{26,37}]
wire _ln_mvout_cmd_rs2_local_addr_result_result_T_4; // @[LocalAddr.scala:108:37]
wire ln_mvout_cmd_rs2_local_addr_result_result_accumulate = _ln_mvout_cmd_rs2_local_addr_result_result_WIRE_accumulate; // @[LocalAddr.scala:108:{26,37}]
wire [2:0] _ln_mvout_cmd_rs2_local_addr_result_result_WIRE_3; // @[LocalAddr.scala:108:37]
wire ln_mvout_cmd_rs2_local_addr_result_result_read_full_acc_row = _ln_mvout_cmd_rs2_local_addr_result_result_WIRE_read_full_acc_row; // @[LocalAddr.scala:108:{26,37}]
wire [10:0] _ln_mvout_cmd_rs2_local_addr_result_result_T_2; // @[LocalAddr.scala:108:37]
wire [2:0] ln_mvout_cmd_rs2_local_addr_result_result_norm_cmd = _ln_mvout_cmd_rs2_local_addr_result_result_WIRE_norm_cmd; // @[LocalAddr.scala:108:{26,37}]
wire _ln_mvout_cmd_rs2_local_addr_result_result_T_1; // @[LocalAddr.scala:108:37]
wire [13:0] _ln_mvout_cmd_rs2_local_addr_result_result_T; // @[LocalAddr.scala:108:37]
wire ln_mvout_cmd_rs2_local_addr_result_result_garbage_bit = _ln_mvout_cmd_rs2_local_addr_result_result_WIRE_garbage_bit; // @[LocalAddr.scala:108:{26,37}]
wire [13:0] ln_mvout_cmd_rs2_local_addr_result_result_data = _ln_mvout_cmd_rs2_local_addr_result_result_WIRE_data; // @[LocalAddr.scala:108:{26,37}]
wire [31:0] _ln_mvout_cmd_rs2_local_addr_result_result_WIRE_1 = ln_sp_addr[31:0]; // @[LoopMatmul.scala:587:74]
assign _ln_mvout_cmd_rs2_local_addr_result_result_T = _ln_mvout_cmd_rs2_local_addr_result_result_WIRE_1[13:0]; // @[LocalAddr.scala:108:37]
assign _ln_mvout_cmd_rs2_local_addr_result_result_WIRE_data = _ln_mvout_cmd_rs2_local_addr_result_result_T; // @[LocalAddr.scala:108:37]
assign _ln_mvout_cmd_rs2_local_addr_result_result_T_1 = _ln_mvout_cmd_rs2_local_addr_result_result_WIRE_1[14]; // @[LocalAddr.scala:108:37]
assign _ln_mvout_cmd_rs2_local_addr_result_result_WIRE_garbage_bit = _ln_mvout_cmd_rs2_local_addr_result_result_T_1; // @[LocalAddr.scala:108:37]
assign _ln_mvout_cmd_rs2_local_addr_result_result_T_2 = _ln_mvout_cmd_rs2_local_addr_result_result_WIRE_1[25:15]; // @[LocalAddr.scala:108:37]
wire [10:0] _ln_mvout_cmd_rs2_local_addr_result_result_WIRE_garbage = _ln_mvout_cmd_rs2_local_addr_result_result_T_2; // @[LocalAddr.scala:108:37]
wire [2:0] _ln_mvout_cmd_rs2_local_addr_result_result_T_3 = _ln_mvout_cmd_rs2_local_addr_result_result_WIRE_1[28:26]; // @[LocalAddr.scala:108:37]
wire [2:0] _ln_mvout_cmd_rs2_local_addr_result_result_WIRE_2 = _ln_mvout_cmd_rs2_local_addr_result_result_T_3; // @[LocalAddr.scala:108:37]
assign _ln_mvout_cmd_rs2_local_addr_result_result_WIRE_3 = _ln_mvout_cmd_rs2_local_addr_result_result_WIRE_2; // @[LocalAddr.scala:108:37]
assign _ln_mvout_cmd_rs2_local_addr_result_result_WIRE_norm_cmd = _ln_mvout_cmd_rs2_local_addr_result_result_WIRE_3; // @[LocalAddr.scala:108:37]
assign _ln_mvout_cmd_rs2_local_addr_result_result_T_4 = _ln_mvout_cmd_rs2_local_addr_result_result_WIRE_1[29]; // @[LocalAddr.scala:108:37]
assign _ln_mvout_cmd_rs2_local_addr_result_result_WIRE_read_full_acc_row = _ln_mvout_cmd_rs2_local_addr_result_result_T_4; // @[LocalAddr.scala:108:37]
assign _ln_mvout_cmd_rs2_local_addr_result_result_T_5 = _ln_mvout_cmd_rs2_local_addr_result_result_WIRE_1[30]; // @[LocalAddr.scala:108:37]
assign _ln_mvout_cmd_rs2_local_addr_result_result_WIRE_accumulate = _ln_mvout_cmd_rs2_local_addr_result_result_T_5; // @[LocalAddr.scala:108:37]
assign _ln_mvout_cmd_rs2_local_addr_result_result_T_6 = _ln_mvout_cmd_rs2_local_addr_result_result_WIRE_1[31]; // @[LocalAddr.scala:108:37]
assign _ln_mvout_cmd_rs2_local_addr_result_result_WIRE_is_acc_addr = _ln_mvout_cmd_rs2_local_addr_result_result_T_6; // @[LocalAddr.scala:108:37]
wire [2:0] ln_mvout_cmd_rs2_local_addr_result_norm_cmd = ln_mvout_cmd_rs2_local_addr_result_result_norm_cmd; // @[LocalAddr.scala:108:26, :129:26]
assign ln_mvout_cmd_rs2_local_addr_result_garbage_bit = ln_mvout_cmd_rs2_local_addr_result_result_garbage_bit; // @[LocalAddr.scala:108:26, :129:26]
assign ln_mvout_cmd_rs2_local_addr_result_data = ln_mvout_cmd_rs2_local_addr_result_result_data; // @[LocalAddr.scala:108:26, :129:26]
assign ln_mvout_cmd_rs2_local_addr_read_full_acc_row = ln_mvout_cmd_rs2_local_addr_result_read_full_acc_row; // @[LoopMatmul.scala:613:30]
assign ln_mvout_cmd_rs2_local_addr_garbage_bit = ln_mvout_cmd_rs2_local_addr_result_garbage_bit; // @[LoopMatmul.scala:613:30]
assign ln_mvout_cmd_rs2_local_addr_data = ln_mvout_cmd_rs2_local_addr_result_data; // @[LoopMatmul.scala:613:30]
wire [11:0] ln_mvout_cmd_rs2_lo_hi = {11'h0, ln_mvout_cmd_rs2_local_addr_garbage_bit}; // @[LoopMatmul.scala:613:30, :619:40]
wire [25:0] ln_mvout_cmd_rs2_lo = {ln_mvout_cmd_rs2_lo_hi, ln_mvout_cmd_rs2_local_addr_data}; // @[LoopMatmul.scala:613:30, :619:40]
wire [3:0] ln_mvout_cmd_rs2_hi_lo = {ln_mvout_cmd_rs2_local_addr_read_full_acc_row, _ln_mvout_cmd_rs2_T}; // @[LoopMatmul.scala:613:30, :619:40]
wire [5:0] ln_mvout_cmd_rs2_hi = {2'h2, ln_mvout_cmd_rs2_hi_lo}; // @[LoopMatmul.scala:619:40]
wire [31:0] _ln_mvout_cmd_rs2_T_1 = {ln_mvout_cmd_rs2_hi, ln_mvout_cmd_rs2_lo}; // @[LoopMatmul.scala:619:40]
wire [38:0] ln_mvout_cmd_rs2_lo_1 = {ln_mvout_cmd_rs2_lo_hi_1, _ln_mvout_cmd_rs2_T_1}; // @[LoopMatmul.scala:619:40]
assign _ln_mvout_cmd_rs2_T_2 = {25'h200, ln_mvout_cmd_rs2_lo_1}; // @[LoopMatmul.scala:619:40]
assign ln_mvout_cmd_rs2 = _ln_mvout_cmd_rs2_T_2; // @[LoopMatmul.scala:608:26, :619:40]
assign _io_req_ready_T = ~(|state); // @[LoopMatmul.scala:538:22, :621:25]
assign io_req_ready_0 = _io_req_ready_T; // @[LoopMatmul.scala:514:7, :621:25]
assign _io_idle_T = ~(|state); // @[LoopMatmul.scala:538:22, :621:25, :624:20]
assign io_idle_0 = _io_idle_T; // @[LoopMatmul.scala:514:7, :624:20]
wire _ex_ahead_T = req_act != 3'h2; // @[LoopMatmul.scala:540:16, :589:17, :628:15]
wire _ex_ahead_T_1 = req_act != 3'h4; // @[LoopMatmul.scala:540:16, :628:53]
wire _ex_ahead_T_2 = _ex_ahead_T & _ex_ahead_T_1; // @[LoopMatmul.scala:628:{15,41,53}]
wire [16:0] _ex_ahead_T_3 = {1'h0, req_max_k} - 17'h1; // @[LoopMatmul.scala:540:16, :629:30]
wire [15:0] _ex_ahead_T_4 = _ex_ahead_T_3[15:0]; // @[LoopMatmul.scala:629:30]
wire _ex_ahead_T_5 = io_ex_k_0 == _ex_ahead_T_4; // @[LoopMatmul.scala:514:7, :629:{16,30}]
wire [15:0] _ex_ahead_T_7 = _ex_ahead_T_6[15:0]; // @[LoopMatmul.scala:630:23]
wire _ex_ahead_T_8 = io_ex_j_0 >= _ex_ahead_T_7; // @[LoopMatmul.scala:514:7, :630:{18,23}]
wire [15:0] _ex_ahead_T_10 = _ex_ahead_T_9[15:0]; // @[LoopMatmul.scala:631:27]
wire [16:0] _ex_ahead_T_11 = {1'h0, _ex_ahead_T_10} - 17'h1; // @[LoopMatmul.scala:631:{27,36}]
wire [15:0] _ex_ahead_T_12 = _ex_ahead_T_11[15:0]; // @[LoopMatmul.scala:631:36]
wire _ex_ahead_T_13 = io_ex_j_0 == _ex_ahead_T_12; // @[LoopMatmul.scala:514:7, :631:{21,36}]
wire _GEN_17 = io_ex_i_0 > i; // @[LoopMatmul.scala:514:7, :546:14, :631:54]
wire _ex_ahead_T_14; // @[LoopMatmul.scala:631:54]
assign _ex_ahead_T_14 = _GEN_17; // @[LoopMatmul.scala:631:54]
wire _ex_ahead_T_20; // @[LoopMatmul.scala:633:45]
assign _ex_ahead_T_20 = _GEN_17; // @[LoopMatmul.scala:631:54, :633:45]
wire _ex_ahead_T_15 = _ex_ahead_T_13 & _ex_ahead_T_14; // @[LoopMatmul.scala:631:{21,43,54}]
wire _ex_ahead_T_16 = _ex_ahead_T_8 | _ex_ahead_T_15; // @[LoopMatmul.scala:630:{18,32}, :631:43]
wire _ex_ahead_T_17 = _ex_ahead_T_5 & _ex_ahead_T_16; // @[LoopMatmul.scala:629:{16,36}, :630:32]
wire _ex_ahead_T_18 = _ex_ahead_T_2 & _ex_ahead_T_17; // @[LoopMatmul.scala:628:{41,77}, :629:36]
wire _ex_ahead_T_19 = io_ex_completed_0 | _ex_ahead_T_18; // @[LoopMatmul.scala:514:7, :627:43, :628:77]
wire ex_ahead; // @[LoopMatmul.scala:627:26]
wire _ex_ahead_T_21 = io_ex_i_0 == i; // @[LoopMatmul.scala:514:7, :546:14, :633:61]
wire [15:0] _ex_ahead_T_23 = _ex_ahead_T_22[15:0]; // @[LoopMatmul.scala:633:83]
wire _ex_ahead_T_24 = io_ex_j_0 >= _ex_ahead_T_23; // @[LoopMatmul.scala:514:7, :633:{78,83}]
wire _ex_ahead_T_25 = _ex_ahead_T_21 & _ex_ahead_T_24; // @[LoopMatmul.scala:633:{61,67,78}]
wire _ex_ahead_T_26 = _ex_ahead_T_20 | _ex_ahead_T_25; // @[LoopMatmul.scala:633:{45,49,67}]
wire _ex_ahead_T_27 = io_ex_completed_0 | _ex_ahead_T_26; // @[LoopMatmul.scala:514:7, :633:{33,49}]
assign ex_ahead = req_is_resadd ? _ex_ahead_T_27 : _ex_ahead_T_19; // @[LoopMatmul.scala:540:16, :627:{26,43}, :632:22, :633:{14,33}]
wire _io_cmd_valid_T = |state; // @[LoopMatmul.scala:538:22, :621:25, :636:25]
wire _io_cmd_valid_T_1 = ~io_rob_overloaded_0; // @[LoopMatmul.scala:514:7, :636:37]
wire _io_cmd_valid_T_2 = _io_cmd_valid_T & _io_cmd_valid_T_1; // @[LoopMatmul.scala:636:{25,34,37}]
wire _io_cmd_valid_T_3 = _io_cmd_valid_T_2 & ex_ahead; // @[LoopMatmul.scala:627:26, :636:{34,56}]
wire _io_cmd_valid_T_4 = |req_dram_addr; // @[LoopMatmul.scala:540:16, :636:85]
assign _io_cmd_valid_T_5 = _io_cmd_valid_T_3 & _io_cmd_valid_T_4; // @[LoopMatmul.scala:636:{56,68,85}]
assign io_cmd_valid_0 = _io_cmd_valid_T_5; // @[LoopMatmul.scala:514:7, :636:68]
wire _io_cmd_bits_T = state == 2'h2; // @[LoopMatmul.scala:538:22, :638:12]
wire _io_cmd_bits_T_1 = &state; // @[LoopMatmul.scala:538:22, :639:12]
wire [63:0] _io_cmd_bits_T_2_rs1 = _io_cmd_bits_T_1 ? ln_mvout_cmd_rs1 : mvout_cmd_rs1; // @[Mux.scala:126:16]
wire [63:0] _io_cmd_bits_T_2_rs2 = _io_cmd_bits_T_1 ? ln_mvout_cmd_rs2 : mvout_cmd_rs2; // @[Mux.scala:126:16]
assign _io_cmd_bits_T_3_inst_funct = _io_cmd_bits_T ? 7'h0 : 7'h3; // @[Mux.scala:126:16]
assign _io_cmd_bits_T_3_rs1 = _io_cmd_bits_T ? ln_config_norm_rs1 : _io_cmd_bits_T_2_rs1; // @[Mux.scala:126:16]
assign _io_cmd_bits_T_3_rs2 = _io_cmd_bits_T ? 64'h0 : _io_cmd_bits_T_2_rs2; // @[Mux.scala:126:16]
assign io_cmd_bits_inst_funct_0 = _io_cmd_bits_T_3_inst_funct; // @[Mux.scala:126:16]
assign io_cmd_bits_rs1_0 = _io_cmd_bits_T_3_rs1; // @[Mux.scala:126:16]
assign io_cmd_bits_rs2_0 = _io_cmd_bits_T_3_rs2; // @[Mux.scala:126:16]
wire [15:0] next_i_max = _next_i_max_T[15:0]; // @[Util.scala:39:28]
wire [16:0] _GEN_18 = {1'h0, i} + 17'h1; // @[Util.scala:41:15]
wire [16:0] _next_i_T; // @[Util.scala:41:15]
assign _next_i_T = _GEN_18; // @[Util.scala:41:15]
wire [16:0] _next_i_T_3; // @[Util.scala:43:11]
assign _next_i_T_3 = _GEN_18; // @[Util.scala:41:15, :43:11]
wire [16:0] _next_i_T_13; // @[Util.scala:41:15]
assign _next_i_T_13 = _GEN_18; // @[Util.scala:41:15]
wire [16:0] _next_i_T_16; // @[Util.scala:43:11]
assign _next_i_T_16 = _GEN_18; // @[Util.scala:41:15, :43:11]
wire [15:0] _next_i_T_1 = _next_i_T[15:0]; // @[Util.scala:41:15]
wire _next_i_T_4 = _next_i_T_3 > {1'h0, next_i_max}; // @[Util.scala:39:28, :43:{11,17}]
wire [15:0] _next_i_T_5 = _next_i_T_4 ? 16'h0 : _next_i_T_1; // @[Mux.scala:126:16]
wire [15:0] next_i = _next_i_T_5; // @[Mux.scala:126:16]
wire _next_j_T = next_i == 16'h0; // @[Mux.scala:126:16]
wire [16:0] _GEN_19 = _GEN_10 - 17'h1; // @[Util.scala:39:28]
wire [16:0] _next_j_max_T; // @[Util.scala:39:28]
assign _next_j_max_T = _GEN_19; // @[Util.scala:39:28]
wire [16:0] _next_j_max_T_1; // @[Util.scala:39:28]
assign _next_j_max_T_1 = _GEN_19; // @[Util.scala:39:28]
wire [15:0] next_j_max = _next_j_max_T[15:0]; // @[Util.scala:39:28]
wire [15:0] _next_j_T_2 = _next_j_T_1[15:0]; // @[Util.scala:41:15]
wire _next_j_T_3 = ~_next_j_T; // @[Util.scala:42:8]
wire _next_j_T_5 = _next_j_T_4 > {1'h0, next_j_max}; // @[Util.scala:39:28, :43:{11,17}]
wire [15:0] _next_j_T_6 = _next_j_T_5 ? 16'h0 : _next_j_T_2; // @[Mux.scala:126:16]
wire [15:0] next_j = _next_j_T_3 ? j : _next_j_T_6; // @[Mux.scala:126:16]
wire [15:0] next_j_max_1 = _next_j_max_T_1[15:0]; // @[Util.scala:39:28]
wire [15:0] _next_j_T_8 = _next_j_T_7[15:0]; // @[Util.scala:41:15]
wire _next_j_T_11 = _next_j_T_10 > {1'h0, next_j_max_1}; // @[Util.scala:39:28, :43:{11,17}]
wire [15:0] _next_j_T_12 = _next_j_T_11 ? 16'h0 : _next_j_T_8; // @[Mux.scala:126:16]
wire [15:0] next_j_1 = _next_j_T_12; // @[Mux.scala:126:16]
wire _T_22 = next_j_1 == 16'h0; // @[Mux.scala:126:16]
wire _next_stat_id_T; // @[LoopMatmul.scala:661:70]
assign _next_stat_id_T = _T_22; // @[LoopMatmul.scala:661:70]
wire _next_cmd_T; // @[LoopMatmul.scala:662:70]
assign _next_cmd_T = _T_22; // @[LoopMatmul.scala:661:70, :662:70]
wire _next_row_T; // @[LoopMatmul.scala:663:67]
assign _next_row_T = _T_22; // @[LoopMatmul.scala:661:70, :663:67]
wire _next_i_T_6; // @[LoopMatmul.scala:665:14]
assign _next_i_T_6 = _T_22; // @[LoopMatmul.scala:661:70, :665:14]
wire [17:0] _next_stat_id_max_T = {1'h0, ln_stat_ids} - 18'h1; // @[Util.scala:39:28]
wire [16:0] next_stat_id_max = _next_stat_id_max_T[16:0]; // @[Util.scala:39:28]
wire [16:0] _GEN_20 = _GEN_15 + 17'h1; // @[Util.scala:41:15]
wire [16:0] _next_stat_id_T_1; // @[Util.scala:41:15]
assign _next_stat_id_T_1 = _GEN_20; // @[Util.scala:41:15]
wire [16:0] _next_stat_id_T_4; // @[Util.scala:43:11]
assign _next_stat_id_T_4 = _GEN_20; // @[Util.scala:41:15, :43:11]
wire [15:0] _next_stat_id_T_2 = _next_stat_id_T_1[15:0]; // @[Util.scala:41:15]
wire _next_stat_id_T_3 = ~_next_stat_id_T; // @[Util.scala:42:8]
wire _next_stat_id_T_5 = _next_stat_id_T_4 > next_stat_id_max; // @[Util.scala:39:28, :43:{11,17}]
wire [15:0] _next_stat_id_T_6 = _next_stat_id_T_5 ? 16'h0 : _next_stat_id_T_2; // @[Mux.scala:126:16]
wire [15:0] next_stat_id = _next_stat_id_T_3 ? ln_stat_id : _next_stat_id_T_6; // @[Mux.scala:126:16]
wire _T_18 = next_stat_id == 16'h0; // @[Mux.scala:126:16]
wire _next_cmd_T_1; // @[LoopMatmul.scala:662:94]
assign _next_cmd_T_1 = _T_18; // @[LoopMatmul.scala:662:94]
wire _next_row_T_1; // @[LoopMatmul.scala:663:91]
assign _next_row_T_1 = _T_18; // @[LoopMatmul.scala:662:94, :663:91]
wire _next_i_T_7; // @[LoopMatmul.scala:665:38]
assign _next_i_T_7 = _T_18; // @[LoopMatmul.scala:662:94, :665:38]
wire _next_cmd_T_2 = _next_cmd_T & _next_cmd_T_1; // @[LoopMatmul.scala:662:{70,78,94}]
wire [16:0] _GEN_21 = {1'h0, ln_cmd} + 17'h1; // @[Util.scala:41:15]
wire [16:0] _next_cmd_T_3; // @[Util.scala:41:15]
assign _next_cmd_T_3 = _GEN_21; // @[Util.scala:41:15]
wire [16:0] _next_cmd_T_6; // @[Util.scala:43:11]
assign _next_cmd_T_6 = _GEN_21; // @[Util.scala:41:15, :43:11]
wire [15:0] _next_cmd_T_4 = _next_cmd_T_3[15:0]; // @[Util.scala:41:15]
wire _next_cmd_T_5 = ~_next_cmd_T_2; // @[Util.scala:42:8]
wire _next_cmd_T_7 = _next_cmd_T_6 > 17'h2; // @[Util.scala:43:{11,17}]
wire [15:0] _next_cmd_T_8 = _next_cmd_T_7 ? 16'h0 : _next_cmd_T_4; // @[Mux.scala:126:16]
wire [15:0] next_cmd = _next_cmd_T_5 ? ln_cmd : _next_cmd_T_8; // @[Mux.scala:126:16]
wire _next_row_T_2 = _next_row_T & _next_row_T_1; // @[LoopMatmul.scala:663:{67,75,91}]
wire _T_16 = next_cmd == 16'h0; // @[Mux.scala:126:16]
wire _next_row_T_3; // @[LoopMatmul.scala:663:111]
assign _next_row_T_3 = _T_16; // @[LoopMatmul.scala:663:111]
wire _next_i_T_9; // @[LoopMatmul.scala:665:58]
assign _next_i_T_9 = _T_16; // @[LoopMatmul.scala:663:111, :665:58]
wire _next_row_T_4 = _next_row_T_2 & _next_row_T_3; // @[LoopMatmul.scala:663:{75,99,111}]
wire [5:0] _next_row_max_T = {1'h0, rows} - 6'h1; // @[Util.scala:39:28]
wire [4:0] next_row_max = _next_row_max_T[4:0]; // @[Util.scala:39:28]
wire [16:0] _GEN_22 = _GEN_13 + 17'h2; // @[Util.scala:41:15]
wire [16:0] _next_row_T_5; // @[Util.scala:41:15]
assign _next_row_T_5 = _GEN_22; // @[Util.scala:41:15]
wire [16:0] _next_row_T_8; // @[Util.scala:43:11]
assign _next_row_T_8 = _GEN_22; // @[Util.scala:41:15, :43:11]
wire [15:0] _next_row_T_6 = _next_row_T_5[15:0]; // @[Util.scala:41:15]
wire _next_row_T_7 = ~_next_row_T_4; // @[Util.scala:42:8]
wire _next_row_T_9 = _next_row_T_8 > {12'h0, next_row_max}; // @[Util.scala:39:28, :43:{11,17}]
wire [15:0] _next_row_T_10 = _next_row_T_9 ? 16'h0 : _next_row_T_6; // @[Mux.scala:126:16]
wire [15:0] next_row = _next_row_T_7 ? ln_row : _next_row_T_10; // @[Mux.scala:126:16]
wire _next_i_T_8 = _next_i_T_6 & _next_i_T_7; // @[LoopMatmul.scala:665:{14,22,38}]
wire _next_i_T_10 = _next_i_T_8 & _next_i_T_9; // @[LoopMatmul.scala:665:{22,46,58}]
wire _next_i_T_11 = next_row == 16'h0; // @[Mux.scala:126:16]
wire _next_i_T_12 = _next_i_T_10 & _next_i_T_11; // @[LoopMatmul.scala:665:{46,66,78}]
wire [15:0] next_i_max_1 = _next_i_max_T_1[15:0]; // @[Util.scala:39:28]
wire [15:0] _next_i_T_14 = _next_i_T_13[15:0]; // @[Util.scala:41:15]
wire _next_i_T_15 = ~_next_i_T_12; // @[Util.scala:42:8]
wire _next_i_T_17 = _next_i_T_16 > {1'h0, next_i_max_1}; // @[Util.scala:39:28, :43:{11,17}]
wire [15:0] _next_i_T_18 = _next_i_T_17 ? 16'h0 : _next_i_T_14; // @[Mux.scala:126:16]
wire [15:0] next_i_1 = _next_i_T_15 ? i : _next_i_T_18; // @[Mux.scala:126:16]
wire _state_T = io_req_bits_act_0 == 3'h2; // @[LoopMatmul.scala:514:7, :589:17, :682:35]
wire _state_T_1 = io_req_bits_act_0 == 3'h4; // @[LoopMatmul.scala:514:7, :682:81]
wire _state_T_2 = _state_T | _state_T_1; // @[LoopMatmul.scala:682:{35,61,81}]
wire [1:0] _state_T_3 = _state_T_2 ? 2'h2 : 2'h1; // @[LoopMatmul.scala:646:36, :682:{17,61}]
wire _T_10 = io_cmd_ready_0 & io_cmd_valid_0; // @[Decoupled.scala:51:35]
wire _T_3 = _T_10 & state == 2'h1; // @[Decoupled.scala:51:35]
wire _T_9 = _T_10 & _io_cmd_bits_T; // @[Decoupled.scala:51:35]
wire _T_12 = _T_10 & (&state); // @[Decoupled.scala:51:35]
wire _T_23 = io_req_ready_0 & io_req_valid_0; // @[Decoupled.scala:51:35]
always @(posedge clock) begin // @[LoopMatmul.scala:514:7]
if (reset) // @[LoopMatmul.scala:514:7]
state <= 2'h0; // @[LoopMatmul.scala:538:22]
else if (_T_23) // @[Decoupled.scala:51:35]
state <= _state_T_3; // @[LoopMatmul.scala:538:22, :682:17]
else if (|req_dram_addr) begin // @[LoopMatmul.scala:540:16, :636:85]
if (_T_3) begin // @[LoopMatmul.scala:646:27]
if (_next_j_T & next_j == 16'h0) // @[Mux.scala:126:16]
state <= 2'h0; // @[LoopMatmul.scala:538:22]
end
else if (_T_9) // @[LoopMatmul.scala:657:27]
state <= 2'h3; // @[LoopMatmul.scala:538:22]
else if (_T_12) begin // @[LoopMatmul.scala:659:27]
if (next_i_1 == 16'h0 & _next_i_T_11 & _T_16 & _T_18 & _T_22) // @[Mux.scala:126:16]
state <= 2'h0; // @[LoopMatmul.scala:538:22]
else if (_T_22) // @[LoopMatmul.scala:661:70]
state <= 2'h2; // @[LoopMatmul.scala:538:22]
end
end
else // @[LoopMatmul.scala:636:85]
state <= 2'h0; // @[LoopMatmul.scala:538:22]
if (_T_23) begin // @[Decoupled.scala:51:35]
req_max_k <= io_req_bits_max_k_0; // @[LoopMatmul.scala:514:7, :540:16]
req_max_j <= io_req_bits_max_j_0; // @[LoopMatmul.scala:514:7, :540:16]
req_max_i <= io_req_bits_max_i_0; // @[LoopMatmul.scala:514:7, :540:16]
req_pad_j <= io_req_bits_pad_j_0; // @[LoopMatmul.scala:514:7, :540:16]
req_pad_i <= io_req_bits_pad_i_0; // @[LoopMatmul.scala:514:7, :540:16]
req_dram_addr <= io_req_bits_dram_addr_0; // @[LoopMatmul.scala:514:7, :540:16]
req_dram_stride <= io_req_bits_dram_stride_0; // @[LoopMatmul.scala:514:7, :540:16]
req_full_c <= io_req_bits_full_c_0; // @[LoopMatmul.scala:514:7, :540:16]
req_act <= io_req_bits_act_0; // @[LoopMatmul.scala:514:7, :540:16]
req_addr_start <= io_req_bits_addr_start_0; // @[LoopMatmul.scala:514:7, :540:16]
req_loop_id <= io_req_bits_loop_id_0; // @[LoopMatmul.scala:514:7, :540:16]
req_is_resadd <= io_req_bits_is_resadd_0; // @[LoopMatmul.scala:514:7, :540:16]
j <= 16'h0; // @[LoopMatmul.scala:545:14]
i <= 16'h0; // @[LoopMatmul.scala:546:14]
ln_row <= 16'h0; // @[LoopMatmul.scala:571:19]
ln_cmd <= 16'h0; // @[LoopMatmul.scala:572:19]
ln_stat_id <= 16'h0; // @[LoopMatmul.scala:573:23]
end
else begin // @[Decoupled.scala:51:35]
if (|req_dram_addr) begin // @[LoopMatmul.scala:540:16, :636:85]
if (_T_3) begin // @[LoopMatmul.scala:646:27]
j <= next_j; // @[Mux.scala:126:16]
i <= next_i; // @[Mux.scala:126:16]
end
else if (_T_9 | ~_T_12) begin // @[LoopMatmul.scala:545:14, :546:14, :657:{27,51}, :659:{27,47}]
end
else begin // @[LoopMatmul.scala:546:14, :657:51, :659:47]
j <= next_j_1; // @[Mux.scala:126:16]
i <= next_i_1; // @[Mux.scala:126:16]
end
end
if (~(|req_dram_addr) | _T_3 | _T_9 | ~_T_12) begin // @[LoopMatmul.scala:540:16, :545:14, :573:23, :636:85, :644:{23,32}, :646:{27,44}, :657:{27,51}, :659:{27,47}]
end
else begin // @[LoopMatmul.scala:573:23, :644:32, :646:44, :657:51, :659:47]
ln_row <= next_row; // @[Mux.scala:126:16]
ln_cmd <= next_cmd; // @[Mux.scala:126:16]
ln_stat_id <= next_stat_id; // @[Mux.scala:126:16]
end
end
end // always @(posedge)
assign io_req_ready = io_req_ready_0; // @[LoopMatmul.scala:514:7]
assign io_cmd_valid = io_cmd_valid_0; // @[LoopMatmul.scala:514:7]
assign io_cmd_bits_inst_funct = io_cmd_bits_inst_funct_0; // @[LoopMatmul.scala:514:7]
assign io_cmd_bits_rs1 = io_cmd_bits_rs1_0; // @[LoopMatmul.scala:514:7]
assign io_cmd_bits_rs2 = io_cmd_bits_rs2_0; // @[LoopMatmul.scala:514:7]
assign io_idle = io_idle_0; // @[LoopMatmul.scala:514:7]
assign io_loop_id = io_loop_id_0; // @[LoopMatmul.scala:514:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File util.scala:
//******************************************************************************
// Copyright (c) 2015 - 2019, The Regents of the University of California (Regents).
// All Rights Reserved. See LICENSE and LICENSE.SiFive for license details.
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
// Utility Functions
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
package boom.v4.util
import chisel3._
import chisel3.util._
import freechips.rocketchip.rocket.Instructions._
import freechips.rocketchip.rocket._
import freechips.rocketchip.util.{Str}
import org.chipsalliance.cde.config.{Parameters}
import freechips.rocketchip.tile.{TileKey}
import boom.v4.common.{MicroOp}
import boom.v4.exu.{BrUpdateInfo}
/**
* Object to XOR fold a input register of fullLength into a compressedLength.
*/
object Fold
{
def apply(input: UInt, compressedLength: Int, fullLength: Int): UInt = {
val clen = compressedLength
val hlen = fullLength
if (hlen <= clen) {
input
} else {
var res = 0.U(clen.W)
var remaining = input.asUInt
for (i <- 0 to hlen-1 by clen) {
val len = if (i + clen > hlen ) (hlen - i) else clen
require(len > 0)
res = res(clen-1,0) ^ remaining(len-1,0)
remaining = remaining >> len.U
}
res
}
}
}
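// Illustration only, not part of the upstream BOOM util.scala: Fold XORs successive
// `compressedLength`-wide slices of the input together, so a 12-bit history folded to
// 4 bits is history(3,0) ^ history(7,4) ^ history(11,8). The module name and ports
// below are hypothetical and exist only for this sketch.
class FoldExample extends Module
{
  val io = IO(new Bundle {
    val history = Input(UInt(12.W))
    val folded  = Output(UInt(4.W))
  })
  // XOR-fold the 12-bit history down to 4 bits.
  io.folded := Fold(io.history, 4, 12)
}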
/**
* Object to check if MicroOp was killed due to a branch mispredict.
* Uses "Fast" branch masks
*/
object IsKilledByBranch
{
def apply(brupdate: BrUpdateInfo, flush: Bool, uop: MicroOp): Bool = {
return apply(brupdate, flush, uop.br_mask)
}
def apply(brupdate: BrUpdateInfo, flush: Bool, uop_mask: UInt): Bool = {
return maskMatch(brupdate.b1.mispredict_mask, uop_mask) || flush
}
def apply[T <: boom.v4.common.HasBoomUOP](brupdate: BrUpdateInfo, flush: Bool, bundle: T): Bool = {
return apply(brupdate, flush, bundle.uop)
}
def apply[T <: boom.v4.common.HasBoomUOP](brupdate: BrUpdateInfo, flush: Bool, bundle: Valid[T]): Bool = {
return apply(brupdate, flush, bundle.bits)
}
}
/**
* Object to return new MicroOp with a new BR mask given a MicroOp mask
* and old BR mask.
*/
object GetNewUopAndBrMask
{
def apply(uop: MicroOp, brupdate: BrUpdateInfo)
(implicit p: Parameters): MicroOp = {
val newuop = WireInit(uop)
newuop.br_mask := uop.br_mask & ~brupdate.b1.resolve_mask
newuop
}
}
/**
* Object to return a BR mask given a MicroOp mask and old BR mask.
*/
object GetNewBrMask
{
def apply(brupdate: BrUpdateInfo, uop: MicroOp): UInt = {
return uop.br_mask & ~brupdate.b1.resolve_mask
}
def apply(brupdate: BrUpdateInfo, br_mask: UInt): UInt = {
return br_mask & ~brupdate.b1.resolve_mask
}
}
object UpdateBrMask
{
def apply(brupdate: BrUpdateInfo, uop: MicroOp): MicroOp = {
val out = WireInit(uop)
out.br_mask := GetNewBrMask(brupdate, uop)
out
}
def apply[T <: boom.v4.common.HasBoomUOP](brupdate: BrUpdateInfo, bundle: T): T = {
val out = WireInit(bundle)
out.uop.br_mask := GetNewBrMask(brupdate, bundle.uop.br_mask)
out
}
def apply[T <: boom.v4.common.HasBoomUOP](brupdate: BrUpdateInfo, flush: Bool, bundle: Valid[T]): Valid[T] = {
val out = WireInit(bundle)
out.bits.uop.br_mask := GetNewBrMask(brupdate, bundle.bits.uop.br_mask)
out.valid := bundle.valid && !IsKilledByBranch(brupdate, flush, bundle.bits.uop.br_mask)
out
}
}
/**
* Object to check if at least 1 bit matches in two masks
*/
object maskMatch
{
def apply(msk1: UInt, msk2: UInt): Bool = (msk1 & msk2) =/= 0.U
}
/**
* Object to clear one bit in a mask given an index
*/
object clearMaskBit
{
def apply(msk: UInt, idx: UInt): UInt = (msk & ~(1.U << idx))(msk.getWidth-1, 0)
}
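// Worked example (illustration only, not in the upstream source): clearMaskBit("b1011".U, 1.U)
// computes "b1011" & ~"b0010" = "b1001", i.e. only bit `idx` is cleared; the final slice keeps
// the result at the original mask width.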
/**
* Object to shift a register over by one bit and concat a new one
*/
object PerformShiftRegister
{
def apply(reg_val: UInt, new_bit: Bool): UInt = {
reg_val := Cat(reg_val(reg_val.getWidth-1, 0).asUInt, new_bit.asUInt).asUInt
reg_val
}
}
/**
* Object to shift a register over by one bit, wrapping the top bit around to the bottom
* (XOR'ed with a new-bit), and evicting a bit at index HLEN.
* This is used to simulate a longer HLEN-width shift register that is folded
* down to a compressed CLEN.
*/
object PerformCircularShiftRegister
{
def apply(csr: UInt, new_bit: Bool, evict_bit: Bool, hlen: Int, clen: Int): UInt = {
val carry = csr(clen-1)
val newval = Cat(csr, new_bit ^ carry) ^ (evict_bit << (hlen % clen).U)
newval
}
}
/**
* Object to increment an input value, wrapping it if
* necessary.
*/
object WrapAdd
{
// "n" is the number of increments, so we wrap at n-1.
def apply(value: UInt, amt: UInt, n: Int): UInt = {
if (isPow2(n)) {
(value + amt)(log2Ceil(n)-1,0)
} else {
val sum = Cat(0.U(1.W), value) + Cat(0.U(1.W), amt)
Mux(sum >= n.U,
sum - n.U,
sum)
}
}
}
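// Illustration only, not part of the upstream BOOM util.scala: WrapAdd is a modulo-n add.
// For a non-power-of-2 n it widens both operands, adds, and subtracts n on overflow, so with
// n = 10, value = 7 and amt = 5 the sum 12 wraps to 2; for a power-of-2 n it simply truncates
// to log2Ceil(n) bits. The wrapper below is hypothetical and assumes the caller keeps `ptr`
// and `amt` below 10.
class Wrap10Example extends Module
{
  val io = IO(new Bundle {
    val ptr  = Input(UInt(4.W))
    val amt  = Input(UInt(4.W))
    val next = Output(UInt(4.W))
  })
  // Advance a pointer into a 10-entry structure, wrapping past index 9.
  io.next := WrapAdd(io.ptr, io.amt, 10)(3, 0)
}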
/**
* Object to decrement an input value, wrapping it if
* necessary.
*/
object WrapSub
{
// "n" is the number of increments, so we wrap to n-1.
def apply(value: UInt, amt: Int, n: Int): UInt = {
if (isPow2(n)) {
(value - amt.U)(log2Ceil(n)-1,0)
} else {
val v = Cat(0.U(1.W), value)
val b = Cat(0.U(1.W), amt.U)
Mux(value >= amt.U,
value - amt.U,
n.U - amt.U + value)
}
}
}
/**
* Object to increment an input value, wrapping it if
* necessary.
*/
object WrapInc
{
// "n" is the number of increments, so we wrap at n-1.
def apply(value: UInt, n: Int): UInt = {
if (isPow2(n)) {
(value + 1.U)(log2Ceil(n)-1,0)
} else {
val wrap = (value === (n-1).U)
Mux(wrap, 0.U, value + 1.U)
}
}
}
/**
* Object to decrement an input value, wrapping it if
* necessary.
*/
object WrapDec
{
// "n" is the number of increments, so we wrap at n-1.
def apply(value: UInt, n: Int): UInt = {
if (isPow2(n)) {
(value - 1.U)(log2Ceil(n)-1,0)
} else {
val wrap = (value === 0.U)
Mux(wrap, (n-1).U, value - 1.U)
}
}
}
/**
* Object to mask off lower bits of a PC to align to a "b"
* Byte boundary.
*/
object AlignPCToBoundary
{
def apply(pc: UInt, b: Int): UInt = {
// Invert for scenario where pc longer than b
// (which would clear all bits above size(b)).
~(~pc | (b-1).U)
}
}
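// Worked example (illustration only, not in the upstream source): the double inversion keeps
// the upper PC bits intact. A naive `pc & ~((b-1).U)` would lose them, because ~63.U is a
// 6-bit all-zeros literal that zero-extends and masks away everything above bit 5. For b = 64:
// AlignPCToBoundary("h80001234".U, 64) = ~(~0x80001234 | 0x3f) = 0x80001200.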
/**
* Object to rotate a signal left by one
*/
object RotateL1
{
def apply(signal: UInt): UInt = {
val w = signal.getWidth
val out = Cat(signal(w-2,0), signal(w-1))
return out
}
}
/**
* Object to sext a value to a particular length.
*/
object Sext
{
def apply(x: UInt, length: Int): UInt = {
if (x.getWidth == length) return x
else return Cat(Fill(length-x.getWidth, x(x.getWidth-1)), x)
}
}
/**
* Object to translate from BOOM's special "packed immediate" to a 32b signed immediate
* Asking for U-type gives it shifted up 12 bits.
*/
object ImmGen
{
import boom.v4.common.{LONGEST_IMM_SZ, IS_B, IS_I, IS_J, IS_S, IS_U, IS_N}
def apply(i: UInt, isel: UInt): UInt = {
val ip = Mux(isel === IS_N, 0.U(LONGEST_IMM_SZ.W), i)
val sign = ip(LONGEST_IMM_SZ-1).asSInt
val i30_20 = Mux(isel === IS_U, ip(18,8).asSInt, sign)
val i19_12 = Mux(isel === IS_U || isel === IS_J, ip(7,0).asSInt, sign)
val i11 = Mux(isel === IS_U, 0.S,
Mux(isel === IS_J || isel === IS_B, ip(8).asSInt, sign))
val i10_5 = Mux(isel === IS_U, 0.S, ip(18,14).asSInt)
val i4_1 = Mux(isel === IS_U, 0.S, ip(13,9).asSInt)
val i0 = Mux(isel === IS_S || isel === IS_I, ip(8).asSInt, 0.S)
return Cat(sign, i30_20, i19_12, i11, i10_5, i4_1, i0)
}
}
/**
* Object to see if an instruction is a JALR.
*/
object DebugIsJALR
{
def apply(inst: UInt): Bool = {
// TODO Chisel not sure why this won't compile
// val is_jalr = rocket.DecodeLogic(inst, List(Bool(false)),
// Array(
// JALR -> Bool(true)))
inst(6,0) === "b1100111".U
}
}
/**
* Object to take an instruction and output its branch or jal target. Only used
* for a debug assert (no where else would we jump straight from instruction
* bits to a target).
*/
object DebugGetBJImm
{
def apply(inst: UInt): UInt = {
// TODO Chisel not sure why this won't compile
//val csignals =
//rocket.DecodeLogic(inst,
// List(Bool(false), Bool(false)),
// Array(
// BEQ -> List(Bool(true ), Bool(false)),
// BNE -> List(Bool(true ), Bool(false)),
// BGE -> List(Bool(true ), Bool(false)),
// BGEU -> List(Bool(true ), Bool(false)),
// BLT -> List(Bool(true ), Bool(false)),
// BLTU -> List(Bool(true ), Bool(false))
// ))
//val is_br :: nothing :: Nil = csignals
val is_br = (inst(6,0) === "b1100011".U)
val br_targ = Cat(Fill(12, inst(31)), Fill(8,inst(31)), inst(7), inst(30,25), inst(11,8), 0.U(1.W))
val jal_targ= Cat(Fill(12, inst(31)), inst(19,12), inst(20), inst(30,25), inst(24,21), 0.U(1.W))
Mux(is_br, br_targ, jal_targ)
}
}
/**
* Object to return the lowest bit position after the head.
*/
object AgePriorityEncoder
{
def apply(in: Seq[Bool], head: UInt): UInt = {
val n = in.size
val width = log2Ceil(in.size)
val n_padded = 1 << width
val temp_vec = (0 until n_padded).map(i => if (i < n) in(i) && i.U >= head else false.B) ++ in
val idx = PriorityEncoder(temp_vec)
idx(width-1, 0) //discard msb
}
}
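// Illustration only, not part of the upstream BOOM util.scala: AgePriorityEncoder returns the
// first valid index at or after `head`, wrapping to the lowest valid index when nothing at or
// beyond head is set. With valids {0, 3} and head = 2 it returns 3; with valids {0, 1} and
// head = 2 it wraps and returns 0. The wrapper below is a hypothetical hookup for a 4-entry
// structure.
class OldestFirstExample extends Module
{
  val io = IO(new Bundle {
    val valids = Input(Vec(4, Bool()))
    val head   = Input(UInt(2.W))
    val sel    = Output(UInt(2.W))
  })
  // Pick the oldest valid entry relative to the head pointer.
  io.sel := AgePriorityEncoder(io.valids, io.head)
}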
/**
* Object to determine whether queue
* index i0 is older than index i1.
*/
object IsOlder
{
def apply(i0: UInt, i1: UInt, head: UInt) = ((i0 < i1) ^ (i0 < head) ^ (i1 < head))
}
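// Worked example (illustration only, not in the upstream source): IsOlder resolves circular
// age with three comparisons XORed together. With head = 5 in an 8-entry queue the age order
// is 5,6,7,0,1,...; IsOlder(6.U, 1.U, 5.U) = (6<1)^(6<5)^(1<5) = F^F^T = true (index 6 was
// allocated before index 1), while IsOlder(2.U, 6.U, 5.U) = T^T^F = false.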
object IsYoungerMask
{
def apply(i: UInt, head: UInt, n: Integer): UInt = {
val hi_mask = ~MaskLower(UIntToOH(i)(n-1,0))
val lo_mask = ~MaskUpper(UIntToOH(head)(n-1,0))
Mux(i < head, hi_mask & lo_mask, hi_mask | lo_mask)(n-1,0)
}
}
/**
* Set all bits at or below the highest order '1'.
*/
object MaskLower
{
def apply(in: UInt) = {
val n = in.getWidth
(0 until n).map(i => in >> i.U).reduce(_|_)
}
}
/**
* Set all bits at or above the lowest order '1'.
*/
object MaskUpper
{
def apply(in: UInt) = {
val n = in.getWidth
(0 until n).map(i => (in << i.U)(n-1,0)).reduce(_|_)
}
}
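// Worked example (illustration only, not in the upstream source): for in = "b00100".U,
// MaskLower(in) = "b00111".U (bit 2 and everything below it) and MaskUpper(in) = "b11100".U
// (bit 2 and everything above it). IsYoungerMask above combines the complements of these
// masks over one-hot inputs (AND when i < head, OR otherwise) to select the circular range
// between `head` and index `i`.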
/**
* Transpose a matrix of Chisel Vecs.
*/
object Transpose
{
def apply[T <: chisel3.Data](in: Vec[Vec[T]]) = {
val n = in(0).size
VecInit((0 until n).map(i => VecInit(in.map(row => row(i)))))
}
}
/**
* N-wide one-hot priority encoder.
*/
object SelectFirstN
{
def apply(in: UInt, n: Int) = {
val sels = Wire(Vec(n, UInt(in.getWidth.W)))
var mask = in
for (i <- 0 until n) {
sels(i) := PriorityEncoderOH(mask)
mask = mask & ~sels(i)
}
sels
}
}
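// Worked example (illustration only, not in the upstream source): SelectFirstN("b10110".U, 2)
// returns a Vec whose elements are "b00010".U and "b00100".U: each pick takes the lowest set
// bit still remaining and masks it off before the next pick.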
/**
* Connect the first k of n valid input interfaces to k output interfaces.
*/
class Compactor[T <: chisel3.Data](n: Int, k: Int, gen: T) extends Module
{
require(n >= k)
val io = IO(new Bundle {
val in = Vec(n, Flipped(DecoupledIO(gen)))
val out = Vec(k, DecoupledIO(gen))
})
if (n == k) {
io.out <> io.in
} else {
val counts = io.in.map(_.valid).scanLeft(1.U(k.W)) ((c,e) => Mux(e, (c<<1)(k-1,0), c))
val sels = Transpose(VecInit(counts map (c => VecInit(c.asBools)))) map (col =>
(col zip io.in.map(_.valid)) map {case (c,v) => c && v})
val in_readys = counts map (row => (row.asBools zip io.out.map(_.ready)) map {case (c,r) => c && r} reduce (_||_))
val out_valids = sels map (col => col.reduce(_||_))
val out_data = sels map (s => Mux1H(s, io.in.map(_.bits)))
in_readys zip io.in foreach {case (r,i) => i.ready := r}
out_valids zip out_data zip io.out foreach {case ((v,d),o) => o.valid := v; o.bits := d}
}
}
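// Illustration only, not part of the upstream BOOM util.scala: a Compactor(4, 2, ...) forwards
// the payloads of the first two asserted inputs (by index) to the two outputs each cycle.
// The wrapper below is a hypothetical hookup.
class CompactorExample extends Module
{
  val io = IO(new Bundle {
    val in  = Vec(4, Flipped(DecoupledIO(UInt(8.W))))
    val out = Vec(2, DecoupledIO(UInt(8.W)))
  })
  val compactor = Module(new Compactor(4, 2, UInt(8.W)))
  compactor.io.in <> io.in
  io.out <> compactor.io.out
}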
/**
* Create a queue that can be killed with a branch kill signal.
* Assumption: enq.valid only high if not killed by branch (so don't check IsKilled on io.enq).
*/
class BranchKillableQueue[T <: boom.v4.common.HasBoomUOP](gen: T, entries: Int, flush_fn: boom.v4.common.MicroOp => Bool = u => true.B, fastDeq: Boolean = false)
(implicit p: org.chipsalliance.cde.config.Parameters)
extends boom.v4.common.BoomModule()(p)
with boom.v4.common.HasBoomCoreParameters
{
val io = IO(new Bundle {
val enq = Flipped(Decoupled(gen))
val deq = Decoupled(gen)
val brupdate = Input(new BrUpdateInfo())
val flush = Input(Bool())
val empty = Output(Bool())
val count = Output(UInt(log2Ceil(entries).W))
})
if (fastDeq && entries > 1) {
// Pipeline dequeue selection so the mux gets an entire cycle
val main = Module(new BranchKillableQueue(gen, entries-1, flush_fn, false))
val out_reg = Reg(gen)
val out_valid = RegInit(false.B)
val out_uop = Reg(new MicroOp)
main.io.enq <> io.enq
main.io.brupdate := io.brupdate
main.io.flush := io.flush
io.empty := main.io.empty && !out_valid
io.count := main.io.count + out_valid
io.deq.valid := out_valid
io.deq.bits := out_reg
io.deq.bits.uop := out_uop
out_uop := UpdateBrMask(io.brupdate, out_uop)
out_valid := out_valid && !IsKilledByBranch(io.brupdate, false.B, out_uop) && !(io.flush && flush_fn(out_uop))
main.io.deq.ready := false.B
when (io.deq.fire || !out_valid) {
out_valid := main.io.deq.valid && !IsKilledByBranch(io.brupdate, false.B, main.io.deq.bits.uop) && !(io.flush && flush_fn(main.io.deq.bits.uop))
out_reg := main.io.deq.bits
out_uop := UpdateBrMask(io.brupdate, main.io.deq.bits.uop)
main.io.deq.ready := true.B
}
} else {
val ram = Mem(entries, gen)
val valids = RegInit(VecInit(Seq.fill(entries) {false.B}))
val uops = Reg(Vec(entries, new MicroOp))
val enq_ptr = Counter(entries)
val deq_ptr = Counter(entries)
val maybe_full = RegInit(false.B)
val ptr_match = enq_ptr.value === deq_ptr.value
io.empty := ptr_match && !maybe_full
val full = ptr_match && maybe_full
val do_enq = WireInit(io.enq.fire && !IsKilledByBranch(io.brupdate, false.B, io.enq.bits.uop) && !(io.flush && flush_fn(io.enq.bits.uop)))
val do_deq = WireInit((io.deq.ready || !valids(deq_ptr.value)) && !io.empty)
for (i <- 0 until entries) {
val mask = uops(i).br_mask
val uop = uops(i)
valids(i) := valids(i) && !IsKilledByBranch(io.brupdate, false.B, mask) && !(io.flush && flush_fn(uop))
when (valids(i)) {
uops(i).br_mask := GetNewBrMask(io.brupdate, mask)
}
}
when (do_enq) {
ram(enq_ptr.value) := io.enq.bits
valids(enq_ptr.value) := true.B
uops(enq_ptr.value) := io.enq.bits.uop
uops(enq_ptr.value).br_mask := GetNewBrMask(io.brupdate, io.enq.bits.uop)
enq_ptr.inc()
}
when (do_deq) {
valids(deq_ptr.value) := false.B
deq_ptr.inc()
}
when (do_enq =/= do_deq) {
maybe_full := do_enq
}
io.enq.ready := !full
val out = Wire(gen)
out := ram(deq_ptr.value)
out.uop := uops(deq_ptr.value)
io.deq.valid := !io.empty && valids(deq_ptr.value)
io.deq.bits := out
val ptr_diff = enq_ptr.value - deq_ptr.value
if (isPow2(entries)) {
io.count := Cat(maybe_full && ptr_match, ptr_diff)
}
else {
io.count := Mux(ptr_match,
Mux(maybe_full,
entries.asUInt, 0.U),
Mux(deq_ptr.value > enq_ptr.value,
entries.asUInt + ptr_diff, ptr_diff))
}
}
}
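// Usage sketch (illustration only, not in the upstream source): the queue behaves like a
// normal Chisel Queue, except entries whose uop.br_mask matches a mispredicted branch, or
// for which `flush_fn` is true during a pipeline flush, are silently dropped. Hypothetical
// hookup inside an enclosing BoomModule (which supplies the implicit Parameters);
// `SomePayload` is an invented bundle that mixes in HasBoomUOP:
//
//   val q = Module(new BranchKillableQueue(new SomePayload, entries = 8))
//   q.io.enq      <> producer
//   q.io.brupdate := io.brupdate
//   q.io.flush    := io.flush
//   consumer      <> q.io.deq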
// ------------------------------------------
// Printf helper functions
// ------------------------------------------
object BoolToChar
{
/**
* Take in a Chisel Bool and convert it into a Str
* based on the Chars given
*
* @param c_bool Chisel Bool
* @param trueChar Scala Char if bool is true
* @param falseChar Scala Char if bool is false
* @return UInt ASCII Char for "trueChar" or "falseChar"
*/
def apply(c_bool: Bool, trueChar: Char, falseChar: Char = '-'): UInt = {
Mux(c_bool, Str(trueChar), Str(falseChar))
}
}
object CfiTypeToChars
{
/**
* Get a Vec of Strs that can be used for printing
*
* @param cfi_type specific cfi type
* @return Vec of Strs (must be indexed to get specific char)
*/
def apply(cfi_type: UInt) = {
val strings = Seq("----", "BR ", "JAL ", "JALR")
val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) })
multiVec(cfi_type)
}
}
object BpdTypeToChars
{
/**
* Get a Vec of Strs that can be used for printing
*
* @param bpd_type specific bpd type
* @return Vec of Strs (must be indexed to get specific char)
*/
def apply(bpd_type: UInt) = {
val strings = Seq("BR ", "JUMP", "----", "RET ", "----", "CALL", "----", "----")
val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) })
multiVec(bpd_type)
}
}
object RobTypeToChars
{
/**
* Get a Vec of Strs that can be used for printing
*
* @param rob_type specific rob type
* @return Vec of Strs (must be indexed to get specific char)
*/
def apply(rob_type: UInt) = {
val strings = Seq("RST", "NML", "RBK", " WT")
val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) })
multiVec(rob_type)
}
}
object XRegToChars
{
/**
* Get a Vec of Strs that can be used for printing
*
* @param xreg specific register number
* @return Vec of Strs (must be indexed to get specific char)
*/
def apply(xreg: UInt) = {
val strings = Seq(" x0", " ra", " sp", " gp",
" tp", " t0", " t1", " t2",
" s0", " s1", " a0", " a1",
" a2", " a3", " a4", " a5",
" a6", " a7", " s2", " s3",
" s4", " s5", " s6", " s7",
" s8", " s9", "s10", "s11",
" t3", " t4", " t5", " t6")
val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) })
multiVec(xreg)
}
}
object FPRegToChars
{
/**
* Get a Vec of Strs that can be used for printing
*
* @param fpreg specific register number
* @return Vec of Strs (must be indexed to get specific char)
*/
def apply(fpreg: UInt) = {
val strings = Seq(" ft0", " ft1", " ft2", " ft3",
" ft4", " ft5", " ft6", " ft7",
" fs0", " fs1", " fa0", " fa1",
" fa2", " fa3", " fa4", " fa5",
" fa6", " fa7", " fs2", " fs3",
" fs4", " fs5", " fs6", " fs7",
" fs8", " fs9", "fs10", "fs11",
" ft8", " ft9", "ft10", "ft11")
val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) })
multiVec(fpreg)
}
}
object BoomCoreStringPrefix
{
/**
* Add prefix to BOOM strings (currently only adds the hartId)
*
* @param strs list of strings
* @return String combining the list with the prefix per line
*/
def apply(strs: String*)(implicit p: Parameters) = {
val prefix = "[C" + s"${p(TileKey).tileId}" + "] "
strs.map(str => prefix + str + "\n").mkString("")
}
}
class BranchKillablePipeline[T <: boom.v4.common.HasBoomUOP](gen: T, stages: Int)
(implicit p: org.chipsalliance.cde.config.Parameters)
extends boom.v4.common.BoomModule()(p)
with boom.v4.common.HasBoomCoreParameters
{
val io = IO(new Bundle {
val req = Input(Valid(gen))
val flush = Input(Bool())
val brupdate = Input(new BrUpdateInfo)
val resp = Output(Vec(stages, Valid(gen)))
})
require(stages > 0)
val uops = Reg(Vec(stages, Valid(gen)))
uops(0).valid := io.req.valid && !IsKilledByBranch(io.brupdate, io.flush, io.req.bits)
uops(0).bits := UpdateBrMask(io.brupdate, io.req.bits)
for (i <- 1 until stages) {
uops(i).valid := uops(i-1).valid && !IsKilledByBranch(io.brupdate, io.flush, uops(i-1).bits)
uops(i).bits := UpdateBrMask(io.brupdate, uops(i-1).bits)
}
for (i <- 0 until stages) { when (reset.asBool) { uops(i).valid := false.B } }
io.resp := uops
}
| module ram_15x137( // @[util.scala:503:22]
input [3:0] R0_addr,
input R0_en,
input R0_clk,
output [136:0] R0_data,
input [3:0] W0_addr,
input W0_en,
input W0_clk,
input [136:0] W0_data
);
reg [136:0] Memory[0:14]; // @[util.scala:503:22]
always @(posedge W0_clk) begin // @[util.scala:503:22]
if (W0_en & 1'h1) // @[util.scala:503:22]
Memory[W0_addr] <= W0_data; // @[util.scala:503:22]
end // always @(posedge)
assign R0_data = R0_en ? Memory[R0_addr] : 137'bx; // @[util.scala:503:22]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File Monitor.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceLine
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import freechips.rocketchip.diplomacy.EnableMonitors
import freechips.rocketchip.formal.{MonitorDirection, IfThen, Property, PropertyClass, TestplanTestType, TLMonitorStrictMode}
import freechips.rocketchip.util.PlusArg
case class TLMonitorArgs(edge: TLEdge)
abstract class TLMonitorBase(args: TLMonitorArgs) extends Module
{
val io = IO(new Bundle {
val in = Input(new TLBundle(args.edge.bundle))
})
def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit
legalize(io.in, args.edge, reset)
}
object TLMonitor {
def apply(enable: Boolean, node: TLNode)(implicit p: Parameters): TLNode = {
if (enable) {
EnableMonitors { implicit p => node := TLEphemeralNode()(ValName("monitor")) }
} else { node }
}
}
class TLMonitor(args: TLMonitorArgs, monitorDir: MonitorDirection = MonitorDirection.Monitor) extends TLMonitorBase(args)
{
require (args.edge.params(TLMonitorStrictMode) || (! args.edge.params(TestplanTestType).formal))
val cover_prop_class = PropertyClass.Default
//Like assert but can flip to being an assumption for formal verification
def monAssert(cond: Bool, message: String): Unit =
if (monitorDir == MonitorDirection.Monitor) {
assert(cond, message)
} else {
Property(monitorDir, cond, message, PropertyClass.Default)
}
def assume(cond: Bool, message: String): Unit =
if (monitorDir == MonitorDirection.Monitor) {
assert(cond, message)
} else {
Property(monitorDir.flip, cond, message, PropertyClass.Default)
}
def extra = {
args.edge.sourceInfo match {
case SourceLine(filename, line, col) => s" (connected at $filename:$line:$col)"
case _ => ""
}
}
def visible(address: UInt, source: UInt, edge: TLEdge) =
edge.client.clients.map { c =>
!c.sourceId.contains(source) ||
c.visibility.map(_.contains(address)).reduce(_ || _)
}.reduce(_ && _)
def legalizeFormatA(bundle: TLBundleA, edge: TLEdge): Unit = {
//switch this flag to turn on diplomacy in error messages
def diplomacyInfo = if (true) "" else "\nThe diplomacy information for the edge is as follows:\n" + edge.formatEdge + "\n"
monAssert (TLMessages.isA(bundle.opcode), "'A' channel has invalid opcode" + extra)
// Reuse these subexpressions to save some firrtl lines
val source_ok = edge.client.contains(bundle.source)
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val mask = edge.full_mask(bundle)
monAssert (visible(edge.address(bundle), bundle.source, edge), "'A' channel carries an address illegal for the specified bank visibility")
//The monitor doesn’t check for acquire T vs acquire B, it assumes that acquire B implies acquire T and only checks for acquire B
//TODO: check for acquireT?
when (bundle.opcode === TLMessages.AcquireBlock) {
monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock from a client which does not support Probe" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel AcquireBlock carries invalid source ID" + diplomacyInfo + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquireBlock smaller than a beat" + extra)
monAssert (is_aligned, "'A' channel AcquireBlock address not aligned to size" + extra)
monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquireBlock carries invalid grow param" + extra)
monAssert (~bundle.mask === 0.U, "'A' channel AcquireBlock contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel AcquireBlock is corrupt" + extra)
}
when (bundle.opcode === TLMessages.AcquirePerm) {
monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm from a client which does not support Probe" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel AcquirePerm carries invalid source ID" + diplomacyInfo + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquirePerm smaller than a beat" + extra)
monAssert (is_aligned, "'A' channel AcquirePerm address not aligned to size" + extra)
monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquirePerm carries invalid grow param" + extra)
monAssert (bundle.param =/= TLPermissions.NtoB, "'A' channel AcquirePerm requests NtoB" + extra)
monAssert (~bundle.mask === 0.U, "'A' channel AcquirePerm contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel AcquirePerm is corrupt" + extra)
}
when (bundle.opcode === TLMessages.Get) {
monAssert (edge.master.emitsGet(bundle.source, bundle.size), "'A' channel carries Get type which master claims it can't emit" + diplomacyInfo + extra)
monAssert (edge.slave.supportsGetSafe(edge.address(bundle), bundle.size, None), "'A' channel carries Get type which slave claims it can't support" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel Get carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Get address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel Get carries invalid param" + extra)
monAssert (bundle.mask === mask, "'A' channel Get contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel Get is corrupt" + extra)
}
when (bundle.opcode === TLMessages.PutFullData) {
monAssert (edge.master.emitsPutFull(bundle.source, bundle.size) && edge.slave.supportsPutFullSafe(edge.address(bundle), bundle.size), "'A' channel carries PutFull type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel PutFull carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel PutFull address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel PutFull carries invalid param" + extra)
monAssert (bundle.mask === mask, "'A' channel PutFull contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.PutPartialData) {
monAssert (edge.master.emitsPutPartial(bundle.source, bundle.size) && edge.slave.supportsPutPartialSafe(edge.address(bundle), bundle.size), "'A' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel PutPartial carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel PutPartial address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel PutPartial carries invalid param" + extra)
monAssert ((bundle.mask & ~mask) === 0.U, "'A' channel PutPartial contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.ArithmeticData) {
monAssert (edge.master.emitsArithmetic(bundle.source, bundle.size) && edge.slave.supportsArithmeticSafe(edge.address(bundle), bundle.size), "'A' channel carries Arithmetic type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Arithmetic carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Arithmetic address not aligned to size" + extra)
monAssert (TLAtomics.isArithmetic(bundle.param), "'A' channel Arithmetic carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Arithmetic contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.LogicalData) {
monAssert (edge.master.emitsLogical(bundle.source, bundle.size) && edge.slave.supportsLogicalSafe(edge.address(bundle), bundle.size), "'A' channel carries Logical type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Logical carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Logical address not aligned to size" + extra)
monAssert (TLAtomics.isLogical(bundle.param), "'A' channel Logical carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Logical contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.Hint) {
monAssert (edge.master.emitsHint(bundle.source, bundle.size) && edge.slave.supportsHintSafe(edge.address(bundle), bundle.size), "'A' channel carries Hint type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Hint carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Hint address not aligned to size" + extra)
monAssert (TLHints.isHints(bundle.param), "'A' channel Hint carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Hint contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel Hint is corrupt" + extra)
}
}
def legalizeFormatB(bundle: TLBundleB, edge: TLEdge): Unit = {
monAssert (TLMessages.isB(bundle.opcode), "'B' channel has invalid opcode" + extra)
monAssert (visible(edge.address(bundle), bundle.source, edge), "'B' channel carries an address illegal for the specified bank visibility")
// Reuse these subexpressions to save some firrtl lines
val address_ok = edge.manager.containsSafe(edge.address(bundle))
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val mask = edge.full_mask(bundle)
val legal_source = Mux1H(edge.client.find(bundle.source), edge.client.clients.map(c => c.sourceId.start.U)) === bundle.source
when (bundle.opcode === TLMessages.Probe) {
assume (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'B' channel carries Probe type which is unexpected using diplomatic parameters" + extra)
assume (address_ok, "'B' channel Probe carries unmanaged address" + extra)
assume (legal_source, "'B' channel Probe carries source that is not first source" + extra)
assume (is_aligned, "'B' channel Probe address not aligned to size" + extra)
assume (TLPermissions.isCap(bundle.param), "'B' channel Probe carries invalid cap param" + extra)
assume (bundle.mask === mask, "'B' channel Probe contains invalid mask" + extra)
assume (!bundle.corrupt, "'B' channel Probe is corrupt" + extra)
}
when (bundle.opcode === TLMessages.Get) {
monAssert (edge.master.supportsGet(edge.source(bundle), bundle.size) && edge.slave.emitsGetSafe(edge.address(bundle), bundle.size), "'B' channel carries Get type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel Get carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Get carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Get address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel Get carries invalid param" + extra)
monAssert (bundle.mask === mask, "'B' channel Get contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'B' channel Get is corrupt" + extra)
}
when (bundle.opcode === TLMessages.PutFullData) {
monAssert (edge.master.supportsPutFull(edge.source(bundle), bundle.size) && edge.slave.emitsPutFullSafe(edge.address(bundle), bundle.size), "'B' channel carries PutFull type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel PutFull carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel PutFull carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel PutFull address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel PutFull carries invalid param" + extra)
monAssert (bundle.mask === mask, "'B' channel PutFull contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.PutPartialData) {
monAssert (edge.master.supportsPutPartial(edge.source(bundle), bundle.size) && edge.slave.emitsPutPartialSafe(edge.address(bundle), bundle.size), "'B' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel PutPartial carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel PutPartial carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel PutPartial address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel PutPartial carries invalid param" + extra)
monAssert ((bundle.mask & ~mask) === 0.U, "'B' channel PutPartial contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.ArithmeticData) {
monAssert (edge.master.supportsArithmetic(edge.source(bundle), bundle.size) && edge.slave.emitsArithmeticSafe(edge.address(bundle), bundle.size), "'B' channel carries Arithmetic type unsupported by client" + extra)
monAssert (address_ok, "'B' channel Arithmetic carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Arithmetic carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Arithmetic address not aligned to size" + extra)
monAssert (TLAtomics.isArithmetic(bundle.param), "'B' channel Arithmetic carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'B' channel Arithmetic contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.LogicalData) {
monAssert (edge.master.supportsLogical(edge.source(bundle), bundle.size) && edge.slave.emitsLogicalSafe(edge.address(bundle), bundle.size), "'B' channel carries Logical type unsupported by client" + extra)
monAssert (address_ok, "'B' channel Logical carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Logical carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Logical address not aligned to size" + extra)
monAssert (TLAtomics.isLogical(bundle.param), "'B' channel Logical carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'B' channel Logical contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.Hint) {
monAssert (edge.master.supportsHint(edge.source(bundle), bundle.size) && edge.slave.emitsHintSafe(edge.address(bundle), bundle.size), "'B' channel carries Hint type unsupported by client" + extra)
monAssert (address_ok, "'B' channel Hint carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Hint carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Hint address not aligned to size" + extra)
monAssert (bundle.mask === mask, "'B' channel Hint contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'B' channel Hint is corrupt" + extra)
}
}
def legalizeFormatC(bundle: TLBundleC, edge: TLEdge): Unit = {
monAssert (TLMessages.isC(bundle.opcode), "'C' channel has invalid opcode" + extra)
val source_ok = edge.client.contains(bundle.source)
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val address_ok = edge.manager.containsSafe(edge.address(bundle))
monAssert (visible(edge.address(bundle), bundle.source, edge), "'C' channel carries an address illegal for the specified bank visibility")
when (bundle.opcode === TLMessages.ProbeAck) {
monAssert (address_ok, "'C' channel ProbeAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel ProbeAck carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAck smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ProbeAck address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAck carries invalid report param" + extra)
monAssert (!bundle.corrupt, "'C' channel ProbeAck is corrupt" + extra)
}
when (bundle.opcode === TLMessages.ProbeAckData) {
monAssert (address_ok, "'C' channel ProbeAckData carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel ProbeAckData carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAckData smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ProbeAckData address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAckData carries invalid report param" + extra)
}
when (bundle.opcode === TLMessages.Release) {
monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries Release type unsupported by manager" + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra)
monAssert (source_ok, "'C' channel Release carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel Release smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel Release address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel Release carries invalid report param" + extra)
monAssert (!bundle.corrupt, "'C' channel Release is corrupt" + extra)
}
when (bundle.opcode === TLMessages.ReleaseData) {
monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData type unsupported by manager" + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData from a client which does not support Probe" + extra)
monAssert (source_ok, "'C' channel ReleaseData carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ReleaseData smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ReleaseData address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ReleaseData carries invalid report param" + extra)
}
when (bundle.opcode === TLMessages.AccessAck) {
monAssert (address_ok, "'C' channel AccessAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel AccessAck carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel AccessAck address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel AccessAck carries invalid param" + extra)
monAssert (!bundle.corrupt, "'C' channel AccessAck is corrupt" + extra)
}
when (bundle.opcode === TLMessages.AccessAckData) {
monAssert (address_ok, "'C' channel AccessAckData carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel AccessAckData carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel AccessAckData address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel AccessAckData carries invalid param" + extra)
}
when (bundle.opcode === TLMessages.HintAck) {
monAssert (address_ok, "'C' channel HintAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel HintAck carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel HintAck address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel HintAck carries invalid param" + extra)
monAssert (!bundle.corrupt, "'C' channel HintAck is corrupt" + extra)
}
}
def legalizeFormatD(bundle: TLBundleD, edge: TLEdge): Unit = {
assume (TLMessages.isD(bundle.opcode), "'D' channel has invalid opcode" + extra)
val source_ok = edge.client.contains(bundle.source)
val sink_ok = bundle.sink < edge.manager.endSinkId.U
val deny_put_ok = edge.manager.mayDenyPut.B
val deny_get_ok = edge.manager.mayDenyGet.B
when (bundle.opcode === TLMessages.ReleaseAck) {
assume (source_ok, "'D' channel ReleaseAck carries invalid source ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel ReleaseAck smaller than a beat" + extra)
assume (bundle.param === 0.U, "'D' channel ReleaseAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel ReleaseAck is corrupt" + extra)
assume (!bundle.denied, "'D' channel ReleaseAck is denied" + extra)
}
when (bundle.opcode === TLMessages.Grant) {
assume (source_ok, "'D' channel Grant carries invalid source ID" + extra)
assume (sink_ok, "'D' channel Grant carries invalid sink ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel Grant smaller than a beat" + extra)
assume (TLPermissions.isCap(bundle.param), "'D' channel Grant carries invalid cap param" + extra)
assume (bundle.param =/= TLPermissions.toN, "'D' channel Grant carries toN param" + extra)
assume (!bundle.corrupt, "'D' channel Grant is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel Grant is denied" + extra)
}
when (bundle.opcode === TLMessages.GrantData) {
assume (source_ok, "'D' channel GrantData carries invalid source ID" + extra)
assume (sink_ok, "'D' channel GrantData carries invalid sink ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel GrantData smaller than a beat" + extra)
assume (TLPermissions.isCap(bundle.param), "'D' channel GrantData carries invalid cap param" + extra)
assume (bundle.param =/= TLPermissions.toN, "'D' channel GrantData carries toN param" + extra)
assume (!bundle.denied || bundle.corrupt, "'D' channel GrantData is denied but not corrupt" + extra)
assume (deny_get_ok || !bundle.denied, "'D' channel GrantData is denied" + extra)
}
when (bundle.opcode === TLMessages.AccessAck) {
assume (source_ok, "'D' channel AccessAck carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel AccessAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel AccessAck is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel AccessAck is denied" + extra)
}
when (bundle.opcode === TLMessages.AccessAckData) {
assume (source_ok, "'D' channel AccessAckData carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel AccessAckData carries invalid param" + extra)
assume (!bundle.denied || bundle.corrupt, "'D' channel AccessAckData is denied but not corrupt" + extra)
assume (deny_get_ok || !bundle.denied, "'D' channel AccessAckData is denied" + extra)
}
when (bundle.opcode === TLMessages.HintAck) {
assume (source_ok, "'D' channel HintAck carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel HintAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel HintAck is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel HintAck is denied" + extra)
}
}
def legalizeFormatE(bundle: TLBundleE, edge: TLEdge): Unit = {
val sink_ok = bundle.sink < edge.manager.endSinkId.U
monAssert (sink_ok, "'E' channel carries invalid sink ID" + extra)
}
def legalizeFormat(bundle: TLBundle, edge: TLEdge) = {
when (bundle.a.valid) { legalizeFormatA(bundle.a.bits, edge) }
when (bundle.d.valid) { legalizeFormatD(bundle.d.bits, edge) }
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
when (bundle.b.valid) { legalizeFormatB(bundle.b.bits, edge) }
when (bundle.c.valid) { legalizeFormatC(bundle.c.bits, edge) }
when (bundle.e.valid) { legalizeFormatE(bundle.e.bits, edge) }
} else {
monAssert (!bundle.b.valid, "'B' channel valid and not TL-C" + extra)
monAssert (!bundle.c.valid, "'C' channel valid and not TL-C" + extra)
monAssert (!bundle.e.valid, "'E' channel valid and not TL-C" + extra)
}
}
def legalizeMultibeatA(a: DecoupledIO[TLBundleA], edge: TLEdge): Unit = {
val a_first = edge.first(a.bits, a.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (a.valid && !a_first) {
monAssert (a.bits.opcode === opcode, "'A' channel opcode changed within multibeat operation" + extra)
monAssert (a.bits.param === param, "'A' channel param changed within multibeat operation" + extra)
monAssert (a.bits.size === size, "'A' channel size changed within multibeat operation" + extra)
monAssert (a.bits.source === source, "'A' channel source changed within multibeat operation" + extra)
monAssert (a.bits.address === address, "'A' channel address changed within multibeat operation" + extra)
}
when (a.fire && a_first) {
opcode := a.bits.opcode
param := a.bits.param
size := a.bits.size
source := a.bits.source
address := a.bits.address
}
}
def legalizeMultibeatB(b: DecoupledIO[TLBundleB], edge: TLEdge): Unit = {
val b_first = edge.first(b.bits, b.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (b.valid && !b_first) {
monAssert (b.bits.opcode === opcode, "'B' channel opcode changed within multibeat operation" + extra)
monAssert (b.bits.param === param, "'B' channel param changed within multibeat operation" + extra)
monAssert (b.bits.size === size, "'B' channel size changed within multibeat operation" + extra)
monAssert (b.bits.source === source, "'B' channel source changed within multibeat operation" + extra)
monAssert (b.bits.address === address, "'B' channel address changed within multibeat operation" + extra)
}
when (b.fire && b_first) {
opcode := b.bits.opcode
param := b.bits.param
size := b.bits.size
source := b.bits.source
address := b.bits.address
}
}
def legalizeADSourceFormal(bundle: TLBundle, edge: TLEdge): Unit = {
// Symbolic variable
val sym_source = Wire(UInt(edge.client.endSourceId.W))
// TODO: Connect sym_source to a fixed value for simulation and to a
// free wire in formal
sym_source := 0.U
// Type casting Int to UInt
val maxSourceId = Wire(UInt(edge.client.endSourceId.W))
maxSourceId := edge.client.endSourceId.U
// Delayed version of sym_source
val sym_source_d = Reg(UInt(edge.client.endSourceId.W))
sym_source_d := sym_source
// These will be constraints for FV setup
Property(
MonitorDirection.Monitor,
(sym_source === sym_source_d),
"sym_source should remain stable",
PropertyClass.Default)
Property(
MonitorDirection.Monitor,
(sym_source <= maxSourceId),
"sym_source should take legal value",
PropertyClass.Default)
val my_resp_pend = RegInit(false.B)
val my_opcode = Reg(UInt())
val my_size = Reg(UInt())
val a_first = bundle.a.valid && edge.first(bundle.a.bits, bundle.a.fire)
val d_first = bundle.d.valid && edge.first(bundle.d.bits, bundle.d.fire)
val my_a_first_beat = a_first && (bundle.a.bits.source === sym_source)
val my_d_first_beat = d_first && (bundle.d.bits.source === sym_source)
val my_clr_resp_pend = (bundle.d.fire && my_d_first_beat)
val my_set_resp_pend = (bundle.a.fire && my_a_first_beat && !my_clr_resp_pend)
when (my_set_resp_pend) {
my_resp_pend := true.B
} .elsewhen (my_clr_resp_pend) {
my_resp_pend := false.B
}
when (my_a_first_beat) {
my_opcode := bundle.a.bits.opcode
my_size := bundle.a.bits.size
}
val my_resp_size = Mux(my_a_first_beat, bundle.a.bits.size, my_size)
val my_resp_opcode = Mux(my_a_first_beat, bundle.a.bits.opcode, my_opcode)
val my_resp_opcode_legal = Wire(Bool())
when ((my_resp_opcode === TLMessages.Get) || (my_resp_opcode === TLMessages.ArithmeticData) ||
(my_resp_opcode === TLMessages.LogicalData)) {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAckData)
} .elsewhen ((my_resp_opcode === TLMessages.PutFullData) || (my_resp_opcode === TLMessages.PutPartialData)) {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAck)
} .otherwise {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.HintAck)
}
monAssert (IfThen(my_resp_pend, !my_a_first_beat),
"Request message should not be sent with a source ID, for which a response message " +
"is already pending (not received until current cycle) for a prior request message " +
"with the same source ID" + extra)
assume (IfThen(my_clr_resp_pend, (my_set_resp_pend || my_resp_pend)),
"Response message should be accepted with a source ID only if a request message with the " +
"same source ID has been accepted or is being accepted in the current cycle" + extra)
assume (IfThen(my_d_first_beat, (my_a_first_beat || my_resp_pend)),
"Response message should be sent with a source ID only if a request message with the " +
"same source ID has been accepted or is being sent in the current cycle" + extra)
assume (IfThen(my_d_first_beat, (bundle.d.bits.size === my_resp_size)),
"If d_valid is 1, then d_size should be the same as a_size of the corresponding request " +
"message" + extra)
assume (IfThen(my_d_first_beat, my_resp_opcode_legal),
"If d_valid is 1, then d_opcode should correspond with a_opcode of the corresponding " +
"request message" + extra)
}
def legalizeMultibeatC(c: DecoupledIO[TLBundleC], edge: TLEdge): Unit = {
val c_first = edge.first(c.bits, c.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (c.valid && !c_first) {
monAssert (c.bits.opcode === opcode, "'C' channel opcode changed within multibeat operation" + extra)
monAssert (c.bits.param === param, "'C' channel param changed within multibeat operation" + extra)
monAssert (c.bits.size === size, "'C' channel size changed within multibeat operation" + extra)
monAssert (c.bits.source === source, "'C' channel source changed within multibeat operation" + extra)
monAssert (c.bits.address === address, "'C' channel address changed within multibeat operation" + extra)
}
when (c.fire && c_first) {
opcode := c.bits.opcode
param := c.bits.param
size := c.bits.size
source := c.bits.source
address := c.bits.address
}
}
def legalizeMultibeatD(d: DecoupledIO[TLBundleD], edge: TLEdge): Unit = {
val d_first = edge.first(d.bits, d.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val sink = Reg(UInt())
val denied = Reg(Bool())
when (d.valid && !d_first) {
assume (d.bits.opcode === opcode, "'D' channel opcode changed within multibeat operation" + extra)
assume (d.bits.param === param, "'D' channel param changed within multibeat operation" + extra)
assume (d.bits.size === size, "'D' channel size changed within multibeat operation" + extra)
assume (d.bits.source === source, "'D' channel source changed within multibeat operation" + extra)
assume (d.bits.sink === sink, "'D' channel sink changed within multibeat operation" + extra)
assume (d.bits.denied === denied, "'D' channel denied changed within multibeat operation" + extra)
}
when (d.fire && d_first) {
opcode := d.bits.opcode
param := d.bits.param
size := d.bits.size
source := d.bits.source
sink := d.bits.sink
denied := d.bits.denied
}
}
def legalizeMultibeat(bundle: TLBundle, edge: TLEdge): Unit = {
legalizeMultibeatA(bundle.a, edge)
legalizeMultibeatD(bundle.d, edge)
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
legalizeMultibeatB(bundle.b, edge)
legalizeMultibeatC(bundle.c, edge)
}
}
// This is left in for almond, which doesn't adhere to the TileLink protocol
@deprecated("Use legalizeADSource instead if possible","")
def legalizeADSourceOld(bundle: TLBundle, edge: TLEdge): Unit = {
val inflight = RegInit(0.U(edge.client.endSourceId.W))
val a_first = edge.first(bundle.a.bits, bundle.a.fire)
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
val a_set = WireInit(0.U(edge.client.endSourceId.W))
when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) {
a_set := UIntToOH(bundle.a.bits.source)
assert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra)
}
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
assume((a_set | inflight)(bundle.d.bits.source), "'D' channel acknowledged for nothing inflight" + extra)
}
if (edge.manager.minLatency > 0) {
assume(a_set =/= d_clr || !a_set.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra)
}
inflight := (inflight | a_set) & ~d_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
assert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeADSource(bundle: TLBundle, edge: TLEdge): Unit = {
val a_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset)
val a_opcode_bus_size = 3 + 1 //opcode size is 3, but add one so that 0 is not mapped to anything
val log_a_opcode_bus_size = log2Ceil(a_opcode_bus_size)
val log_a_size_bus_size = log2Ceil(a_size_bus_size)
def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits
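// Worked example (a sketch, assuming sizeBits == 3): a_size_bus_size is then 4 and
// log_a_size_bus_size is 2, so every source ID owns a 4-bit slot of inflight_sizes.
// A recorded size s is stored as (s << 1) | 1, so a slot value of 0 means "no request
// outstanding"; size_to_numfullbits(3.U) evaluates to "b111".U, i.e. three set bits.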
val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) // size up to avoid width error
inflight.suggestName("inflight")
val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
inflight_opcodes.suggestName("inflight_opcodes")
val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
inflight_sizes.suggestName("inflight_sizes")
val a_first = edge.first(bundle.a.bits, bundle.a.fire)
a_first.suggestName("a_first")
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
d_first.suggestName("d_first")
val a_set = WireInit(0.U(edge.client.endSourceId.W))
val a_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
a_set.suggestName("a_set")
a_set_wo_ready.suggestName("a_set_wo_ready")
val a_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
a_opcodes_set.suggestName("a_opcodes_set")
val a_sizes_set = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
a_sizes_set.suggestName("a_sizes_set")
val a_opcode_lookup = WireInit(0.U((a_opcode_bus_size - 1).W))
a_opcode_lookup.suggestName("a_opcode_lookup")
a_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_a_opcode_bus_size.U) & size_to_numfullbits(1.U << log_a_opcode_bus_size.U)) >> 1.U
val a_size_lookup = WireInit(0.U((1 << log_a_size_bus_size).W))
a_size_lookup.suggestName("a_size_lookup")
a_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_a_size_bus_size.U) & size_to_numfullbits(1.U << log_a_size_bus_size.U)) >> 1.U
val responseMap = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.Grant, TLMessages.Grant))
val responseMapSecondOption = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.GrantData, TLMessages.Grant))
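// Both tables are indexed by the recorded 'A' opcode: e.g. Get (opcode 4) must be
// answered with AccessAckData, while AcquireBlock (opcode 6) may legally be answered
// with either Grant or GrantData, which is why a second-option table exists.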
val a_opcodes_set_interm = WireInit(0.U(a_opcode_bus_size.W))
a_opcodes_set_interm.suggestName("a_opcodes_set_interm")
val a_sizes_set_interm = WireInit(0.U(a_size_bus_size.W))
a_sizes_set_interm.suggestName("a_sizes_set_interm")
when (bundle.a.valid && a_first && edge.isRequest(bundle.a.bits)) {
a_set_wo_ready := UIntToOH(bundle.a.bits.source)
}
when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) {
a_set := UIntToOH(bundle.a.bits.source)
a_opcodes_set_interm := (bundle.a.bits.opcode << 1.U) | 1.U
a_sizes_set_interm := (bundle.a.bits.size << 1.U) | 1.U
a_opcodes_set := (a_opcodes_set_interm) << (bundle.a.bits.source << log_a_opcode_bus_size.U)
a_sizes_set := (a_sizes_set_interm) << (bundle.a.bits.source << log_a_size_bus_size.U)
monAssert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra)
}
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
d_clr.suggestName("d_clr")
d_clr_wo_ready.suggestName("d_clr_wo_ready")
val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
d_opcodes_clr.suggestName("d_opcodes_clr")
val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
d_sizes_clr.suggestName("d_sizes_clr")
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr_wo_ready := UIntToOH(bundle.d.bits.source)
}
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
d_opcodes_clr := size_to_numfullbits(1.U << log_a_opcode_bus_size.U) << (bundle.d.bits.source << log_a_opcode_bus_size.U)
d_sizes_clr := size_to_numfullbits(1.U << log_a_size_bus_size.U) << (bundle.d.bits.source << log_a_size_bus_size.U)
}
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
val same_cycle_resp = bundle.a.valid && a_first && edge.isRequest(bundle.a.bits) && (bundle.a.bits.source === bundle.d.bits.source)
assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra)
when (same_cycle_resp) {
assume((bundle.d.bits.opcode === responseMap(bundle.a.bits.opcode)) ||
(bundle.d.bits.opcode === responseMapSecondOption(bundle.a.bits.opcode)), "'D' channel contains improper opcode response" + extra)
assume((bundle.a.bits.size === bundle.d.bits.size), "'D' channel contains improper response size" + extra)
} .otherwise {
assume((bundle.d.bits.opcode === responseMap(a_opcode_lookup)) ||
(bundle.d.bits.opcode === responseMapSecondOption(a_opcode_lookup)), "'D' channel contains improper opcode response" + extra)
assume((bundle.d.bits.size === a_size_lookup), "'D' channel contains improper response size" + extra)
}
}
when(bundle.d.valid && d_first && a_first && bundle.a.valid && (bundle.a.bits.source === bundle.d.bits.source) && !d_release_ack) {
assume((!bundle.d.ready) || bundle.a.ready, "ready check")
}
if (edge.manager.minLatency > 0) {
assume(a_set_wo_ready =/= d_clr_wo_ready || !a_set_wo_ready.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra)
}
inflight := (inflight | a_set) & ~d_clr
inflight_opcodes := (inflight_opcodes | a_opcodes_set) & ~d_opcodes_clr
inflight_sizes := (inflight_sizes | a_sizes_set) & ~d_sizes_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeCDSource(bundle: TLBundle, edge: TLEdge): Unit = {
val c_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset)
val c_opcode_bus_size = 3 + 1 //opcode size is 3, but add one so that 0 is not mapped to anything
val log_c_opcode_bus_size = log2Ceil(c_opcode_bus_size)
val log_c_size_bus_size = log2Ceil(c_size_bus_size)
def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits
val inflight = RegInit(0.U((2 max edge.client.endSourceId).W))
val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
inflight.suggestName("inflight")
inflight_opcodes.suggestName("inflight_opcodes")
inflight_sizes.suggestName("inflight_sizes")
val c_first = edge.first(bundle.c.bits, bundle.c.fire)
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
c_first.suggestName("c_first")
d_first.suggestName("d_first")
val c_set = WireInit(0.U(edge.client.endSourceId.W))
val c_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
val c_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val c_sizes_set = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
c_set.suggestName("c_set")
c_set_wo_ready.suggestName("c_set_wo_ready")
c_opcodes_set.suggestName("c_opcodes_set")
c_sizes_set.suggestName("c_sizes_set")
val c_opcode_lookup = WireInit(0.U((1 << log_c_opcode_bus_size).W))
val c_size_lookup = WireInit(0.U((1 << log_c_size_bus_size).W))
c_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_c_opcode_bus_size.U) & size_to_numfullbits(1.U << log_c_opcode_bus_size.U)) >> 1.U
c_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_c_size_bus_size.U) & size_to_numfullbits(1.U << log_c_size_bus_size.U)) >> 1.U
c_opcode_lookup.suggestName("c_opcode_lookup")
c_size_lookup.suggestName("c_size_lookup")
val c_opcodes_set_interm = WireInit(0.U(c_opcode_bus_size.W))
val c_sizes_set_interm = WireInit(0.U(c_size_bus_size.W))
c_opcodes_set_interm.suggestName("c_opcodes_set_interm")
c_sizes_set_interm.suggestName("c_sizes_set_interm")
when (bundle.c.valid && c_first && edge.isRequest(bundle.c.bits)) {
c_set_wo_ready := UIntToOH(bundle.c.bits.source)
}
when (bundle.c.fire && c_first && edge.isRequest(bundle.c.bits)) {
c_set := UIntToOH(bundle.c.bits.source)
c_opcodes_set_interm := (bundle.c.bits.opcode << 1.U) | 1.U
c_sizes_set_interm := (bundle.c.bits.size << 1.U) | 1.U
c_opcodes_set := (c_opcodes_set_interm) << (bundle.c.bits.source << log_c_opcode_bus_size.U)
c_sizes_set := (c_sizes_set_interm) << (bundle.c.bits.source << log_c_size_bus_size.U)
monAssert(!inflight(bundle.c.bits.source), "'C' channel re-used a source ID" + extra)
}
val c_probe_ack = bundle.c.bits.opcode === TLMessages.ProbeAck || bundle.c.bits.opcode === TLMessages.ProbeAckData
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
d_clr.suggestName("d_clr")
d_clr_wo_ready.suggestName("d_clr_wo_ready")
d_opcodes_clr.suggestName("d_opcodes_clr")
d_sizes_clr.suggestName("d_sizes_clr")
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
d_clr_wo_ready := UIntToOH(bundle.d.bits.source)
}
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
d_opcodes_clr := size_to_numfullbits(1.U << log_c_opcode_bus_size.U) << (bundle.d.bits.source << log_c_opcode_bus_size.U)
d_sizes_clr := size_to_numfullbits(1.U << log_c_size_bus_size.U) << (bundle.d.bits.source << log_c_size_bus_size.U)
}
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
val same_cycle_resp = bundle.c.valid && c_first && edge.isRequest(bundle.c.bits) && (bundle.c.bits.source === bundle.d.bits.source)
assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra)
when (same_cycle_resp) {
assume((bundle.d.bits.size === bundle.c.bits.size), "'D' channel contains improper response size" + extra)
} .otherwise {
assume((bundle.d.bits.size === c_size_lookup), "'D' channel contains improper response size" + extra)
}
}
when(bundle.d.valid && d_first && c_first && bundle.c.valid && (bundle.c.bits.source === bundle.d.bits.source) && d_release_ack && !c_probe_ack) {
assume((!bundle.d.ready) || bundle.c.ready, "ready check")
}
if (edge.manager.minLatency > 0) {
when (c_set_wo_ready.orR) {
assume(c_set_wo_ready =/= d_clr_wo_ready, s"'C' and 'D' concurrent, despite minlatency > 0" + extra)
}
}
inflight := (inflight | c_set) & ~d_clr
inflight_opcodes := (inflight_opcodes | c_opcodes_set) & ~d_opcodes_clr
inflight_sizes := (inflight_sizes | c_sizes_set) & ~d_sizes_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.c.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeDESink(bundle: TLBundle, edge: TLEdge): Unit = {
val inflight = RegInit(0.U(edge.manager.endSinkId.W))
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
val e_first = true.B
val d_set = WireInit(0.U(edge.manager.endSinkId.W))
when (bundle.d.fire && d_first && edge.isRequest(bundle.d.bits)) {
d_set := UIntToOH(bundle.d.bits.sink)
assume(!inflight(bundle.d.bits.sink), "'D' channel re-used a sink ID" + extra)
}
val e_clr = WireInit(0.U(edge.manager.endSinkId.W))
when (bundle.e.fire && e_first && edge.isResponse(bundle.e.bits)) {
e_clr := UIntToOH(bundle.e.bits.sink)
monAssert((d_set | inflight)(bundle.e.bits.sink), "'E' channel acknowledged for nothing inflight" + extra)
}
// edge.client.minLatency applies to BC, not DE
inflight := (inflight | d_set) & ~e_clr
}
def legalizeUnique(bundle: TLBundle, edge: TLEdge): Unit = {
val sourceBits = log2Ceil(edge.client.endSourceId)
val tooBig = 14 // >16kB worth of flight information gets to be too much
if (sourceBits > tooBig) {
println(s"WARNING: TLMonitor instantiated on a bus with source bits (${sourceBits}) > ${tooBig}; A=>D transaction flight will not be checked")
} else {
if (args.edge.params(TestplanTestType).simulation) {
if (args.edge.params(TLMonitorStrictMode)) {
legalizeADSource(bundle, edge)
legalizeCDSource(bundle, edge)
} else {
legalizeADSourceOld(bundle, edge)
}
}
if (args.edge.params(TestplanTestType).formal) {
legalizeADSourceFormal(bundle, edge)
}
}
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
// legalizeBCSourceAddress(bundle, edge) // too much state needed to synthesize...
val sinkBits = log2Ceil(edge.manager.endSinkId)
if (sinkBits > tooBig) {
println(s"WARNING: TLMonitor instantiated on a bus with sink bits (${sinkBits}) > ${tooBig}; D=>E transaction flight will not be checked")
} else {
legalizeDESink(bundle, edge)
}
}
}
def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit = {
legalizeFormat (bundle, edge)
legalizeMultibeat (bundle, edge)
legalizeUnique (bundle, edge)
}
}
File Misc.scala:
// See LICENSE.Berkeley for license details.
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util._
import chisel3.util.random.LFSR
import org.chipsalliance.cde.config.Parameters
import scala.math._
class ParameterizedBundle(implicit p: Parameters) extends Bundle
trait Clocked extends Bundle {
val clock = Clock()
val reset = Bool()
}
object DecoupledHelper {
def apply(rvs: Bool*) = new DecoupledHelper(rvs)
}
class DecoupledHelper(val rvs: Seq[Bool]) {
def fire(exclude: Bool, includes: Bool*) = {
require(rvs.contains(exclude), "Excluded Bool not present in DecoupledHelper! Note that DecoupledHelper uses referential equality for exclusion! If you don't want to exclude anything, use fire()!")
(rvs.filter(_ ne exclude) ++ includes).reduce(_ && _)
}
def fire() = {
rvs.reduce(_ && _)
}
}
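// A minimal usage sketch with hypothetical signal names (not from this file):
// val helper = DecoupledHelper(in.valid, out.ready, tokenAvailable)
// out.valid := helper.fire(out.ready) // AND of every term except the excluded one
// in.ready := helper.fire(in.valid)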
object MuxT {
def apply[T <: Data, U <: Data](cond: Bool, con: (T, U), alt: (T, U)): (T, U) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2))
def apply[T <: Data, U <: Data, W <: Data](cond: Bool, con: (T, U, W), alt: (T, U, W)): (T, U, W) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3))
def apply[T <: Data, U <: Data, W <: Data, X <: Data](cond: Bool, con: (T, U, W, X), alt: (T, U, W, X)): (T, U, W, X) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3), Mux(cond, con._4, alt._4))
}
/** Creates a cascade of n MuxTs to search for a key value. */
object MuxTLookup {
def apply[S <: UInt, T <: Data, U <: Data](key: S, default: (T, U), mapping: Seq[(S, (T, U))]): (T, U) = {
var res = default
for ((k, v) <- mapping.reverse)
res = MuxT(k === key, v, res)
res
}
def apply[S <: UInt, T <: Data, U <: Data, W <: Data](key: S, default: (T, U, W), mapping: Seq[(S, (T, U, W))]): (T, U, W) = {
var res = default
for ((k, v) <- mapping.reverse)
res = MuxT(k === key, v, res)
res
}
}
object ValidMux {
def apply[T <: Data](v1: ValidIO[T], v2: ValidIO[T]*): ValidIO[T] = {
apply(v1 +: v2.toSeq)
}
def apply[T <: Data](valids: Seq[ValidIO[T]]): ValidIO[T] = {
val out = Wire(Valid(valids.head.bits.cloneType))
out.valid := valids.map(_.valid).reduce(_ || _)
out.bits := MuxCase(valids.head.bits,
valids.map(v => (v.valid -> v.bits)))
out
}
}
object Str
{
def apply(s: String): UInt = {
var i = BigInt(0)
require(s.forall(validChar _))
for (c <- s)
i = (i << 8) | c
i.U((s.length*8).W)
}
def apply(x: Char): UInt = {
require(validChar(x))
x.U(8.W)
}
def apply(x: UInt): UInt = apply(x, 10)
def apply(x: UInt, radix: Int): UInt = {
val rad = radix.U
val w = x.getWidth
require(w > 0)
var q = x
var s = digit(q % rad)
for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
q = q / rad
s = Cat(Mux((radix == 10).B && q === 0.U, Str(' '), digit(q % rad)), s)
}
s
}
def apply(x: SInt): UInt = apply(x, 10)
def apply(x: SInt, radix: Int): UInt = {
val neg = x < 0.S
val abs = x.abs.asUInt
if (radix != 10) {
Cat(Mux(neg, Str('-'), Str(' ')), Str(abs, radix))
} else {
val rad = radix.U
val w = abs.getWidth
require(w > 0)
var q = abs
var s = digit(q % rad)
var needSign = neg
for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
q = q / rad
val placeSpace = q === 0.U
val space = Mux(needSign, Str('-'), Str(' '))
needSign = needSign && !placeSpace
s = Cat(Mux(placeSpace, space, digit(q % rad)), s)
}
Cat(Mux(needSign, Str('-'), Str(' ')), s)
}
}
private def digit(d: UInt): UInt = Mux(d < 10.U, Str('0')+d, Str(('a'-10).toChar)+d)(7,0)
private def validChar(x: Char) = x == (x & 0xFF)
}
object Split
{
def apply(x: UInt, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n0), x.extract(n0-1,0))
}
def apply(x: UInt, n1: Int, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0))
}
def apply(x: UInt, n2: Int, n1: Int, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n2), x.extract(n2-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0))
}
}
object Random
{
def apply(mod: Int, random: UInt): UInt = {
if (isPow2(mod)) random.extract(log2Ceil(mod)-1,0)
else PriorityEncoder(partition(apply(1 << log2Up(mod*8), random), mod))
}
def apply(mod: Int): UInt = apply(mod, randomizer)
def oneHot(mod: Int, random: UInt): UInt = {
if (isPow2(mod)) UIntToOH(random(log2Up(mod)-1,0))
else PriorityEncoderOH(partition(apply(1 << log2Up(mod*8), random), mod)).asUInt
}
def oneHot(mod: Int): UInt = oneHot(mod, randomizer)
private def randomizer = LFSR(16)
private def partition(value: UInt, slices: Int) =
Seq.tabulate(slices)(i => value < (((i + 1) << value.getWidth) / slices).U)
}
object Majority {
def apply(in: Set[Bool]): Bool = {
val n = (in.size >> 1) + 1
val clauses = in.subsets(n).map(_.reduce(_ && _))
clauses.reduce(_ || _)
}
def apply(in: Seq[Bool]): Bool = apply(in.toSet)
def apply(in: UInt): Bool = apply(in.asBools.toSet)
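// e.g. Majority(Seq(a, b, c)) elaborates to (a && b) || (a && c) || (b && c)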
}
object PopCountAtLeast {
private def two(x: UInt): (Bool, Bool) = x.getWidth match {
case 1 => (x.asBool, false.B)
case n =>
val half = x.getWidth / 2
val (leftOne, leftTwo) = two(x(half - 1, 0))
val (rightOne, rightTwo) = two(x(x.getWidth - 1, half))
(leftOne || rightOne, leftTwo || rightTwo || (leftOne && rightOne))
}
def apply(x: UInt, n: Int): Bool = n match {
case 0 => true.B
case 1 => x.orR
case 2 => two(x)._2
case 3 => PopCount(x) >= n.U
}
}
// This gets used everywhere, so make the smallest circuit possible ...
// Given an address and size, create a mask of beatBytes size
// eg: (0x3, 0, 4) => 0001, (0x3, 1, 4) => 0011, (0x3, 2, 4) => 1111
// groupBy applies an interleaved OR reduction; groupBy=2 take 0010 => 01
object MaskGen {
def apply(addr_lo: UInt, lgSize: UInt, beatBytes: Int, groupBy: Int = 1): UInt = {
require (groupBy >= 1 && beatBytes >= groupBy)
require (isPow2(beatBytes) && isPow2(groupBy))
val lgBytes = log2Ceil(beatBytes)
val sizeOH = UIntToOH(lgSize | 0.U(log2Up(beatBytes).W), log2Up(beatBytes)) | (groupBy*2 - 1).U
def helper(i: Int): Seq[(Bool, Bool)] = {
if (i == 0) {
Seq((lgSize >= lgBytes.asUInt, true.B))
} else {
val sub = helper(i-1)
val size = sizeOH(lgBytes - i)
val bit = addr_lo(lgBytes - i)
val nbit = !bit
Seq.tabulate (1 << i) { j =>
val (sub_acc, sub_eq) = sub(j/2)
val eq = sub_eq && (if (j % 2 == 1) bit else nbit)
val acc = sub_acc || (size && eq)
(acc, eq)
}
}
}
if (groupBy == beatBytes) 1.U else
Cat(helper(lgBytes-log2Ceil(groupBy)).map(_._1).reverse)
}
}
File PlusArg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.experimental._
import chisel3.util.HasBlackBoxResource
@deprecated("This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05")
case class PlusArgInfo(default: BigInt, docstring: String)
/** Case class for PlusArg information
*
* @tparam A scala type of the PlusArg value
* @param default optional default value
* @param docstring text to include in the help
* @param doctype description of the Verilog type of the PlusArg value (e.g. STRING, INT)
*/
private case class PlusArgContainer[A](default: Option[A], docstring: String, doctype: String)
/** Typeclass for converting a type to a doctype string
* @tparam A some type
*/
trait Doctypeable[A] {
/** Return the doctype string for some option */
def toDoctype(a: Option[A]): String
}
/** Object containing implementations of the Doctypeable typeclass */
object Doctypes {
/** Converts an Int => "INT" */
implicit val intToDoctype = new Doctypeable[Int] { def toDoctype(a: Option[Int]) = "INT" }
/** Converts a BigInt => "INT" */
implicit val bigIntToDoctype = new Doctypeable[BigInt] { def toDoctype(a: Option[BigInt]) = "INT" }
/** Converts a String => "STRING" */
implicit val stringToDoctype = new Doctypeable[String] { def toDoctype(a: Option[String]) = "STRING" }
}
class plusarg_reader(val format: String, val default: BigInt, val docstring: String, val width: Int) extends BlackBox(Map(
"FORMAT" -> StringParam(format),
"DEFAULT" -> IntParam(default),
"WIDTH" -> IntParam(width)
)) with HasBlackBoxResource {
val io = IO(new Bundle {
val out = Output(UInt(width.W))
})
addResource("/vsrc/plusarg_reader.v")
}
/* This wrapper class has no outputs, making it clear it is a simulation-only construct */
class PlusArgTimeout(val format: String, val default: BigInt, val docstring: String, val width: Int) extends Module {
val io = IO(new Bundle {
val count = Input(UInt(width.W))
})
val max = Module(new plusarg_reader(format, default, docstring, width)).io.out
when (max > 0.U) {
assert (io.count < max, s"Timeout exceeded: $docstring")
}
}
import Doctypes._
object PlusArg
{
/** PlusArg("foo") will return 42.U if the simulation is run with +foo=42
* Do not use this as an initial register value. The value is set in an
* initial block and thus accessing it from another initial is racy.
* Add a docstring to document the arg, which can be dumped in an elaboration
* pass.
*/
def apply(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32): UInt = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new plusarg_reader(name + "=%d", default, docstring, width)).io.out
}
/** PlusArg.timeout(name, default, docstring)(count) will use chisel.assert
* to kill the simulation when count exceeds the specified integer argument.
* Default 0 will never assert.
*/
def timeout(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32)(count: UInt): Unit = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new PlusArgTimeout(name + "=%d", default, docstring, width)).io.count := count
}
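// Usage sketch (hypothetical plusarg names): `val max = PlusArg("max_core_cycles")`
// reads +max_core_cycles=<n> at simulation time, and
// `PlusArg.timeout("mem_timeout", docstring = "kill after N idle cycles")(counter)`
// asserts once `counter` exceeds the runtime-supplied value (the default of 0 never fires).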
}
object PlusArgArtefacts {
private var artefacts: Map[String, PlusArgContainer[_]] = Map.empty
/* Add a new PlusArg */
@deprecated(
"Use `Some(BigInt)` to specify a `default` value. This will be removed in Rocket Chip 2020.08",
"Rocket Chip 2020.05"
)
def append(name: String, default: BigInt, docstring: String): Unit = append(name, Some(default), docstring)
/** Add a new PlusArg
*
* @tparam A scala type of the PlusArg value
* @param name name for the PlusArg
* @param default optional default value
* @param docstring text to include in the help
*/
def append[A : Doctypeable](name: String, default: Option[A], docstring: String): Unit =
artefacts = artefacts ++
Map(name -> PlusArgContainer(default, docstring, implicitly[Doctypeable[A]].toDoctype(default)))
/* From plus args, generate help text */
private def serializeHelp_cHeader(tab: String = ""): String = artefacts
.map{ case(arg, info) =>
s"""|$tab+$arg=${info.doctype}\\n\\
|$tab${" "*20}${info.docstring}\\n\\
|""".stripMargin ++ info.default.map{ case default =>
s"$tab${" "*22}(default=${default})\\n\\\n"}.getOrElse("")
}.toSeq.mkString("\\n\\\n") ++ "\""
/* From plus args, generate a char array of their names */
private def serializeArray_cHeader(tab: String = ""): String = {
val prettyTab = tab + " " * 44 // Length of 'static const ...'
s"${tab}static const char * verilog_plusargs [] = {\\\n" ++
artefacts
.map{ case(arg, _) => s"""$prettyTab"$arg",\\\n""" }
.mkString("")++
s"${prettyTab}0};"
}
/* Generate C code to be included in emulator.cc that helps with
* argument parsing based on available Verilog PlusArgs */
def serialize_cHeader(): String =
s"""|#define PLUSARG_USAGE_OPTIONS \"EMULATOR VERILOG PLUSARGS\\n\\
|${serializeHelp_cHeader(" "*7)}
|${serializeArray_cHeader()}
|""".stripMargin
}
File package.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip
import chisel3._
import chisel3.util._
import scala.math.min
import scala.collection.{immutable, mutable}
package object util {
implicit class UnzippableOption[S, T](val x: Option[(S, T)]) {
def unzip = (x.map(_._1), x.map(_._2))
}
implicit class UIntIsOneOf(private val x: UInt) extends AnyVal {
def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR
def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq)
}
implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal {
/** Like Vec.apply(idx), but tolerates indices of mismatched width */
def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0))
}
implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal {
def apply(idx: UInt): T = {
if (x.size <= 1) {
x.head
} else if (!isPow2(x.size)) {
// For non-power-of-2 seqs, reflect elements to simplify decoder
(x ++ x.takeRight(x.size & -x.size)).toSeq(idx)
} else {
// Ignore MSBs of idx
val truncIdx =
if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx
else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0)
x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) }
}
}
def extract(idx: UInt): T = VecInit(x).extract(idx)
def asUInt: UInt = Cat(x.map(_.asUInt).reverse)
def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n)
def rotate(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n)
def rotateRight(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
}
// allow bitwise ops on Seq[Bool] just like UInt
implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal {
def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b }
def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b }
def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b }
def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x
def >> (n: Int): Seq[Bool] = x drop n
def unary_~ : Seq[Bool] = x.map(!_)
def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_)
def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_)
def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_)
private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B)
}
implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal {
def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable))
def getElements: Seq[Element] = x match {
case e: Element => Seq(e)
case a: Aggregate => a.getElements.flatMap(_.getElements)
}
}
/** Any Data subtype that has a Bool member named valid. */
type DataCanBeValid = Data { val valid: Bool }
implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal {
def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable)
}
implicit class StringToAugmentedString(private val x: String) extends AnyVal {
/** converts from camel case to underscores, also removing all spaces */
def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") {
case (acc, c) if c.isUpper => acc + "_" + c.toLower
case (acc, c) if c == ' ' => acc
case (acc, c) => acc + c
}
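// e.g. "CamelCase Name".underscore == "camel_case_name"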
/** converts spaces or underscores to hyphens, also lowering case */
def kebab: String = x.toLowerCase map {
case ' ' => '-'
case '_' => '-'
case c => c
}
def named(name: Option[String]): String = {
x + name.map("_named_" + _ ).getOrElse("_with_no_name")
}
def named(name: String): String = named(Some(name))
}
implicit def uintToBitPat(x: UInt): BitPat = BitPat(x)
implicit def wcToUInt(c: WideCounter): UInt = c.value
implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal {
def sextTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x)
}
def padTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(0.U((n - x.getWidth).W), x)
}
// shifts left by n if n >= 0, or right by -n if n < 0
def << (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << n(w-1, 0)
Mux(n(w), shifted >> (1 << w), shifted)
}
// shifts right by n if n >= 0, or left by -n if n < 0
def >> (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << (1 << w) >> n(w-1, 0)
Mux(n(w), shifted, shifted >> (1 << w))
}
// Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts
def extract(hi: Int, lo: Int): UInt = {
require(hi >= lo-1)
if (hi == lo-1) 0.U
else x(hi, lo)
}
// Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts
def extractOption(hi: Int, lo: Int): Option[UInt] = {
require(hi >= lo-1)
if (hi == lo-1) None
else Some(x(hi, lo))
}
// like x & ~y, but first truncate or zero-extend y to x's width
def andNot(y: UInt): UInt = x & ~(y | (x & 0.U))
def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n)
def rotateRight(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r))
}
}
def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n))
def rotateLeft(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r))
}
}
// compute (this + y) % n, given (this < n) and (y < n)
def addWrap(y: UInt, n: Int): UInt = {
val z = x +& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0)
}
// compute (this - y) % n, given (this < n) and (y < n)
def subWrap(y: UInt, n: Int): UInt = {
val z = x -& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0)
}
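// Worked examples: with n = 5, (3.U).addWrap(4.U, 5) is 2.U (7 wraps past 5) and
// (1.U).subWrap(3.U, 5) is 3.U (the borrow adds n back in).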
def grouped(width: Int): Seq[UInt] =
(0 until x.getWidth by width).map(base => x(base + width - 1, base))
def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds
def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x)
// Like >=, but prevents x-prop for ('x >= 0)
def >== (y: UInt): Bool = x >= y || y === 0.U
}
implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal {
def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y)
def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y)
}
implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal {
def toInt: Int = if (x) 1 else 0
// this one's snagged from scalaz
def option[T](z: => T): Option[T] = if (x) Some(z) else None
}
implicit class IntToAugmentedInt(private val x: Int) extends AnyVal {
// exact log2
def log2: Int = {
require(isPow2(x))
log2Ceil(x)
}
}
def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x)
def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x))
def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0)
def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1)
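// OH1 encodes a count n as n trailing ones: UIntToOH1(2.U, 4) = "b0011".U;
// applying OH1ToOH to that mask gives the one-hot b0100 and OH1ToUInt recovers 2.U.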
def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None
// Fill 1s from low bits to high bits
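// e.g. leftOR of b00100 is b11100: every bit at or above the lowest set bit becomes 1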
def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth)
def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0))
helper(1, x)(width-1, 0)
}
// Fill 1s from high bits to low bits
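// e.g. rightOR of b00100 is b00111: every bit at or below the highest set bit becomes 1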
def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth)
def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x >> s))
helper(1, x)(width-1, 0)
}
def OptimizationBarrier[T <: Data](in: T): T = {
val barrier = Module(new Module {
val io = IO(new Bundle {
val x = Input(chiselTypeOf(in))
val y = Output(chiselTypeOf(in))
})
io.y := io.x
override def desiredName = s"OptimizationBarrier_${in.typeName}"
})
barrier.io.x := in
barrier.io.y
}
/** Similar to Seq.groupBy except this returns a Seq instead of a Map
* Useful for deterministic code generation
*/
def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = {
val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]]
for (x <- xs) {
val key = f(x)
val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A])
l += x
}
map.view.map({ case (k, vs) => k -> vs.toList }).toList
}
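// e.g. groupByIntoSeq(Seq(1, 2, 3, 4))(_ % 2) returns Seq(1 -> Seq(1, 3), 0 -> Seq(2, 4)),
// preserving the order in which keys were first seen.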
def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match {
case 1 => List.fill(n)(in.head)
case x if x == n => in
case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in")
}
// HeterogeneousBag moved to standalone diplomacy
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts)
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag
}
File Parameters.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.diplomacy
import chisel3._
import chisel3.util.{DecoupledIO, Queue, ReadyValidIO, isPow2, log2Ceil, log2Floor}
import freechips.rocketchip.util.ShiftQueue
/** Options for describing the attributes of memory regions */
object RegionType {
// Define the 'more relaxed than' ordering
val cases = Seq(CACHED, TRACKED, UNCACHED, IDEMPOTENT, VOLATILE, PUT_EFFECTS, GET_EFFECTS)
sealed trait T extends Ordered[T] {
def compare(that: T): Int = cases.indexOf(that) compare cases.indexOf(this)
}
case object CACHED extends T // an intermediate agent may have cached a copy of the region for you
case object TRACKED extends T // the region may have been cached by another master, but coherence is being provided
case object UNCACHED extends T // the region has not been cached yet, but should be cached when possible
case object IDEMPOTENT extends T // gets return most recently put content, but content should not be cached
case object VOLATILE extends T // content may change without a put, but puts and gets have no side effects
case object PUT_EFFECTS extends T // puts produce side effects and so must not be combined/delayed
case object GET_EFFECTS extends T // gets produce side effects and so must not be issued speculatively
}
// A non-empty half-open range; [start, end)
case class IdRange(start: Int, end: Int) extends Ordered[IdRange]
{
require (start >= 0, s"Ids cannot be negative, but got: $start.")
require (start <= end, "Id ranges cannot be negative.")
def compare(x: IdRange) = {
val primary = (this.start - x.start).signum
val secondary = (x.end - this.end).signum
if (primary != 0) primary else secondary
}
def overlaps(x: IdRange) = start < x.end && x.start < end
def contains(x: IdRange) = start <= x.start && x.end <= end
def contains(x: Int) = start <= x && x < end
def contains(x: UInt) =
if (size == 0) {
false.B
} else if (size == 1) { // simple comparison
x === start.U
} else {
// find index of largest different bit
val largestDeltaBit = log2Floor(start ^ (end-1))
val smallestCommonBit = largestDeltaBit + 1 // may not exist in x
val uncommonMask = (1 << smallestCommonBit) - 1
val uncommonBits = (x | 0.U(smallestCommonBit.W))(largestDeltaBit, 0)
// the prefix must match exactly (note: may shift ALL bits away)
(x >> smallestCommonBit) === (start >> smallestCommonBit).U &&
// firrtl constant prop range analysis can eliminate these two:
(start & uncommonMask).U <= uncommonBits &&
uncommonBits <= ((end-1) & uncommonMask).U
}
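// e.g. for IdRange(4, 6) the generated check reduces to (x >> 1) === 2.U; the two bound
// comparisons on bit 0 (0 <= x(0) and x(0) <= 1) are trivially true and optimized away.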
def shift(x: Int) = IdRange(start+x, end+x)
def size = end - start
def isEmpty = end == start
def range = start until end
}
object IdRange
{
def overlaps(s: Seq[IdRange]) = if (s.isEmpty) None else {
val ranges = s.sorted
(ranges.tail zip ranges.init) find { case (a, b) => a overlaps b }
}
}
// A potentially empty inclusive range of 2-powers [min, max] (in bytes)
case class TransferSizes(min: Int, max: Int)
{
def this(x: Int) = this(x, x)
require (min <= max, s"Min transfer $min > max transfer $max")
require (min >= 0 && max >= 0, s"TransferSizes must be positive, got: ($min, $max)")
require (max == 0 || isPow2(max), s"TransferSizes must be a power of 2, got: $max")
require (min == 0 || isPow2(min), s"TransferSizes must be a power of 2, got: $min")
require (max == 0 || min != 0, s"TransferSize 0 is forbidden unless (0,0), got: ($min, $max)")
def none = min == 0
def contains(x: Int) = isPow2(x) && min <= x && x <= max
def containsLg(x: Int) = contains(1 << x)
def containsLg(x: UInt) =
if (none) false.B
else if (min == max) { log2Ceil(min).U === x }
else { log2Ceil(min).U <= x && x <= log2Ceil(max).U }
def contains(x: TransferSizes) = x.none || (min <= x.min && x.max <= max)
def intersect(x: TransferSizes) =
if (x.max < min || max < x.min) TransferSizes.none
else TransferSizes(scala.math.max(min, x.min), scala.math.min(max, x.max))
// Not a union, because the result may contain sizes contained by neither term
// NOT TO BE CONFUSED WITH COVERPOINTS
def mincover(x: TransferSizes) = {
if (none) {
x
} else if (x.none) {
this
} else {
TransferSizes(scala.math.min(min, x.min), scala.math.max(max, x.max))
}
}
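// e.g. TransferSizes(4, 4) mincover TransferSizes(16, 16) = TransferSizes(4, 16), which also
// admits 8-byte transfers that neither operand supports.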
override def toString() = "TransferSizes[%d, %d]".format(min, max)
}
object TransferSizes {
def apply(x: Int) = new TransferSizes(x)
val none = new TransferSizes(0)
def mincover(seq: Seq[TransferSizes]) = seq.foldLeft(none)(_ mincover _)
def intersect(seq: Seq[TransferSizes]) = seq.reduce(_ intersect _)
implicit def asBool(x: TransferSizes) = !x.none
}
// AddressSets specify the address space managed by the manager
// Base is the base address, and mask are the bits consumed by the manager
// e.g: base=0x200, mask=0xff describes a device managing 0x200-0x2ff
// e.g: base=0x1000, mask=0xf0f describes a device managing 0x1000-0x100f, 0x1100-0x110f, ...
case class AddressSet(base: BigInt, mask: BigInt) extends Ordered[AddressSet]
{
// Forbid misaligned base address (and empty sets)
require ((base & mask) == 0, s"Mis-aligned AddressSets are forbidden, got: ${this.toString}")
require (base >= 0, s"AddressSet negative base is ambiguous: $base") // TL2 address widths are not fixed => negative is ambiguous
// We do allow negative mask (=> ignore all high bits)
def contains(x: BigInt) = ((x ^ base) & ~mask) == 0
def contains(x: UInt) = ((x ^ base.U).zext & (~mask).S) === 0.S
// turn x into an address contained in this set
def legalize(x: UInt): UInt = base.U | (mask.U & x)
// overlap iff bitwise: both care (~mask0 & ~mask1) => both equal (base0=base1)
def overlaps(x: AddressSet) = (~(mask | x.mask) & (base ^ x.base)) == 0
// contains iff bitwise: x.mask => mask && contains(x.base)
def contains(x: AddressSet) = ((x.mask | (base ^ x.base)) & ~mask) == 0
// The number of bytes to which the manager must be aligned
def alignment = ((mask + 1) & ~mask)
// Is this a contiguous memory range
def contiguous = alignment == mask+1
def finite = mask >= 0
def max = { require (finite, "Max cannot be calculated on infinite mask"); base | mask }
// Widen the match function to ignore all bits in imask
def widen(imask: BigInt) = AddressSet(base & ~imask, mask | imask)
// Return an AddressSet that only contains the addresses both sets contain
def intersect(x: AddressSet): Option[AddressSet] = {
if (!overlaps(x)) {
None
} else {
val r_mask = mask & x.mask
val r_base = base | x.base
Some(AddressSet(r_base, r_mask))
}
}
def subtract(x: AddressSet): Seq[AddressSet] = {
intersect(x) match {
case None => Seq(this)
case Some(remove) => AddressSet.enumerateBits(mask & ~remove.mask).map { bit =>
val nmask = (mask & (bit-1)) | remove.mask
val nbase = (remove.base ^ bit) & ~nmask
AddressSet(nbase, nmask)
}
}
}
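// e.g. AddressSet(0x0, 0xff) subtract AddressSet(0x0, 0x0f) = Seq(AddressSet(0x10, 0x0f),
// AddressSet(0x20, 0x1f), AddressSet(0x40, 0x3f), AddressSet(0x80, 0x7f)), covering 0x10-0xff.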
// AddressSets have one natural Ordering (the containment order, if contiguous)
def compare(x: AddressSet) = {
val primary = (this.base - x.base).signum // smallest address first
val secondary = (x.mask - this.mask).signum // largest mask first
if (primary != 0) primary else secondary
}
// We always want to see things in hex
override def toString() = {
if (mask >= 0) {
"AddressSet(0x%x, 0x%x)".format(base, mask)
} else {
"AddressSet(0x%x, ~0x%x)".format(base, ~mask)
}
}
def toRanges = {
require (finite, "Ranges cannot be calculated on infinite mask")
val size = alignment
val fragments = mask & ~(size-1)
val bits = bitIndexes(fragments)
(BigInt(0) until (BigInt(1) << bits.size)).map { i =>
val off = bitIndexes(i).foldLeft(base) { case (a, b) => a.setBit(bits(b)) }
AddressRange(off, size)
}
}
}
object AddressSet
{
val everything = AddressSet(0, -1)
def misaligned(base: BigInt, size: BigInt, tail: Seq[AddressSet] = Seq()): Seq[AddressSet] = {
if (size == 0) tail.reverse else {
val maxBaseAlignment = base & (-base) // 0 for infinite (LSB)
val maxSizeAlignment = BigInt(1) << log2Floor(size) // MSB of size
val step =
if (maxBaseAlignment == 0 || maxBaseAlignment > maxSizeAlignment)
maxSizeAlignment else maxBaseAlignment
misaligned(base+step, size-step, AddressSet(base, step-1) +: tail)
}
}
def unify(seq: Seq[AddressSet], bit: BigInt): Seq[AddressSet] = {
// Pair terms up by ignoring 'bit'
seq.distinct.groupBy(x => x.copy(base = x.base & ~bit)).map { case (key, seq) =>
if (seq.size == 1) {
seq.head // singleton -> unaffected
} else {
key.copy(mask = key.mask | bit) // pair - widen mask by bit
}
}.toList
}
def unify(seq: Seq[AddressSet]): Seq[AddressSet] = {
val bits = seq.map(_.base).foldLeft(BigInt(0))(_ | _)
AddressSet.enumerateBits(bits).foldLeft(seq) { case (acc, bit) => unify(acc, bit) }.sorted
}
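// e.g. unify(Seq(AddressSet(0x0, 0xf), AddressSet(0x10, 0xf))) = Seq(AddressSet(0x0, 0x1f)).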
def enumerateMask(mask: BigInt): Seq[BigInt] = {
def helper(id: BigInt, tail: Seq[BigInt]): Seq[BigInt] =
if (id == mask) (id +: tail).reverse else helper(((~mask | id) + 1) & mask, id +: tail)
helper(0, Nil)
}
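// e.g. enumerateMask(0x5) = Seq(0x0, 0x1, 0x4, 0x5): every value formed from a subset of the
// mask bits, in increasing order.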
def enumerateBits(mask: BigInt): Seq[BigInt] = {
def helper(x: BigInt): Seq[BigInt] = {
if (x == 0) {
Nil
} else {
val bit = x & (-x)
bit +: helper(x & ~bit)
}
}
helper(mask)
}
}
case class BufferParams(depth: Int, flow: Boolean, pipe: Boolean)
{
require (depth >= 0, "Buffer depth must be >= 0")
def isDefined = depth > 0
def latency = if (isDefined && !flow) 1 else 0
def apply[T <: Data](x: DecoupledIO[T]) =
if (isDefined) Queue(x, depth, flow=flow, pipe=pipe)
else x
def irrevocable[T <: Data](x: ReadyValidIO[T]) =
if (isDefined) Queue.irrevocable(x, depth, flow=flow, pipe=pipe)
else x
def sq[T <: Data](x: DecoupledIO[T]) =
if (!isDefined) x else {
val sq = Module(new ShiftQueue(x.bits, depth, flow=flow, pipe=pipe))
sq.io.enq <> x
sq.io.deq
}
override def toString() = "BufferParams:%d%s%s".format(depth, if (flow) "F" else "", if (pipe) "P" else "")
}
object BufferParams
{
implicit def apply(depth: Int): BufferParams = BufferParams(depth, false, false)
val default = BufferParams(2)
val none = BufferParams(0)
val flow = BufferParams(1, true, false)
val pipe = BufferParams(1, false, true)
}
case class TriStateValue(value: Boolean, set: Boolean)
{
def update(orig: Boolean) = if (set) value else orig
}
object TriStateValue
{
implicit def apply(value: Boolean): TriStateValue = TriStateValue(value, true)
def unset = TriStateValue(false, false)
}
trait DirectedBuffers[T] {
def copyIn(x: BufferParams): T
def copyOut(x: BufferParams): T
def copyInOut(x: BufferParams): T
}
trait IdMapEntry {
def name: String
def from: IdRange
def to: IdRange
def isCache: Boolean
def requestFifo: Boolean
def maxTransactionsInFlight: Option[Int]
def pretty(fmt: String) =
if (from ne to) { // if the subclass uses the same reference for both from and to, assume its format string has an arity of 5
fmt.format(to.start, to.end, from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "")
} else {
fmt.format(from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "")
}
}
abstract class IdMap[T <: IdMapEntry] {
protected val fmt: String
val mapping: Seq[T]
def pretty: String = mapping.map(_.pretty(fmt)).mkString(",\n")
}
File Edges.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.util._
class TLEdge(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdgeParameters(client, manager, params, sourceInfo)
{
def isAligned(address: UInt, lgSize: UInt): Bool = {
if (maxLgSize == 0) true.B else {
val mask = UIntToOH1(lgSize, maxLgSize)
(address & mask) === 0.U
}
}
def mask(address: UInt, lgSize: UInt): UInt =
MaskGen(address, lgSize, manager.beatBytes)
def staticHasData(bundle: TLChannel): Option[Boolean] = {
bundle match {
case _:TLBundleA => {
// Do there exist A messages with Data?
val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial
// Do there exist A messages without Data?
val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint
// Statically optimize the case where hasData is a constant
if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None
}
case _:TLBundleB => {
// Do there exist B messages with Data?
val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial
// Do there exist B messages without Data?
val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint
// Statically optimize the case where hasData is a constant
if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None
}
case _:TLBundleC => {
// Do there exist C messages with Data?
val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe
// Do there exist C messages without Data?
val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe
if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None
}
case _:TLBundleD => {
// Do there exist D messages with Data?
val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB
// Do there exist D messages without Data?
val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT
if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None
}
case _:TLBundleE => Some(false)
}
}
def isRequest(x: TLChannel): Bool = {
x match {
case a: TLBundleA => true.B
case b: TLBundleB => true.B
case c: TLBundleC => c.opcode(2) && c.opcode(1)
// opcode === TLMessages.Release ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(2) && !d.opcode(1)
// opcode === TLMessages.Grant ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
}
def isResponse(x: TLChannel): Bool = {
x match {
case a: TLBundleA => false.B
case b: TLBundleB => false.B
case c: TLBundleC => !c.opcode(2) || !c.opcode(1)
// opcode =/= TLMessages.Release &&
// opcode =/= TLMessages.ReleaseData
case d: TLBundleD => true.B // Grant isResponse + isRequest
case e: TLBundleE => true.B
}
}
def hasData(x: TLChannel): Bool = {
val opdata = x match {
case a: TLBundleA => !a.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case b: TLBundleB => !b.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case c: TLBundleC => c.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.ProbeAckData ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
staticHasData(x).map(_.B).getOrElse(opdata)
}
def opcode(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.opcode
case b: TLBundleB => b.opcode
case c: TLBundleC => c.opcode
case d: TLBundleD => d.opcode
}
}
def param(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.param
case b: TLBundleB => b.param
case c: TLBundleC => c.param
case d: TLBundleD => d.param
}
}
def size(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.size
case b: TLBundleB => b.size
case c: TLBundleC => c.size
case d: TLBundleD => d.size
}
}
def data(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.data
case b: TLBundleB => b.data
case c: TLBundleC => c.data
case d: TLBundleD => d.data
}
}
def corrupt(x: TLDataChannel): Bool = {
x match {
case a: TLBundleA => a.corrupt
case b: TLBundleB => b.corrupt
case c: TLBundleC => c.corrupt
case d: TLBundleD => d.corrupt
}
}
def mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.mask
case b: TLBundleB => b.mask
case c: TLBundleC => mask(c.address, c.size)
}
}
def full_mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => mask(a.address, a.size)
case b: TLBundleB => mask(b.address, b.size)
case c: TLBundleC => mask(c.address, c.size)
}
}
def address(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.address
case b: TLBundleB => b.address
case c: TLBundleC => c.address
}
}
def source(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.source
case b: TLBundleB => b.source
case c: TLBundleC => c.source
case d: TLBundleD => d.source
}
}
def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes)
def addr_lo(x: UInt): UInt =
if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0)
def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x))
def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x))
def numBeats(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 1.U
case bundle: TLDataChannel => {
val hasData = this.hasData(bundle)
val size = this.size(bundle)
val cutoff = log2Ceil(manager.beatBytes)
val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U
val decode = UIntToOH(size, maxLgSize+1) >> cutoff
Mux(hasData, decode | small.asUInt, 1.U)
}
}
}
def numBeats1(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 0.U
case bundle: TLDataChannel => {
if (maxLgSize == 0) {
0.U
} else {
val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes)
Mux(hasData(bundle), decode, 0.U)
}
}
}
}
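// e.g. with 8-byte beats, a 32-byte message carrying data has numBeats = 4 and numBeats1 = 3;
// any message without a data payload is a single beat (numBeats1 = 0).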
def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val beats1 = numBeats1(bits)
val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W))
val counter1 = counter - 1.U
val first = counter === 0.U
val last = counter === 1.U || beats1 === 0.U
val done = last && fire
val count = (beats1 & ~counter1)
when (fire) {
counter := Mux(first, beats1, counter1)
}
(first, last, done, count)
}
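// e.g. for a 4-beat burst (beats1 = 3) the counter steps 0, 3, 2, 1 while count yields the beat
// index 0, 1, 2, 3; `first` holds on the initial beat and `last`/`done` on the final accepted beat.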
def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1
def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire)
def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid)
def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2
def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire)
def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid)
def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3
def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire)
def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid)
def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3)
}
def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire)
def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid)
def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4)
}
def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire)
def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid)
def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes))
}
def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire)
def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid)
// Does the request need T permissions to be executed?
def needT(a: TLBundleA): Bool = {
val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLPermissions.NtoB -> false.B,
TLPermissions.NtoT -> true.B,
TLPermissions.BtoT -> true.B))
MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array(
TLMessages.PutFullData -> true.B,
TLMessages.PutPartialData -> true.B,
TLMessages.ArithmeticData -> true.B,
TLMessages.LogicalData -> true.B,
TLMessages.Get -> false.B,
TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLHints.PREFETCH_READ -> false.B,
TLHints.PREFETCH_WRITE -> true.B)),
TLMessages.AcquireBlock -> acq_needT,
TLMessages.AcquirePerm -> acq_needT))
}
// This is a very expensive circuit; use only if you really mean it!
def inFlight(x: TLBundle): (UInt, UInt) = {
val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W))
val bce = manager.anySupportAcquireB && client.anySupportProbe
val (a_first, a_last, _) = firstlast(x.a)
val (b_first, b_last, _) = firstlast(x.b)
val (c_first, c_last, _) = firstlast(x.c)
val (d_first, d_last, _) = firstlast(x.d)
val (e_first, e_last, _) = firstlast(x.e)
val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits))
val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits))
val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits))
val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits))
val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits))
val a_inc = x.a.fire && a_first && a_request
val b_inc = x.b.fire && b_first && b_request
val c_inc = x.c.fire && c_first && c_request
val d_inc = x.d.fire && d_first && d_request
val e_inc = x.e.fire && e_first && e_request
val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil))
val a_dec = x.a.fire && a_last && a_response
val b_dec = x.b.fire && b_last && b_response
val c_dec = x.c.fire && c_last && c_response
val d_dec = x.d.fire && d_last && d_response
val e_dec = x.e.fire && e_last && e_response
val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil))
val next_flight = flight + PopCount(inc) - PopCount(dec)
flight := next_flight
(flight, next_flight)
}
def prettySourceMapping(context: String): String = {
s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n"
}
}
class TLEdgeOut(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
// Transfers
def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquireBlock
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
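// Minimal usage sketch (hypothetical names edgeOut, tl, src, addr, wantAcquire):
//   val (legal, a) = edgeOut.AcquireBlock(src, addr, lgSize, TLPermissions.NtoT)
//   tl.a.valid := wantAcquire; tl.a.bits := a  // `legal` should be asserted or checked by the caller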
def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquirePerm
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.Release
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ReleaseData
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) =
Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B)
def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAck
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions, data)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAckData
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B)
def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink)
def GrantAck(toSink: UInt): TLBundleE = {
val e = Wire(new TLBundleE(bundle))
e.sink := toSink
e
}
// Accesses
def Get(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsGetFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Get
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutFullFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutFullData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, mask, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutPartialFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutPartialData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = {
require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsArithmeticFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.ArithmeticData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsLogicalFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.LogicalData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = {
require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsHintFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Hint
a.param := param
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data)
def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAckData
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size)
def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.HintAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
}
class TLEdgeIn(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = {
val todo = x.filter(!_.isEmpty)
val heads = todo.map(_.head)
val tails = todo.map(_.tail)
if (todo.isEmpty) Nil else { heads +: myTranspose(tails) }
}
// Transfers
def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = {
require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}")
val legal = client.supportsProbe(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Probe
b.param := capPermissions
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.Grant
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.GrantData
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B)
def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.ReleaseAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
// Accesses
def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = {
require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsGet(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Get
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsPutFull(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutFullData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, mask, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsPutPartial(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutPartialData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsArithmetic(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.ArithmeticData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsLogical(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.LogicalData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = {
require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsHint(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Hint
b.param := param
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size)
def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied)
def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data)
def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAckData
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B)
def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied)
def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B)
def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.HintAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
}
| module TLMonitor_44( // @[Monitor.scala:36:7]
input clock, // @[Monitor.scala:36:7]
input reset, // @[Monitor.scala:36:7]
input io_in_a_ready, // @[Monitor.scala:20:14]
input io_in_a_valid, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_opcode, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_param, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_size, // @[Monitor.scala:20:14]
input [5:0] io_in_a_bits_source, // @[Monitor.scala:20:14]
input [31:0] io_in_a_bits_address, // @[Monitor.scala:20:14]
input [7:0] io_in_a_bits_mask, // @[Monitor.scala:20:14]
input io_in_a_bits_corrupt, // @[Monitor.scala:20:14]
input io_in_b_ready, // @[Monitor.scala:20:14]
input io_in_b_valid, // @[Monitor.scala:20:14]
input [1:0] io_in_b_bits_param, // @[Monitor.scala:20:14]
input [5:0] io_in_b_bits_source, // @[Monitor.scala:20:14]
input [31:0] io_in_b_bits_address, // @[Monitor.scala:20:14]
input io_in_c_ready, // @[Monitor.scala:20:14]
input io_in_c_valid, // @[Monitor.scala:20:14]
input [2:0] io_in_c_bits_opcode, // @[Monitor.scala:20:14]
input [2:0] io_in_c_bits_param, // @[Monitor.scala:20:14]
input [2:0] io_in_c_bits_size, // @[Monitor.scala:20:14]
input [5:0] io_in_c_bits_source, // @[Monitor.scala:20:14]
input [31:0] io_in_c_bits_address, // @[Monitor.scala:20:14]
input io_in_c_bits_corrupt, // @[Monitor.scala:20:14]
input io_in_d_ready, // @[Monitor.scala:20:14]
input io_in_d_valid, // @[Monitor.scala:20:14]
input [2:0] io_in_d_bits_opcode, // @[Monitor.scala:20:14]
input [1:0] io_in_d_bits_param, // @[Monitor.scala:20:14]
input [2:0] io_in_d_bits_size, // @[Monitor.scala:20:14]
input [5:0] io_in_d_bits_source, // @[Monitor.scala:20:14]
input [2:0] io_in_d_bits_sink, // @[Monitor.scala:20:14]
input io_in_d_bits_denied, // @[Monitor.scala:20:14]
input io_in_d_bits_corrupt, // @[Monitor.scala:20:14]
input io_in_e_valid, // @[Monitor.scala:20:14]
input [2:0] io_in_e_bits_sink // @[Monitor.scala:20:14]
);
wire [31:0] _plusarg_reader_1_out; // @[PlusArg.scala:80:11]
wire [31:0] _plusarg_reader_out; // @[PlusArg.scala:80:11]
wire [12:0] _GEN = {10'h0, io_in_a_bits_size}; // @[package.scala:243:71]
wire [12:0] _GEN_0 = {10'h0, io_in_c_bits_size}; // @[package.scala:243:71]
wire _a_first_T_1 = io_in_a_ready & io_in_a_valid; // @[Decoupled.scala:51:35]
reg [2:0] a_first_counter; // @[Edges.scala:229:27]
reg [2:0] opcode; // @[Monitor.scala:387:22]
reg [2:0] param; // @[Monitor.scala:388:22]
reg [2:0] size; // @[Monitor.scala:389:22]
reg [5:0] source; // @[Monitor.scala:390:22]
reg [31:0] address; // @[Monitor.scala:391:22]
wire _d_first_T_3 = io_in_d_ready & io_in_d_valid; // @[Decoupled.scala:51:35]
reg [2:0] d_first_counter; // @[Edges.scala:229:27]
reg [2:0] opcode_1; // @[Monitor.scala:538:22]
reg [1:0] param_1; // @[Monitor.scala:539:22]
reg [2:0] size_1; // @[Monitor.scala:540:22]
reg [5:0] source_1; // @[Monitor.scala:541:22]
reg [2:0] sink; // @[Monitor.scala:542:22]
reg denied; // @[Monitor.scala:543:22]
reg [2:0] b_first_counter; // @[Edges.scala:229:27]
reg [1:0] param_2; // @[Monitor.scala:411:22]
reg [5:0] source_2; // @[Monitor.scala:413:22]
reg [31:0] address_1; // @[Monitor.scala:414:22]
wire _c_first_T_1 = io_in_c_ready & io_in_c_valid; // @[Decoupled.scala:51:35]
reg [2:0] c_first_counter; // @[Edges.scala:229:27]
reg [2:0] opcode_3; // @[Monitor.scala:515:22]
reg [2:0] param_3; // @[Monitor.scala:516:22]
reg [2:0] size_3; // @[Monitor.scala:517:22]
reg [5:0] source_3; // @[Monitor.scala:518:22]
reg [31:0] address_2; // @[Monitor.scala:519:22]
reg [62:0] inflight; // @[Monitor.scala:614:27]
reg [251:0] inflight_opcodes; // @[Monitor.scala:616:35]
reg [251:0] inflight_sizes; // @[Monitor.scala:618:33]
reg [2:0] a_first_counter_1; // @[Edges.scala:229:27]
wire a_first_1 = a_first_counter_1 == 3'h0; // @[Edges.scala:229:27, :231:25]
reg [2:0] d_first_counter_1; // @[Edges.scala:229:27]
wire d_first_1 = d_first_counter_1 == 3'h0; // @[Edges.scala:229:27, :231:25]
wire [63:0] _GEN_1 = {58'h0, io_in_a_bits_source}; // @[OneHot.scala:58:35]
wire _GEN_2 = _a_first_T_1 & a_first_1; // @[Decoupled.scala:51:35]
wire d_release_ack = io_in_d_bits_opcode == 3'h6; // @[Monitor.scala:673:46]
wire _GEN_3 = io_in_d_bits_opcode != 3'h6; // @[Monitor.scala:673:46, :674:74]
wire [63:0] _GEN_4 = {58'h0, io_in_d_bits_source}; // @[OneHot.scala:58:35]
reg [31:0] watchdog; // @[Monitor.scala:709:27]
reg [62:0] inflight_1; // @[Monitor.scala:726:35]
reg [251:0] inflight_sizes_1; // @[Monitor.scala:728:35]
reg [2:0] c_first_counter_1; // @[Edges.scala:229:27]
wire c_first_1 = c_first_counter_1 == 3'h0; // @[Edges.scala:229:27, :231:25]
reg [2:0] d_first_counter_2; // @[Edges.scala:229:27]
wire d_first_2 = d_first_counter_2 == 3'h0; // @[Edges.scala:229:27, :231:25]
wire _GEN_5 = io_in_c_bits_opcode[2] & io_in_c_bits_opcode[1]; // @[Edges.scala:68:{36,40,51}]
wire [63:0] _GEN_6 = {58'h0, io_in_c_bits_source}; // @[OneHot.scala:58:35]
wire _GEN_7 = _c_first_T_1 & c_first_1 & _GEN_5; // @[Decoupled.scala:51:35]
reg [31:0] watchdog_1; // @[Monitor.scala:818:27]
reg [6:0] inflight_2; // @[Monitor.scala:828:27]
reg [2:0] d_first_counter_3; // @[Edges.scala:229:27]
wire d_first_3 = d_first_counter_3 == 3'h0; // @[Edges.scala:229:27, :231:25]
wire _GEN_8 = _d_first_T_3 & d_first_3 & io_in_d_bits_opcode[2] & ~(io_in_d_bits_opcode[1]); // @[Decoupled.scala:51:35]
wire [7:0] _d_set_T = 8'h1 << io_in_d_bits_sink; // @[OneHot.scala:58:35]
wire [6:0] d_set = _GEN_8 ? _d_set_T[6:0] : 7'h0; // @[OneHot.scala:58:35] |
Generate the Verilog code corresponding to the following Chisel files.
File Transposer.scala:
package gemmini
import chisel3._
import chisel3.util._
import Util._
trait Transposer[T <: Data] extends Module {
def dim: Int
def dataType: T
val io = IO(new Bundle {
val inRow = Flipped(Decoupled(Vec(dim, dataType)))
val outCol = Decoupled(Vec(dim, dataType))
})
}
class PipelinedTransposer[T <: Data](val dim: Int, val dataType: T) extends Transposer[T] {
require(isPow2(dim))
val regArray = Seq.fill(dim, dim)(Reg(dataType))
val regArrayT = regArray.transpose
val sMoveUp :: sMoveLeft :: Nil = Enum(2)
val state = RegInit(sMoveUp)
val leftCounter = RegInit(0.U(log2Ceil(dim+1).W)) //(io.inRow.fire && state === sMoveLeft, dim+1)
val upCounter = RegInit(0.U(log2Ceil(dim+1).W)) //Counter(io.inRow.fire && state === sMoveUp, dim+1)
io.outCol.valid := 0.U
io.inRow.ready := 0.U
switch(state) {
is(sMoveUp) {
io.inRow.ready := upCounter <= dim.U
io.outCol.valid := leftCounter > 0.U
when(io.inRow.fire) {
upCounter := upCounter + 1.U
}
when(upCounter === (dim-1).U) {
state := sMoveLeft
leftCounter := 0.U
}
when(io.outCol.fire) {
leftCounter := leftCounter - 1.U
}
}
is(sMoveLeft) {
io.inRow.ready := leftCounter <= dim.U // TODO: this is naive
io.outCol.valid := upCounter > 0.U
when(leftCounter === (dim-1).U) {
state := sMoveUp
}
when(io.inRow.fire) {
leftCounter := leftCounter + 1.U
upCounter := 0.U
}
when(io.outCol.fire) {
upCounter := upCounter - 1.U
}
}
}
// Propagate input from bottom row to top row systolically in the move up phase
// TODO: need to iterate over columns to connect Chisel values of type T
// Should be able to operate directly on the Vec, but Seq and Vec don't mix (try Array?)
for (colIdx <- 0 until dim) {
regArray.foldRight(io.inRow.bits(colIdx)) {
case (regRow, prevReg) =>
when (state === sMoveUp) {
regRow(colIdx) := prevReg
}
regRow(colIdx)
}
}
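// With foldRight, the bottom row (index dim-1) captures io.inRow while every other row copies
// from the row below it, so data shifts up one row per accepted beat during sMoveUp.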
// Propagate input from right side to left side systolically in the move left phase
for (rowIdx <- 0 until dim) {
regArrayT.foldRight(io.inRow.bits(rowIdx)) {
case (regCol, prevReg) =>
when (state === sMoveLeft) {
regCol(rowIdx) := prevReg
}
regCol(rowIdx)
}
}
// Pull from the left side or the top side based on the state
for (idx <- 0 until dim) {
when (state === sMoveUp) {
io.outCol.bits(idx) := regArray(0)(idx)
}.elsewhen(state === sMoveLeft) {
io.outCol.bits(idx) := regArrayT(0)(idx)
}.otherwise {
io.outCol.bits(idx) := DontCare
}
}
}
class AlwaysOutTransposer[T <: Data](val dim: Int, val dataType: T) extends Transposer[T] {
require(isPow2(dim))
val LEFT_DIR = 0.U(1.W)
val UP_DIR = 1.U(1.W)
class PE extends Module {
val io = IO(new Bundle {
val inR = Input(dataType)
val inD = Input(dataType)
val outL = Output(dataType)
val outU = Output(dataType)
val dir = Input(UInt(1.W))
val en = Input(Bool())
})
val reg = RegEnable(Mux(io.dir === LEFT_DIR, io.inR, io.inD), io.en)
io.outU := reg
io.outL := reg
}
val pes = Seq.fill(dim,dim)(Module(new PE))
val counter = RegInit(0.U((log2Ceil(dim) max 1).W)) // TODO replace this with a standard Chisel counter
val dir = RegInit(LEFT_DIR)
// Wire up horizontal signals
for (row <- 0 until dim; col <- 0 until dim) {
val right_in = if (col == dim-1) io.inRow.bits(row) else pes(row)(col+1).io.outL
pes(row)(col).io.inR := right_in
}
// Wire up vertical signals
for (row <- 0 until dim; col <- 0 until dim) {
val down_in = if (row == dim-1) io.inRow.bits(col) else pes(row+1)(col).io.outU
pes(row)(col).io.inD := down_in
}
// Wire up global signals
pes.flatten.foreach(_.io.dir := dir)
pes.flatten.foreach(_.io.en := io.inRow.fire)
io.outCol.valid := true.B
io.inRow.ready := true.B
val left_out = VecInit(pes.transpose.head.map(_.io.outL))
val up_out = VecInit(pes.head.map(_.io.outU))
io.outCol.bits := Mux(dir === LEFT_DIR, left_out, up_out)
when (io.inRow.fire) {
counter := wrappingAdd(counter, 1.U, dim)
}
when (counter === (dim-1).U && io.inRow.fire) {
dir := ~dir
}
}
class NaiveTransposer[T <: Data](val dim: Int, val dataType: T) extends Transposer[T] {
val regArray = Seq.fill(dim, dim)(Reg(dataType))
val regArrayT = regArray.transpose
// state = 0 => filling regArray row-wise, state = 1 => draining regArray column-wise
val state = RegInit(0.U(1.W))
val countInc = io.inRow.fire || io.outCol.fire
val (countValue, countWrap) = Counter(countInc, dim)
io.inRow.ready := state === 0.U
io.outCol.valid := state === 1.U
for (i <- 0 until dim) {
for (j <- 0 until dim) {
when(countValue === i.U && io.inRow.fire) {
regArray(i)(j) := io.inRow.bits(j)
}
}
}
for (i <- 0 until dim) {
io.outCol.bits(i) := 0.U
for (j <- 0 until dim) {
when(countValue === j.U) {
io.outCol.bits(i) := regArrayT(j)(i)
}
}
}
when (io.inRow.fire && countWrap) {
state := 1.U
}
when (io.outCol.fire && countWrap) {
state := 0.U
}
assert(!(state === 0.U) || !io.outCol.fire)
assert(!(state === 1.U) || !io.inRow.fire)
}
| module PE_217( // @[Transposer.scala:100:9]
input clock, // @[Transposer.scala:100:9]
input reset, // @[Transposer.scala:100:9]
input [7:0] io_inR, // @[Transposer.scala:101:16]
input [7:0] io_inD, // @[Transposer.scala:101:16]
output [7:0] io_outL, // @[Transposer.scala:101:16]
output [7:0] io_outU, // @[Transposer.scala:101:16]
input io_dir, // @[Transposer.scala:101:16]
input io_en // @[Transposer.scala:101:16]
);
wire [7:0] io_inR_0 = io_inR; // @[Transposer.scala:100:9]
wire [7:0] io_inD_0 = io_inD; // @[Transposer.scala:100:9]
wire io_dir_0 = io_dir; // @[Transposer.scala:100:9]
wire io_en_0 = io_en; // @[Transposer.scala:100:9]
wire [7:0] io_outL_0; // @[Transposer.scala:100:9]
wire [7:0] io_outU_0; // @[Transposer.scala:100:9]
wire _reg_T = ~io_dir_0; // @[Transposer.scala:100:9, :110:36]
wire [7:0] _reg_T_1 = _reg_T ? io_inR_0 : io_inD_0; // @[Transposer.scala:100:9, :110:{28,36}]
reg [7:0] reg_0; // @[Transposer.scala:110:24]
assign io_outL_0 = reg_0; // @[Transposer.scala:100:9, :110:24]
assign io_outU_0 = reg_0; // @[Transposer.scala:100:9, :110:24]
always @(posedge clock) begin // @[Transposer.scala:100:9]
if (io_en_0) // @[Transposer.scala:100:9]
reg_0 <= _reg_T_1; // @[Transposer.scala:110:{24,28}]
end // always @(posedge)
assign io_outL = io_outL_0; // @[Transposer.scala:100:9]
assign io_outU = io_outU_0; // @[Transposer.scala:100:9]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File ShiftReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
// Similar to the Chisel ShiftRegister but allows the user to suggest a
// name to the registers that get instantiated, and
// to provide a reset value.
object ShiftRegInit {
def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T =
(0 until n).foldRight(in) {
case (i, next) => {
val r = RegNext(next, init)
name.foreach { na => r.suggestName(s"${na}_${i}") }
r
}
}
}
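// e.g. ShiftRegInit(sig, 2, false.B, Some("pipe")) (with a hypothetical signal `sig`) delays sig
// by two cycles through registers named pipe_1 -> pipe_0 (via suggestName), each reset to false.B.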
/** These wrap behavioral
* shift registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
* The different types vary in their reset behavior:
* AsyncResetShiftReg -- Asynchronously reset register array
* A W(width) x D(depth) sized array is constructed from D instantiations of a
* W-wide register vector. Functionally identical to AsyncResetSynchronizerShiftReg,
* but only used for timing applications
*/
abstract class AbstractPipelineReg(w: Int = 1) extends Module {
val io = IO(new Bundle {
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
}
)
}
object AbstractPipelineReg {
def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = {
val chain = Module(gen)
name.foreach{ chain.suggestName(_) }
chain.io.d := in.asUInt
chain.io.q.asTypeOf(in)
}
}
class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) {
require(depth > 0, "Depth must be greater than 0.")
override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}"
val chain = List.tabulate(depth) { i =>
Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}")
}
chain.last.io.d := io.d
chain.last.io.en := true.B
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink.io.d := source.io.q
sink.io.en := true.B
}
io.q := chain.head.io.q
}
object AsyncResetShiftReg {
def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name)
def apply [T <: Data](in: T, depth: Int, name: Option[String]): T =
apply(in, depth, 0, name)
def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T =
apply(in, depth, init.litValue.toInt, name)
def apply [T <: Data](in: T, depth: Int, init: T): T =
apply (in, depth, init.litValue.toInt, None)
}
File SynchronizerReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util.{RegEnable, Cat}
/** These wrap behavioral
* shift and next registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
*
* These are built up of *ResetSynchronizerPrimitiveShiftReg,
* intended to be replaced by the integrator's metastable flops chains or replaced
* at this level if they have a multi-bit wide synchronizer primitive.
* The different types vary in their reset behavior:
* NonSyncResetSynchronizerShiftReg -- Register array which does not have a reset pin
* AsyncResetSynchronizerShiftReg -- Asynchronously reset register array, constructed from W instantiations of D deep
* 1-bit-wide shift registers.
* SyncResetSynchronizerShiftReg -- Synchronously reset register array, constructed similarly to AsyncResetSynchronizerShiftReg
*
* [Inferred]ResetSynchronizerShiftReg -- TBD reset type by chisel3 reset inference.
*
* ClockCrossingReg -- Not made up of SynchronizerPrimitiveShiftReg. This is for single-deep flops which cross
* Clock Domains.
*/
object SynchronizerResetType extends Enumeration {
val NonSync, Inferred, Sync, Async = Value
}
// Note: this should not be used directly.
// Use the companion object to generate this with the correct reset type mixin.
private class SynchronizerPrimitiveShiftReg(
sync: Int,
init: Boolean,
resetType: SynchronizerResetType.Value)
extends AbstractPipelineReg(1) {
val initInt = if (init) 1 else 0
val initPostfix = resetType match {
case SynchronizerResetType.NonSync => ""
case _ => s"_i${initInt}"
}
override def desiredName = s"${resetType.toString}ResetSynchronizerPrimitiveShiftReg_d${sync}${initPostfix}"
val chain = List.tabulate(sync) { i =>
val reg = if (resetType == SynchronizerResetType.NonSync) Reg(Bool()) else RegInit(init.B)
reg.suggestName(s"sync_$i")
}
chain.last := io.d.asBool
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink := source
}
io.q := chain.head.asUInt
}
private object SynchronizerPrimitiveShiftReg {
def apply (in: Bool, sync: Int, init: Boolean, resetType: SynchronizerResetType.Value): Bool = {
val gen: () => SynchronizerPrimitiveShiftReg = resetType match {
case SynchronizerResetType.NonSync =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType)
case SynchronizerResetType.Async =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireAsyncReset
case SynchronizerResetType.Sync =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireSyncReset
case SynchronizerResetType.Inferred =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType)
}
AbstractPipelineReg(gen(), in)
}
}
// Note: This module may end up with a non-AsyncReset type reset.
// But the Primitives within will always have AsyncReset type.
class AsyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int)
extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"AsyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
withReset(reset.asAsyncReset){
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Async)
}
}
io.q := Cat(output.reverse)
}
object AsyncResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
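// e.g. AsyncResetSynchronizerShiftReg(io.d_async, sync = 3, init = 0) (for a hypothetical input
// io.d_async) builds a 3-deep chain of asynchronously reset flops per bit and returns the
// synchronized value in the current clock domain.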
// Note: This module may end up with a non-Bool type reset.
// But the Primitives within will always have Bool reset type.
@deprecated("SyncResetSynchronizerShiftReg is unecessary with Chisel3 inferred resets. Use ResetSynchronizerShiftReg which will use the inferred reset type.", "rocket-chip 1.2")
class SyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"SyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
withReset(reset.asBool){
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Sync)
}
}
io.q := Cat(output.reverse)
}
object SyncResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
if (sync == 0) in else AbstractPipelineReg(new SyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
class ResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"ResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Inferred)
}
io.q := Cat(output.reverse)
}
object ResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
AbstractPipelineReg(new ResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
class SynchronizerShiftReg(w: Int = 1, sync: Int = 3) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"SynchronizerShiftReg_w${w}_d${sync}"
val output = Seq.tabulate(w) { i =>
SynchronizerPrimitiveShiftReg(io.d(i), sync, false, SynchronizerResetType.NonSync)
}
io.q := Cat(output.reverse)
}
object SynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, name: Option[String] = None): T =
if (sync == 0) in else AbstractPipelineReg(new SynchronizerShiftReg(in.getWidth, sync), in, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, None)
def apply [T <: Data](in: T): T =
apply (in, 3, None)
}
class ClockCrossingReg(w: Int = 1, doInit: Boolean) extends Module {
override def desiredName = s"ClockCrossingReg_w${w}"
val io = IO(new Bundle{
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
val en = Input(Bool())
})
val cdc_reg = if (doInit) RegEnable(io.d, 0.U(w.W), io.en) else RegEnable(io.d, io.en)
io.q := cdc_reg
}
object ClockCrossingReg {
def apply [T <: Data](in: T, en: Bool, doInit: Boolean, name: Option[String] = None): T = {
val cdc_reg = Module(new ClockCrossingReg(in.getWidth, doInit))
name.foreach{ cdc_reg.suggestName(_) }
cdc_reg.io.d := in.asUInt
cdc_reg.io.en := en
cdc_reg.io.q.asTypeOf(in)
}
}
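// Usage sketch (illustrative): capture a value on an enable pulse when moving it
// between clock domains, e.g.
//
//   val captured = ClockCrossingReg(busData, en = capturePulse, doInit = false)
//
// Note there is no synchronizer inside this register, so the enable is presumably
// what guarantees the input is stable when it is sampled.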
| module AsyncResetSynchronizerPrimitiveShiftReg_d3_i0_211( // @[SynchronizerReg.scala:68:19]
input clock, // @[SynchronizerReg.scala:68:19]
input reset, // @[SynchronizerReg.scala:68:19]
output io_q // @[ShiftReg.scala:36:14]
);
wire io_d = 1'h1; // @[SynchronizerReg.scala:54:22, :68:19]
wire _sync_2_T = 1'h1; // @[SynchronizerReg.scala:54:22, :68:19]
wire io_q_0; // @[SynchronizerReg.scala:68:19]
reg sync_0; // @[SynchronizerReg.scala:51:87]
assign io_q_0 = sync_0; // @[SynchronizerReg.scala:51:87, :68:19]
reg sync_1; // @[SynchronizerReg.scala:51:87]
reg sync_2; // @[SynchronizerReg.scala:51:87]
always @(posedge clock or posedge reset) begin // @[SynchronizerReg.scala:68:19]
if (reset) begin // @[SynchronizerReg.scala:68:19]
sync_0 <= 1'h0; // @[SynchronizerReg.scala:51:87]
sync_1 <= 1'h0; // @[SynchronizerReg.scala:51:87]
sync_2 <= 1'h0; // @[SynchronizerReg.scala:51:87]
end
else begin // @[SynchronizerReg.scala:68:19]
sync_0 <= sync_1; // @[SynchronizerReg.scala:51:87]
sync_1 <= sync_2; // @[SynchronizerReg.scala:51:87]
sync_2 <= 1'h1; // @[SynchronizerReg.scala:51:87, :54:22, :68:19]
end
  end // always @(posedge, posedge)
  assign io_q = io_q_0; // @[SynchronizerReg.scala:68:19]
endmodule
Generate the Verilog code corresponding to the following Chisel files.
File Tile.scala:
// See README.md for license details.
package gemmini
import chisel3._
import chisel3.util._
import Util._
/**
* A Tile is a purely combinational 2D array of passThrough PEs.
 * a, b, d, and control are broadcast across the entire array and are passed through to the Tile's outputs
 * @param inputType The data type of each PE's 'a' input
 * @param rows Number of rows of PEs in the Tile
 * @param columns Number of columns of PEs in the Tile
*/
class Tile[T <: Data](inputType: T, outputType: T, accType: T, df: Dataflow.Value, tree_reduction: Boolean, max_simultaneous_matmuls: Int, val rows: Int, val columns: Int)(implicit ev: Arithmetic[T]) extends Module {
val io = IO(new Bundle {
val in_a = Input(Vec(rows, inputType))
val in_b = Input(Vec(columns, outputType)) // This is the output of the tile next to it
val in_d = Input(Vec(columns, outputType))
val in_control = Input(Vec(columns, new PEControl(accType)))
val in_id = Input(Vec(columns, UInt(log2Up(max_simultaneous_matmuls).W)))
val in_last = Input(Vec(columns, Bool()))
val out_a = Output(Vec(rows, inputType))
val out_c = Output(Vec(columns, outputType))
val out_b = Output(Vec(columns, outputType))
val out_control = Output(Vec(columns, new PEControl(accType)))
val out_id = Output(Vec(columns, UInt(log2Up(max_simultaneous_matmuls).W)))
val out_last = Output(Vec(columns, Bool()))
val in_valid = Input(Vec(columns, Bool()))
val out_valid = Output(Vec(columns, Bool()))
val bad_dataflow = Output(Bool())
})
import ev._
val tile = Seq.fill(rows, columns)(Module(new PE(inputType, outputType, accType, df, max_simultaneous_matmuls)))
val tileT = tile.transpose
// TODO: abstract hori/vert broadcast, all these connections look the same
// Broadcast 'a' horizontally across the Tile
for (r <- 0 until rows) {
tile(r).foldLeft(io.in_a(r)) {
case (in_a, pe) =>
pe.io.in_a := in_a
pe.io.out_a
}
}
// Broadcast 'b' vertically across the Tile
for (c <- 0 until columns) {
tileT(c).foldLeft(io.in_b(c)) {
case (in_b, pe) =>
pe.io.in_b := (if (tree_reduction) in_b.zero else in_b)
pe.io.out_b
}
}
// Broadcast 'd' vertically across the Tile
for (c <- 0 until columns) {
tileT(c).foldLeft(io.in_d(c)) {
case (in_d, pe) =>
pe.io.in_d := in_d
pe.io.out_c
}
}
// Broadcast 'control' vertically across the Tile
for (c <- 0 until columns) {
tileT(c).foldLeft(io.in_control(c)) {
case (in_ctrl, pe) =>
pe.io.in_control := in_ctrl
pe.io.out_control
}
}
// Broadcast 'garbage' vertically across the Tile
for (c <- 0 until columns) {
tileT(c).foldLeft(io.in_valid(c)) {
case (v, pe) =>
pe.io.in_valid := v
pe.io.out_valid
}
}
// Broadcast 'id' vertically across the Tile
for (c <- 0 until columns) {
tileT(c).foldLeft(io.in_id(c)) {
case (id, pe) =>
pe.io.in_id := id
pe.io.out_id
}
}
// Broadcast 'last' vertically across the Tile
for (c <- 0 until columns) {
tileT(c).foldLeft(io.in_last(c)) {
case (last, pe) =>
pe.io.in_last := last
pe.io.out_last
}
}
// Drive the Tile's bottom IO
for (c <- 0 until columns) {
io.out_c(c) := tile(rows-1)(c).io.out_c
io.out_control(c) := tile(rows-1)(c).io.out_control
io.out_id(c) := tile(rows-1)(c).io.out_id
io.out_last(c) := tile(rows-1)(c).io.out_last
io.out_valid(c) := tile(rows-1)(c).io.out_valid
io.out_b(c) := {
if (tree_reduction) {
val prods = tileT(c).map(_.io.out_b)
accumulateTree(prods :+ io.in_b(c))
} else {
tile(rows - 1)(c).io.out_b
}
}
}
io.bad_dataflow := tile.map(_.map(_.io.bad_dataflow).reduce(_||_)).reduce(_||_)
// Drive the Tile's right IO
for (r <- 0 until rows) {
io.out_a(r) := tile(r)(columns-1).io.out_a
}
}
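// Sketch only (hypothetical helper, not part of gemmini): every broadcast loop
// above is the same foldLeft pattern -- feed a value into the first PE of a
// row/column, then chain each PE's output into the next PE's input -- so the
// TODO about abstracting the horizontal/vertical broadcasts could be addressed
// along these lines:
//
//   def broadcast[A <: Data](first: A, pes: Seq[PE[T]])(sink: PE[T] => A, source: PE[T] => A): Unit =
//     pes.foldLeft(first) { case (v, pe) => sink(pe) := v; source(pe) }
//
//   // e.g. the 'a' broadcast becomes: broadcast(io.in_a(r), tile(r))(_.io.in_a, _.io.out_a)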
| module Tile_247( // @[Tile.scala:16:7]
input clock, // @[Tile.scala:16:7]
input reset, // @[Tile.scala:16:7]
input [7:0] io_in_a_0, // @[Tile.scala:17:14]
input [19:0] io_in_b_0, // @[Tile.scala:17:14]
input [19:0] io_in_d_0, // @[Tile.scala:17:14]
input io_in_control_0_dataflow, // @[Tile.scala:17:14]
input io_in_control_0_propagate, // @[Tile.scala:17:14]
input [4:0] io_in_control_0_shift, // @[Tile.scala:17:14]
input [2:0] io_in_id_0, // @[Tile.scala:17:14]
input io_in_last_0, // @[Tile.scala:17:14]
output [7:0] io_out_a_0, // @[Tile.scala:17:14]
output [19:0] io_out_c_0, // @[Tile.scala:17:14]
output [19:0] io_out_b_0, // @[Tile.scala:17:14]
output io_out_control_0_dataflow, // @[Tile.scala:17:14]
output io_out_control_0_propagate, // @[Tile.scala:17:14]
output [4:0] io_out_control_0_shift, // @[Tile.scala:17:14]
output [2:0] io_out_id_0, // @[Tile.scala:17:14]
output io_out_last_0, // @[Tile.scala:17:14]
input io_in_valid_0, // @[Tile.scala:17:14]
output io_out_valid_0 // @[Tile.scala:17:14]
);
wire [7:0] io_in_a_0_0 = io_in_a_0; // @[Tile.scala:16:7]
wire [19:0] io_in_b_0_0 = io_in_b_0; // @[Tile.scala:16:7]
wire [19:0] io_in_d_0_0 = io_in_d_0; // @[Tile.scala:16:7]
wire io_in_control_0_dataflow_0 = io_in_control_0_dataflow; // @[Tile.scala:16:7]
wire io_in_control_0_propagate_0 = io_in_control_0_propagate; // @[Tile.scala:16:7]
wire [4:0] io_in_control_0_shift_0 = io_in_control_0_shift; // @[Tile.scala:16:7]
wire [2:0] io_in_id_0_0 = io_in_id_0; // @[Tile.scala:16:7]
wire io_in_last_0_0 = io_in_last_0; // @[Tile.scala:16:7]
wire io_in_valid_0_0 = io_in_valid_0; // @[Tile.scala:16:7]
wire io_bad_dataflow = 1'h0; // @[Tile.scala:16:7, :17:14, :42:44]
wire [7:0] io_out_a_0_0; // @[Tile.scala:16:7]
wire [19:0] io_out_c_0_0; // @[Tile.scala:16:7]
wire [19:0] io_out_b_0_0; // @[Tile.scala:16:7]
wire io_out_control_0_dataflow_0; // @[Tile.scala:16:7]
wire io_out_control_0_propagate_0; // @[Tile.scala:16:7]
wire [4:0] io_out_control_0_shift_0; // @[Tile.scala:16:7]
wire [2:0] io_out_id_0_0; // @[Tile.scala:16:7]
wire io_out_last_0_0; // @[Tile.scala:16:7]
wire io_out_valid_0_0; // @[Tile.scala:16:7]
PE_503 tile_0_0 ( // @[Tile.scala:42:44]
.clock (clock),
.reset (reset),
.io_in_a (io_in_a_0_0), // @[Tile.scala:16:7]
.io_in_b (io_in_b_0_0), // @[Tile.scala:16:7]
.io_in_d (io_in_d_0_0), // @[Tile.scala:16:7]
.io_out_a (io_out_a_0_0),
.io_out_b (io_out_b_0_0),
.io_out_c (io_out_c_0_0),
.io_in_control_dataflow (io_in_control_0_dataflow_0), // @[Tile.scala:16:7]
.io_in_control_propagate (io_in_control_0_propagate_0), // @[Tile.scala:16:7]
.io_in_control_shift (io_in_control_0_shift_0), // @[Tile.scala:16:7]
.io_out_control_dataflow (io_out_control_0_dataflow_0),
.io_out_control_propagate (io_out_control_0_propagate_0),
.io_out_control_shift (io_out_control_0_shift_0),
.io_in_id (io_in_id_0_0), // @[Tile.scala:16:7]
.io_out_id (io_out_id_0_0),
.io_in_last (io_in_last_0_0), // @[Tile.scala:16:7]
.io_out_last (io_out_last_0_0),
.io_in_valid (io_in_valid_0_0), // @[Tile.scala:16:7]
.io_out_valid (io_out_valid_0_0)
); // @[Tile.scala:42:44]
assign io_out_a_0 = io_out_a_0_0; // @[Tile.scala:16:7]
assign io_out_c_0 = io_out_c_0_0; // @[Tile.scala:16:7]
assign io_out_b_0 = io_out_b_0_0; // @[Tile.scala:16:7]
assign io_out_control_0_dataflow = io_out_control_0_dataflow_0; // @[Tile.scala:16:7]
assign io_out_control_0_propagate = io_out_control_0_propagate_0; // @[Tile.scala:16:7]
assign io_out_control_0_shift = io_out_control_0_shift_0; // @[Tile.scala:16:7]
assign io_out_id_0 = io_out_id_0_0; // @[Tile.scala:16:7]
assign io_out_last_0 = io_out_last_0_0; // @[Tile.scala:16:7]
assign io_out_valid_0 = io_out_valid_0_0; // @[Tile.scala:16:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File PE.scala:
// See README.md for license details.
package gemmini
import chisel3._
import chisel3.util._
class PEControl[T <: Data : Arithmetic](accType: T) extends Bundle {
val dataflow = UInt(1.W) // TODO make this an Enum
val propagate = UInt(1.W) // Which register should be propagated (and which should be accumulated)?
val shift = UInt(log2Up(accType.getWidth).W) // TODO this isn't correct for Floats
}
class MacUnit[T <: Data](inputType: T, cType: T, dType: T) (implicit ev: Arithmetic[T]) extends Module {
import ev._
val io = IO(new Bundle {
val in_a = Input(inputType)
val in_b = Input(inputType)
val in_c = Input(cType)
val out_d = Output(dType)
})
io.out_d := io.in_c.mac(io.in_a, io.in_b)
}
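// For the integer Arithmetic instances in Arithmetic.scala, mac(m1, m2) returns
// m1 * m2 + self, so this module computes out_d = in_a * in_b + in_c, with the
// connection to io.out_d keeping only dType's low-order bits (visible in the
// generated Verilog below). Worked example with SInt operands: in_a = 3,
// in_b = -2, in_c = 10 gives out_d = 4.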
// TODO update documentation
/**
* A PE implementing a MAC operation. Configured as fully combinational when integrated into a Mesh.
* @param width Data width of operands
*/
class PE[T <: Data](inputType: T, outputType: T, accType: T, df: Dataflow.Value, max_simultaneous_matmuls: Int)
(implicit ev: Arithmetic[T]) extends Module { // Debugging variables
import ev._
val io = IO(new Bundle {
val in_a = Input(inputType)
val in_b = Input(outputType)
val in_d = Input(outputType)
val out_a = Output(inputType)
val out_b = Output(outputType)
val out_c = Output(outputType)
val in_control = Input(new PEControl(accType))
val out_control = Output(new PEControl(accType))
val in_id = Input(UInt(log2Up(max_simultaneous_matmuls).W))
val out_id = Output(UInt(log2Up(max_simultaneous_matmuls).W))
val in_last = Input(Bool())
val out_last = Output(Bool())
val in_valid = Input(Bool())
val out_valid = Output(Bool())
val bad_dataflow = Output(Bool())
})
val cType = if (df == Dataflow.WS) inputType else accType
// When creating PEs that support multiple dataflows, the
// elaboration/synthesis tools often fail to consolidate and de-duplicate
// MAC units. To force mac circuitry to be re-used, we create a "mac_unit"
// module here which just performs a single MAC operation
val mac_unit = Module(new MacUnit(inputType,
if (df == Dataflow.WS) outputType else accType, outputType))
val a = io.in_a
val b = io.in_b
val d = io.in_d
val c1 = Reg(cType)
val c2 = Reg(cType)
val dataflow = io.in_control.dataflow
val prop = io.in_control.propagate
val shift = io.in_control.shift
val id = io.in_id
val last = io.in_last
val valid = io.in_valid
io.out_a := a
io.out_control.dataflow := dataflow
io.out_control.propagate := prop
io.out_control.shift := shift
io.out_id := id
io.out_last := last
io.out_valid := valid
mac_unit.io.in_a := a
val last_s = RegEnable(prop, valid)
val flip = last_s =/= prop
val shift_offset = Mux(flip, shift, 0.U)
// Which dataflow are we using?
val OUTPUT_STATIONARY = Dataflow.OS.id.U(1.W)
val WEIGHT_STATIONARY = Dataflow.WS.id.U(1.W)
// Is c1 being computed on, or propagated forward (in the output-stationary dataflow)?
val COMPUTE = 0.U(1.W)
val PROPAGATE = 1.U(1.W)
io.bad_dataflow := false.B
when ((df == Dataflow.OS).B || ((df == Dataflow.BOTH).B && dataflow === OUTPUT_STATIONARY)) {
when(prop === PROPAGATE) {
io.out_c := (c1 >> shift_offset).clippedToWidthOf(outputType)
io.out_b := b
mac_unit.io.in_b := b.asTypeOf(inputType)
mac_unit.io.in_c := c2
c2 := mac_unit.io.out_d
c1 := d.withWidthOf(cType)
}.otherwise {
io.out_c := (c2 >> shift_offset).clippedToWidthOf(outputType)
io.out_b := b
mac_unit.io.in_b := b.asTypeOf(inputType)
mac_unit.io.in_c := c1
c1 := mac_unit.io.out_d
c2 := d.withWidthOf(cType)
}
}.elsewhen ((df == Dataflow.WS).B || ((df == Dataflow.BOTH).B && dataflow === WEIGHT_STATIONARY)) {
when(prop === PROPAGATE) {
io.out_c := c1
mac_unit.io.in_b := c2.asTypeOf(inputType)
mac_unit.io.in_c := b
io.out_b := mac_unit.io.out_d
c1 := d
}.otherwise {
io.out_c := c2
mac_unit.io.in_b := c1.asTypeOf(inputType)
mac_unit.io.in_c := b
io.out_b := mac_unit.io.out_d
c2 := d
}
}.otherwise {
io.bad_dataflow := true.B
//assert(false.B, "unknown dataflow")
io.out_c := DontCare
io.out_b := DontCare
mac_unit.io.in_b := b.asTypeOf(inputType)
mac_unit.io.in_c := c2
}
when (!valid) {
c1 := c1
c2 := c2
mac_unit.io.in_b := DontCare
mac_unit.io.in_c := DontCare
}
}
File Arithmetic.scala:
// A simple type class for Chisel datatypes that can add and multiply. To add your own type, simply create your own:
// implicit MyTypeArithmetic extends Arithmetic[MyType] { ... }
package gemmini
import chisel3._
import chisel3.util._
import hardfloat._
// Bundles that represent the raw bits of custom datatypes
case class Float(expWidth: Int, sigWidth: Int) extends Bundle {
val bits = UInt((expWidth + sigWidth).W)
val bias: Int = (1 << (expWidth-1)) - 1
}
case class DummySInt(w: Int) extends Bundle {
val bits = UInt(w.W)
def dontCare: DummySInt = {
val o = Wire(new DummySInt(w))
o.bits := 0.U
o
}
}
// The Arithmetic typeclass which implements various arithmetic operations on custom datatypes
abstract class Arithmetic[T <: Data] {
implicit def cast(t: T): ArithmeticOps[T]
}
abstract class ArithmeticOps[T <: Data](self: T) {
def *(t: T): T
def mac(m1: T, m2: T): T // Returns (m1 * m2 + self)
def +(t: T): T
def -(t: T): T
def >>(u: UInt): T // This is a rounding shift! Rounds away from 0
def >(t: T): Bool
def identity: T
def withWidthOf(t: T): T
def clippedToWidthOf(t: T): T // Like "withWidthOf", except that it saturates
def relu: T
def zero: T
def minimum: T
// Optional parameters, which only need to be defined if you want to enable various optimizations for transformers
def divider(denom_t: UInt, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[T])] = None
def sqrt: Option[(DecoupledIO[UInt], DecoupledIO[T])] = None
def reciprocal[U <: Data](u: U, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[U])] = None
def mult_with_reciprocal[U <: Data](reciprocal: U) = self
}
object Arithmetic {
implicit object UIntArithmetic extends Arithmetic[UInt] {
override implicit def cast(self: UInt) = new ArithmeticOps(self) {
override def *(t: UInt) = self * t
override def mac(m1: UInt, m2: UInt) = m1 * m2 + self
override def +(t: UInt) = self + t
override def -(t: UInt) = self - t
override def >>(u: UInt) = {
// The equation we use can be found here: https://riscv.github.io/documents/riscv-v-spec/#_vector_fixed_point_rounding_mode_register_vxrm
// TODO Do we need to explicitly handle the cases where "u" is a small number (like 0)? What is the default behavior here?
val point_five = Mux(u === 0.U, 0.U, self(u - 1.U))
val zeros = Mux(u <= 1.U, 0.U, self.asUInt & ((1.U << (u - 1.U)).asUInt - 1.U)) =/= 0.U
val ones_digit = self(u)
val r = point_five & (zeros | ones_digit)
(self >> u).asUInt + r
}
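        // Worked example of the rounding above, following the vxrm equation that is
        // referenced: self = 7, u = 1 gives point_five = 1, zeros = 0, ones_digit = 1,
        // so r = 1 and the result is (7 >> 1) + 1 = 4; self = 5, u = 1 gives r = 0 and
        // the result is 2 (the tie lands on the even value). Plain truncation would
        // return 3 and 2 respectively.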
override def >(t: UInt): Bool = self > t
override def withWidthOf(t: UInt) = self.asTypeOf(t)
override def clippedToWidthOf(t: UInt) = {
val sat = ((1 << (t.getWidth-1))-1).U
Mux(self > sat, sat, self)(t.getWidth-1, 0)
}
override def relu: UInt = self
override def zero: UInt = 0.U
override def identity: UInt = 1.U
override def minimum: UInt = 0.U
}
}
implicit object SIntArithmetic extends Arithmetic[SInt] {
override implicit def cast(self: SInt) = new ArithmeticOps(self) {
override def *(t: SInt) = self * t
override def mac(m1: SInt, m2: SInt) = m1 * m2 + self
override def +(t: SInt) = self + t
override def -(t: SInt) = self - t
override def >>(u: UInt) = {
// The equation we use can be found here: https://riscv.github.io/documents/riscv-v-spec/#_vector_fixed_point_rounding_mode_register_vxrm
// TODO Do we need to explicitly handle the cases where "u" is a small number (like 0)? What is the default behavior here?
val point_five = Mux(u === 0.U, 0.U, self(u - 1.U))
val zeros = Mux(u <= 1.U, 0.U, self.asUInt & ((1.U << (u - 1.U)).asUInt - 1.U)) =/= 0.U
val ones_digit = self(u)
val r = (point_five & (zeros | ones_digit)).asBool
(self >> u).asSInt + Mux(r, 1.S, 0.S)
}
override def >(t: SInt): Bool = self > t
override def withWidthOf(t: SInt) = {
if (self.getWidth >= t.getWidth)
self(t.getWidth-1, 0).asSInt
else {
val sign_bits = t.getWidth - self.getWidth
val sign = self(self.getWidth-1)
Cat(Cat(Seq.fill(sign_bits)(sign)), self).asTypeOf(t)
}
}
override def clippedToWidthOf(t: SInt): SInt = {
val maxsat = ((1 << (t.getWidth-1))-1).S
val minsat = (-(1 << (t.getWidth-1))).S
MuxCase(self, Seq((self > maxsat) -> maxsat, (self < minsat) -> minsat))(t.getWidth-1, 0).asSInt
}
override def relu: SInt = Mux(self >= 0.S, self, 0.S)
override def zero: SInt = 0.S
override def identity: SInt = 1.S
override def minimum: SInt = (-(1 << (self.getWidth-1))).S
override def divider(denom_t: UInt, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[SInt])] = {
// TODO this uses a floating point divider, but we should use an integer divider instead
val input = Wire(Decoupled(denom_t.cloneType))
val output = Wire(Decoupled(self.cloneType))
// We translate our integer to floating-point form so that we can use the hardfloat divider
val expWidth = log2Up(self.getWidth) + 1
val sigWidth = self.getWidth
def sin_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def uin_to_float(x: UInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := false.B
in_to_rec_fn.io.in := x
in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def float_to_in(x: UInt) = {
val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth))
rec_fn_to_in.io.signedOut := true.B
rec_fn_to_in.io.in := x
rec_fn_to_in.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
rec_fn_to_in.io.out.asSInt
}
val self_rec = sin_to_float(self)
val denom_rec = uin_to_float(input.bits)
        // Instantiate the hardfloat divider
val divider = Module(new DivSqrtRecFN_small(expWidth, sigWidth, options))
input.ready := divider.io.inReady
divider.io.inValid := input.valid
divider.io.sqrtOp := false.B
divider.io.a := self_rec
divider.io.b := denom_rec
divider.io.roundingMode := consts.round_minMag
divider.io.detectTininess := consts.tininess_afterRounding
output.valid := divider.io.outValid_div
output.bits := float_to_in(divider.io.out)
assert(!output.valid || output.ready)
Some((input, output))
}
override def sqrt: Option[(DecoupledIO[UInt], DecoupledIO[SInt])] = {
// TODO this uses a floating point divider, but we should use an integer divider instead
val input = Wire(Decoupled(UInt(0.W)))
val output = Wire(Decoupled(self.cloneType))
input.bits := DontCare
// We translate our integer to floating-point form so that we can use the hardfloat divider
val expWidth = log2Up(self.getWidth) + 1
val sigWidth = self.getWidth
def in_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def float_to_in(x: UInt) = {
val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth))
rec_fn_to_in.io.signedOut := true.B
rec_fn_to_in.io.in := x
rec_fn_to_in.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
rec_fn_to_in.io.out.asSInt
}
val self_rec = in_to_float(self)
        // Instantiate the hardfloat sqrt
val sqrter = Module(new DivSqrtRecFN_small(expWidth, sigWidth, 0))
input.ready := sqrter.io.inReady
sqrter.io.inValid := input.valid
sqrter.io.sqrtOp := true.B
sqrter.io.a := self_rec
sqrter.io.b := DontCare
sqrter.io.roundingMode := consts.round_minMag
sqrter.io.detectTininess := consts.tininess_afterRounding
output.valid := sqrter.io.outValid_sqrt
output.bits := float_to_in(sqrter.io.out)
assert(!output.valid || output.ready)
Some((input, output))
}
override def reciprocal[U <: Data](u: U, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[U])] = u match {
case Float(expWidth, sigWidth) =>
val input = Wire(Decoupled(UInt(0.W)))
val output = Wire(Decoupled(u.cloneType))
input.bits := DontCare
// We translate our integer to floating-point form so that we can use the hardfloat divider
def in_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
val self_rec = in_to_float(self)
val one_rec = in_to_float(1.S)
          // Instantiate the hardfloat divider
val divider = Module(new DivSqrtRecFN_small(expWidth, sigWidth, options))
input.ready := divider.io.inReady
divider.io.inValid := input.valid
divider.io.sqrtOp := false.B
divider.io.a := one_rec
divider.io.b := self_rec
divider.io.roundingMode := consts.round_near_even
divider.io.detectTininess := consts.tininess_afterRounding
output.valid := divider.io.outValid_div
output.bits := fNFromRecFN(expWidth, sigWidth, divider.io.out).asTypeOf(u)
assert(!output.valid || output.ready)
Some((input, output))
case _ => None
}
override def mult_with_reciprocal[U <: Data](reciprocal: U): SInt = reciprocal match {
case recip @ Float(expWidth, sigWidth) =>
def in_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def float_to_in(x: UInt) = {
val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth))
rec_fn_to_in.io.signedOut := true.B
rec_fn_to_in.io.in := x
rec_fn_to_in.io.roundingMode := consts.round_minMag
rec_fn_to_in.io.out.asSInt
}
val self_rec = in_to_float(self)
val reciprocal_rec = recFNFromFN(expWidth, sigWidth, recip.bits)
            // Instantiate the hardfloat multiplier
val muladder = Module(new MulRecFN(expWidth, sigWidth))
muladder.io.roundingMode := consts.round_near_even
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := self_rec
muladder.io.b := reciprocal_rec
float_to_in(muladder.io.out)
case _ => self
}
}
}
implicit object FloatArithmetic extends Arithmetic[Float] {
// TODO Floating point arithmetic currently switches between recoded and standard formats for every operation. However, it should stay in the recoded format as it travels through the systolic array
override implicit def cast(self: Float): ArithmeticOps[Float] = new ArithmeticOps(self) {
override def *(t: Float): Float = {
val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth))
t_resizer.io.in := t_rec
t_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
t_resizer.io.detectTininess := consts.tininess_afterRounding
val t_rec_resized = t_resizer.io.out
val muladder = Module(new MulRecFN(self.expWidth, self.sigWidth))
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := self_rec
muladder.io.b := t_rec_resized
val out = Wire(Float(self.expWidth, self.sigWidth))
out.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
out
}
override def mac(m1: Float, m2: Float): Float = {
// Recode all operands
val m1_rec = recFNFromFN(m1.expWidth, m1.sigWidth, m1.bits)
val m2_rec = recFNFromFN(m2.expWidth, m2.sigWidth, m2.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Resize m1 to self's width
val m1_resizer = Module(new RecFNToRecFN(m1.expWidth, m1.sigWidth, self.expWidth, self.sigWidth))
m1_resizer.io.in := m1_rec
m1_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
m1_resizer.io.detectTininess := consts.tininess_afterRounding
val m1_rec_resized = m1_resizer.io.out
// Resize m2 to self's width
val m2_resizer = Module(new RecFNToRecFN(m2.expWidth, m2.sigWidth, self.expWidth, self.sigWidth))
m2_resizer.io.in := m2_rec
m2_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
m2_resizer.io.detectTininess := consts.tininess_afterRounding
val m2_rec_resized = m2_resizer.io.out
// Perform multiply-add
val muladder = Module(new MulAddRecFN(self.expWidth, self.sigWidth))
muladder.io.op := 0.U
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := m1_rec_resized
muladder.io.b := m2_rec_resized
muladder.io.c := self_rec
// Convert result to standard format // TODO remove these intermediate recodings
val out = Wire(Float(self.expWidth, self.sigWidth))
out.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
out
}
override def +(t: Float): Float = {
require(self.getWidth >= t.getWidth) // This just makes it easier to write the resizing code
// Recode all operands
val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Generate 1 as a float
val in_to_rec_fn = Module(new INToRecFN(1, self.expWidth, self.sigWidth))
in_to_rec_fn.io.signedIn := false.B
in_to_rec_fn.io.in := 1.U
in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
val one_rec = in_to_rec_fn.io.out
// Resize t
val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth))
t_resizer.io.in := t_rec
t_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
t_resizer.io.detectTininess := consts.tininess_afterRounding
val t_rec_resized = t_resizer.io.out
// Perform addition
val muladder = Module(new MulAddRecFN(self.expWidth, self.sigWidth))
muladder.io.op := 0.U
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := t_rec_resized
muladder.io.b := one_rec
muladder.io.c := self_rec
val result = Wire(Float(self.expWidth, self.sigWidth))
result.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
result
}
override def -(t: Float): Float = {
val t_sgn = t.bits(t.getWidth-1)
val neg_t = Cat(~t_sgn, t.bits(t.getWidth-2,0)).asTypeOf(t)
self + neg_t
}
override def >>(u: UInt): Float = {
// Recode self
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Get 2^(-u) as a recoded float
val shift_exp = Wire(UInt(self.expWidth.W))
shift_exp := self.bias.U - u
val shift_fn = Cat(0.U(1.W), shift_exp, 0.U((self.sigWidth-1).W))
val shift_rec = recFNFromFN(self.expWidth, self.sigWidth, shift_fn)
assert(shift_exp =/= 0.U, "scaling by denormalized numbers is not currently supported")
// Multiply self and 2^(-u)
val muladder = Module(new MulRecFN(self.expWidth, self.sigWidth))
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := self_rec
muladder.io.b := shift_rec
val result = Wire(Float(self.expWidth, self.sigWidth))
result.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
result
}
override def >(t: Float): Bool = {
// Recode all operands
val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Resize t to self's width
val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth))
t_resizer.io.in := t_rec
t_resizer.io.roundingMode := consts.round_near_even
t_resizer.io.detectTininess := consts.tininess_afterRounding
val t_rec_resized = t_resizer.io.out
val comparator = Module(new CompareRecFN(self.expWidth, self.sigWidth))
comparator.io.a := self_rec
comparator.io.b := t_rec_resized
comparator.io.signaling := false.B
comparator.io.gt
}
override def withWidthOf(t: Float): Float = {
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
val resizer = Module(new RecFNToRecFN(self.expWidth, self.sigWidth, t.expWidth, t.sigWidth))
resizer.io.in := self_rec
resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
resizer.io.detectTininess := consts.tininess_afterRounding
val result = Wire(Float(t.expWidth, t.sigWidth))
result.bits := fNFromRecFN(t.expWidth, t.sigWidth, resizer.io.out)
result
}
override def clippedToWidthOf(t: Float): Float = {
// TODO check for overflow. Right now, we just assume that overflow doesn't happen
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
val resizer = Module(new RecFNToRecFN(self.expWidth, self.sigWidth, t.expWidth, t.sigWidth))
resizer.io.in := self_rec
resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
resizer.io.detectTininess := consts.tininess_afterRounding
val result = Wire(Float(t.expWidth, t.sigWidth))
result.bits := fNFromRecFN(t.expWidth, t.sigWidth, resizer.io.out)
result
}
override def relu: Float = {
val raw = rawFloatFromFN(self.expWidth, self.sigWidth, self.bits)
val result = Wire(Float(self.expWidth, self.sigWidth))
result.bits := Mux(!raw.isZero && raw.sign, 0.U, self.bits)
result
}
override def zero: Float = 0.U.asTypeOf(self)
override def identity: Float = Cat(0.U(2.W), ~(0.U((self.expWidth-1).W)), 0.U((self.sigWidth-1).W)).asTypeOf(self)
override def minimum: Float = Cat(1.U, ~(0.U(self.expWidth.W)), 0.U((self.sigWidth-1).W)).asTypeOf(self)
}
}
implicit object DummySIntArithmetic extends Arithmetic[DummySInt] {
override implicit def cast(self: DummySInt) = new ArithmeticOps(self) {
override def *(t: DummySInt) = self.dontCare
override def mac(m1: DummySInt, m2: DummySInt) = self.dontCare
override def +(t: DummySInt) = self.dontCare
override def -(t: DummySInt) = self.dontCare
override def >>(t: UInt) = self.dontCare
override def >(t: DummySInt): Bool = false.B
override def identity = self.dontCare
override def withWidthOf(t: DummySInt) = self.dontCare
override def clippedToWidthOf(t: DummySInt) = self.dontCare
override def relu = self.dontCare
override def zero = self.dontCare
override def minimum: DummySInt = self.dontCare
}
}
}
| module MacUnit_151( // @[PE.scala:14:7]
input clock, // @[PE.scala:14:7]
input reset, // @[PE.scala:14:7]
input [7:0] io_in_a, // @[PE.scala:16:14]
input [7:0] io_in_b, // @[PE.scala:16:14]
input [31:0] io_in_c, // @[PE.scala:16:14]
output [19:0] io_out_d // @[PE.scala:16:14]
);
wire [7:0] io_in_a_0 = io_in_a; // @[PE.scala:14:7]
wire [7:0] io_in_b_0 = io_in_b; // @[PE.scala:14:7]
wire [31:0] io_in_c_0 = io_in_c; // @[PE.scala:14:7]
wire [19:0] io_out_d_0; // @[PE.scala:14:7]
wire [15:0] _io_out_d_T = {{8{io_in_a_0[7]}}, io_in_a_0} * {{8{io_in_b_0[7]}}, io_in_b_0}; // @[PE.scala:14:7]
wire [32:0] _io_out_d_T_1 = {{17{_io_out_d_T[15]}}, _io_out_d_T} + {io_in_c_0[31], io_in_c_0}; // @[PE.scala:14:7]
wire [31:0] _io_out_d_T_2 = _io_out_d_T_1[31:0]; // @[Arithmetic.scala:93:54]
wire [31:0] _io_out_d_T_3 = _io_out_d_T_2; // @[Arithmetic.scala:93:54]
assign io_out_d_0 = _io_out_d_T_3[19:0]; // @[PE.scala:14:7, :23:12]
assign io_out_d = io_out_d_0; // @[PE.scala:14:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File Nodes.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import org.chipsalliance.diplomacy.nodes._
import freechips.rocketchip.util.{AsyncQueueParams,RationalDirection}
case object TLMonitorBuilder extends Field[TLMonitorArgs => TLMonitorBase](args => new TLMonitor(args))
object TLImp extends NodeImp[TLMasterPortParameters, TLSlavePortParameters, TLEdgeOut, TLEdgeIn, TLBundle]
{
def edgeO(pd: TLMasterPortParameters, pu: TLSlavePortParameters, p: Parameters, sourceInfo: SourceInfo) = new TLEdgeOut(pd, pu, p, sourceInfo)
def edgeI(pd: TLMasterPortParameters, pu: TLSlavePortParameters, p: Parameters, sourceInfo: SourceInfo) = new TLEdgeIn (pd, pu, p, sourceInfo)
def bundleO(eo: TLEdgeOut) = TLBundle(eo.bundle)
def bundleI(ei: TLEdgeIn) = TLBundle(ei.bundle)
def render(ei: TLEdgeIn) = RenderedEdge(colour = "#000000" /* black */, label = (ei.manager.beatBytes * 8).toString)
override def monitor(bundle: TLBundle, edge: TLEdgeIn): Unit = {
val monitor = Module(edge.params(TLMonitorBuilder)(TLMonitorArgs(edge)))
monitor.io.in := bundle
}
override def mixO(pd: TLMasterPortParameters, node: OutwardNode[TLMasterPortParameters, TLSlavePortParameters, TLBundle]): TLMasterPortParameters =
pd.v1copy(clients = pd.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) })
override def mixI(pu: TLSlavePortParameters, node: InwardNode[TLMasterPortParameters, TLSlavePortParameters, TLBundle]): TLSlavePortParameters =
pu.v1copy(managers = pu.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) })
}
trait TLFormatNode extends FormatNode[TLEdgeIn, TLEdgeOut]
case class TLClientNode(portParams: Seq[TLMasterPortParameters])(implicit valName: ValName) extends SourceNode(TLImp)(portParams) with TLFormatNode
case class TLManagerNode(portParams: Seq[TLSlavePortParameters])(implicit valName: ValName) extends SinkNode(TLImp)(portParams) with TLFormatNode
case class TLAdapterNode(
clientFn: TLMasterPortParameters => TLMasterPortParameters = { s => s },
managerFn: TLSlavePortParameters => TLSlavePortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLImp)(clientFn, managerFn) with TLFormatNode
case class TLJunctionNode(
clientFn: Seq[TLMasterPortParameters] => Seq[TLMasterPortParameters],
managerFn: Seq[TLSlavePortParameters] => Seq[TLSlavePortParameters])(
implicit valName: ValName)
extends JunctionNode(TLImp)(clientFn, managerFn) with TLFormatNode
case class TLIdentityNode()(implicit valName: ValName) extends IdentityNode(TLImp)() with TLFormatNode
object TLNameNode {
def apply(name: ValName) = TLIdentityNode()(name)
def apply(name: Option[String]): TLIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLIdentityNode = apply(Some(name))
}
case class TLEphemeralNode()(implicit valName: ValName) extends EphemeralNode(TLImp)()
object TLTempNode {
def apply(): TLEphemeralNode = TLEphemeralNode()(ValName("temp"))
}
case class TLNexusNode(
clientFn: Seq[TLMasterPortParameters] => TLMasterPortParameters,
managerFn: Seq[TLSlavePortParameters] => TLSlavePortParameters)(
implicit valName: ValName)
extends NexusNode(TLImp)(clientFn, managerFn) with TLFormatNode
abstract class TLCustomNode(implicit valName: ValName)
extends CustomNode(TLImp) with TLFormatNode
// Asynchronous crossings
trait TLAsyncFormatNode extends FormatNode[TLAsyncEdgeParameters, TLAsyncEdgeParameters]
object TLAsyncImp extends SimpleNodeImp[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncEdgeParameters, TLAsyncBundle]
{
def edge(pd: TLAsyncClientPortParameters, pu: TLAsyncManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLAsyncEdgeParameters(pd, pu, p, sourceInfo)
def bundle(e: TLAsyncEdgeParameters) = new TLAsyncBundle(e.bundle)
def render(e: TLAsyncEdgeParameters) = RenderedEdge(colour = "#ff0000" /* red */, label = e.manager.async.depth.toString)
override def mixO(pd: TLAsyncClientPortParameters, node: OutwardNode[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncBundle]): TLAsyncClientPortParameters =
pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }))
override def mixI(pu: TLAsyncManagerPortParameters, node: InwardNode[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncBundle]): TLAsyncManagerPortParameters =
pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }))
}
case class TLAsyncAdapterNode(
clientFn: TLAsyncClientPortParameters => TLAsyncClientPortParameters = { s => s },
managerFn: TLAsyncManagerPortParameters => TLAsyncManagerPortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLAsyncImp)(clientFn, managerFn) with TLAsyncFormatNode
case class TLAsyncIdentityNode()(implicit valName: ValName) extends IdentityNode(TLAsyncImp)() with TLAsyncFormatNode
object TLAsyncNameNode {
def apply(name: ValName) = TLAsyncIdentityNode()(name)
def apply(name: Option[String]): TLAsyncIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLAsyncIdentityNode = apply(Some(name))
}
case class TLAsyncSourceNode(sync: Option[Int])(implicit valName: ValName)
extends MixedAdapterNode(TLImp, TLAsyncImp)(
dFn = { p => TLAsyncClientPortParameters(p) },
uFn = { p => p.base.v1copy(minLatency = p.base.minLatency + sync.getOrElse(p.async.sync)) }) with FormatNode[TLEdgeIn, TLAsyncEdgeParameters] // discard cycles in other clock domain
case class TLAsyncSinkNode(async: AsyncQueueParams)(implicit valName: ValName)
extends MixedAdapterNode(TLAsyncImp, TLImp)(
dFn = { p => p.base.v1copy(minLatency = p.base.minLatency + async.sync) },
uFn = { p => TLAsyncManagerPortParameters(async, p) }) with FormatNode[TLAsyncEdgeParameters, TLEdgeOut]
// Rationally related crossings
trait TLRationalFormatNode extends FormatNode[TLRationalEdgeParameters, TLRationalEdgeParameters]
object TLRationalImp extends SimpleNodeImp[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalEdgeParameters, TLRationalBundle]
{
def edge(pd: TLRationalClientPortParameters, pu: TLRationalManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLRationalEdgeParameters(pd, pu, p, sourceInfo)
def bundle(e: TLRationalEdgeParameters) = new TLRationalBundle(e.bundle)
def render(e: TLRationalEdgeParameters) = RenderedEdge(colour = "#00ff00" /* green */)
override def mixO(pd: TLRationalClientPortParameters, node: OutwardNode[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalBundle]): TLRationalClientPortParameters =
pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }))
override def mixI(pu: TLRationalManagerPortParameters, node: InwardNode[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalBundle]): TLRationalManagerPortParameters =
pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }))
}
case class TLRationalAdapterNode(
clientFn: TLRationalClientPortParameters => TLRationalClientPortParameters = { s => s },
managerFn: TLRationalManagerPortParameters => TLRationalManagerPortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLRationalImp)(clientFn, managerFn) with TLRationalFormatNode
case class TLRationalIdentityNode()(implicit valName: ValName) extends IdentityNode(TLRationalImp)() with TLRationalFormatNode
object TLRationalNameNode {
def apply(name: ValName) = TLRationalIdentityNode()(name)
def apply(name: Option[String]): TLRationalIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLRationalIdentityNode = apply(Some(name))
}
case class TLRationalSourceNode()(implicit valName: ValName)
extends MixedAdapterNode(TLImp, TLRationalImp)(
dFn = { p => TLRationalClientPortParameters(p) },
uFn = { p => p.base.v1copy(minLatency = 1) }) with FormatNode[TLEdgeIn, TLRationalEdgeParameters] // discard cycles from other clock domain
case class TLRationalSinkNode(direction: RationalDirection)(implicit valName: ValName)
extends MixedAdapterNode(TLRationalImp, TLImp)(
dFn = { p => p.base.v1copy(minLatency = 1) },
uFn = { p => TLRationalManagerPortParameters(direction, p) }) with FormatNode[TLRationalEdgeParameters, TLEdgeOut]
// Credited version of TileLink channels
trait TLCreditedFormatNode extends FormatNode[TLCreditedEdgeParameters, TLCreditedEdgeParameters]
object TLCreditedImp extends SimpleNodeImp[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedEdgeParameters, TLCreditedBundle]
{
def edge(pd: TLCreditedClientPortParameters, pu: TLCreditedManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLCreditedEdgeParameters(pd, pu, p, sourceInfo)
def bundle(e: TLCreditedEdgeParameters) = new TLCreditedBundle(e.bundle)
def render(e: TLCreditedEdgeParameters) = RenderedEdge(colour = "#ffff00" /* yellow */, e.delay.toString)
override def mixO(pd: TLCreditedClientPortParameters, node: OutwardNode[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedBundle]): TLCreditedClientPortParameters =
pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }))
override def mixI(pu: TLCreditedManagerPortParameters, node: InwardNode[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedBundle]): TLCreditedManagerPortParameters =
pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }))
}
case class TLCreditedAdapterNode(
clientFn: TLCreditedClientPortParameters => TLCreditedClientPortParameters = { s => s },
managerFn: TLCreditedManagerPortParameters => TLCreditedManagerPortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLCreditedImp)(clientFn, managerFn) with TLCreditedFormatNode
case class TLCreditedIdentityNode()(implicit valName: ValName) extends IdentityNode(TLCreditedImp)() with TLCreditedFormatNode
object TLCreditedNameNode {
def apply(name: ValName) = TLCreditedIdentityNode()(name)
def apply(name: Option[String]): TLCreditedIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLCreditedIdentityNode = apply(Some(name))
}
case class TLCreditedSourceNode(delay: TLCreditedDelay)(implicit valName: ValName)
extends MixedAdapterNode(TLImp, TLCreditedImp)(
dFn = { p => TLCreditedClientPortParameters(delay, p) },
uFn = { p => p.base.v1copy(minLatency = 1) }) with FormatNode[TLEdgeIn, TLCreditedEdgeParameters] // discard cycles from other clock domain
case class TLCreditedSinkNode(delay: TLCreditedDelay)(implicit valName: ValName)
extends MixedAdapterNode(TLCreditedImp, TLImp)(
dFn = { p => p.base.v1copy(minLatency = 1) },
uFn = { p => TLCreditedManagerPortParameters(delay, p) }) with FormatNode[TLCreditedEdgeParameters, TLEdgeOut]
File RegisterRouter.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import org.chipsalliance.diplomacy.nodes._
import freechips.rocketchip.diplomacy.{AddressSet, TransferSizes}
import freechips.rocketchip.resources.{Device, Resource, ResourceBindings}
import freechips.rocketchip.prci.{NoCrossing}
import freechips.rocketchip.regmapper.{RegField, RegMapper, RegMapperParams, RegMapperInput, RegisterRouter}
import freechips.rocketchip.util.{BundleField, ControlKey, ElaborationArtefacts, GenRegDescsAnno}
import scala.math.min
class TLRegisterRouterExtraBundle(val sourceBits: Int, val sizeBits: Int) extends Bundle {
val source = UInt((sourceBits max 1).W)
val size = UInt((sizeBits max 1).W)
}
case object TLRegisterRouterExtra extends ControlKey[TLRegisterRouterExtraBundle]("tlrr_extra")
case class TLRegisterRouterExtraField(sourceBits: Int, sizeBits: Int) extends BundleField[TLRegisterRouterExtraBundle](TLRegisterRouterExtra, Output(new TLRegisterRouterExtraBundle(sourceBits, sizeBits)), x => {
x.size := 0.U
x.source := 0.U
})
/** TLRegisterNode is a specialized TL SinkNode that encapsulates MMIO registers.
 * It provides functionality for describing and outputting metadata about the registers in several formats.
* It also provides a concrete implementation of a regmap function that will be used
* to wire a map of internal registers associated with this node to the node's interconnect port.
*/
case class TLRegisterNode(
address: Seq[AddressSet],
device: Device,
deviceKey: String = "reg/control",
concurrency: Int = 0,
beatBytes: Int = 4,
undefZero: Boolean = true,
executable: Boolean = false)(
implicit valName: ValName)
extends SinkNode(TLImp)(Seq(TLSlavePortParameters.v1(
Seq(TLSlaveParameters.v1(
address = address,
resources = Seq(Resource(device, deviceKey)),
executable = executable,
supportsGet = TransferSizes(1, beatBytes),
supportsPutPartial = TransferSizes(1, beatBytes),
supportsPutFull = TransferSizes(1, beatBytes),
fifoId = Some(0))), // requests are handled in order
beatBytes = beatBytes,
minLatency = min(concurrency, 1)))) with TLFormatNode // the Queue adds at most one cycle
{
val size = 1 << log2Ceil(1 + address.map(_.max).max - address.map(_.base).min)
require (size >= beatBytes)
address.foreach { case a =>
require (a.widen(size-1).base == address.head.widen(size-1).base,
s"TLRegisterNode addresses (${address}) must be aligned to its size ${size}")
}
// Calling this method causes the matching TL2 bundle to be
// configured to route all requests to the listed RegFields.
def regmap(mapping: RegField.Map*) = {
val (bundleIn, edge) = this.in(0)
val a = bundleIn.a
val d = bundleIn.d
val fields = TLRegisterRouterExtraField(edge.bundle.sourceBits, edge.bundle.sizeBits) +: a.bits.params.echoFields
val params = RegMapperParams(log2Up(size/beatBytes), beatBytes, fields)
val in = Wire(Decoupled(new RegMapperInput(params)))
in.bits.read := a.bits.opcode === TLMessages.Get
in.bits.index := edge.addr_hi(a.bits)
in.bits.data := a.bits.data
in.bits.mask := a.bits.mask
Connectable.waiveUnmatched(in.bits.extra, a.bits.echo) match {
case (lhs, rhs) => lhs :<= rhs
}
val a_extra = in.bits.extra(TLRegisterRouterExtra)
a_extra.source := a.bits.source
a_extra.size := a.bits.size
// Invoke the register map builder
val out = RegMapper(beatBytes, concurrency, undefZero, in, mapping:_*)
// No flow control needed
in.valid := a.valid
a.ready := in.ready
d.valid := out.valid
out.ready := d.ready
// We must restore the size to enable width adapters to work
val d_extra = out.bits.extra(TLRegisterRouterExtra)
d.bits := edge.AccessAck(toSource = d_extra.source, lgSize = d_extra.size)
// avoid a Mux on the data bus by manually overriding two fields
d.bits.data := out.bits.data
Connectable.waiveUnmatched(d.bits.echo, out.bits.extra) match {
case (lhs, rhs) => lhs :<= rhs
}
d.bits.opcode := Mux(out.bits.read, TLMessages.AccessAckData, TLMessages.AccessAck)
// Tie off unused channels
bundleIn.b.valid := false.B
bundleIn.c.ready := true.B
bundleIn.e.ready := true.B
genRegDescsJson(mapping:_*)
}
def genRegDescsJson(mapping: RegField.Map*): Unit = {
// Dump out the register map for documentation purposes.
val base = address.head.base
val baseHex = s"0x${base.toInt.toHexString}"
val name = s"${device.describe(ResourceBindings()).name}.At${baseHex}"
val json = GenRegDescsAnno.serialize(base, name, mapping:_*)
var suffix = 0
while( ElaborationArtefacts.contains(s"${baseHex}.${suffix}.regmap.json")) {
suffix = suffix + 1
}
ElaborationArtefacts.add(s"${baseHex}.${suffix}.regmap.json", json)
val module = Module.currentModule.get.asInstanceOf[RawModule]
GenRegDescsAnno.anno(
module,
base,
mapping:_*)
}
}
/** Mix HasTLControlRegMap into any subclass of RegisterRouter to gain helper functions for attaching a device control register map to TileLink.
* - The intended use case is that controlNode will diplomatically publish a SW-visible device's memory-mapped control registers.
* - Use the clock crossing helper controlXing to externally connect controlNode to a TileLink interconnect.
* - Use the mapping helper function regmap to internally fill out the space of device control registers.
*/
trait HasTLControlRegMap { this: RegisterRouter =>
protected val controlNode = TLRegisterNode(
address = address,
device = device,
deviceKey = "reg/control",
concurrency = concurrency,
beatBytes = beatBytes,
undefZero = undefZero,
executable = executable)
// Externally, this helper should be used to connect the register control port to a bus
val controlXing: TLInwardClockCrossingHelper = this.crossIn(controlNode)
// Backwards-compatibility default node accessor with no clock crossing
lazy val node: TLInwardNode = controlXing(NoCrossing)
// Internally, this function should be used to populate the control port with registers
protected def regmap(mapping: RegField.Map*): Unit = { controlNode.regmap(mapping:_*) }
}
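// Minimal regmap sketch (hypothetical device, illustrative only): a RegisterRouter
// mixing in HasTLControlRegMap can expose a 32-bit scratch register at offset 0x00
// of its address range with something like
//
//   val scratch = RegInit(0.U(32.W))
//   regmap(0x00 -> Seq(RegField(32, scratch)))
//
// TileResetSetter below uses the same mechanism (via tlNode.regmap) to expose one
// read/write reset bit per tile.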
File TileResetSetter.scala:
package chipyard.clocking
import chisel3._
import chisel3.util._
import chisel3.experimental.Analog
import org.chipsalliance.cde.config._
import freechips.rocketchip.subsystem._
import freechips.rocketchip.diplomacy._
import freechips.rocketchip.prci._
import freechips.rocketchip.util._
import freechips.rocketchip.tilelink._
import freechips.rocketchip.devices.tilelink._
import freechips.rocketchip.regmapper._
import freechips.rocketchip.subsystem._
// Currently only works if all tiles are already driven by independent clock groups
// TODO: After https://github.com/chipsalliance/rocket-chip/pull/2842 is merged, we should
// always put all tiles on independent clock groups
class TileResetSetter(address: BigInt, beatBytes: Int, tileNames: Seq[String], initResetHarts: Seq[Int])(implicit p: Parameters)
extends LazyModule {
val device = new SimpleDevice("tile-reset-setter", Nil)
val tlNode = TLRegisterNode(Seq(AddressSet(address, 4096-1)), device, "reg/control", beatBytes=beatBytes)
val clockNode = ClockGroupIdentityNode()
lazy val module = new LazyModuleImp(this) {
val nTiles = p(TilesLocated(InSubsystem)).size
require (nTiles <= 4096 / 4)
val tile_async_resets = Wire(Vec(nTiles, Reset()))
val r_tile_resets = (0 until nTiles).map({ i =>
tile_async_resets(i) := true.B.asAsyncReset // Remove this line after https://github.com/chipsalliance/rocket-chip/pull/2842
withReset (tile_async_resets(i)) {
Module(new AsyncResetRegVec(w=1, init=(if (initResetHarts.contains(i)) 1 else 0)))
}
})
if (nTiles > 0)
tlNode.regmap((0 until nTiles).map({ i =>
i * 4 -> Seq(RegField.rwReg(1, r_tile_resets(i).io))
}): _*)
val tileMap = tileNames.zipWithIndex.map({ case (n, i) =>
n -> (tile_async_resets(i), r_tile_resets(i).io.q, address + i * 4)
})
(clockNode.out zip clockNode.in).map { case ((o, _), (i, _)) =>
(o.member.elements zip i.member.elements).foreach { case ((name, oD), (_, iD)) =>
oD.clock := iD.clock
oD.reset := iD.reset
for ((n, (rIn, rOut, addr)) <- tileMap) {
if (name.contains(n)) {
println(s"${addr.toString(16)}: Tile $name reset control")
// Async because the reset coming out of the AsyncResetRegVec is
// clocked to the bus this is attached to, not the clock in this
// clock bundle. We expect a ClockGroupResetSynchronizer downstream
// to synchronize the resets
// Also, this or enforces that the tiles come out of reset after the reset of the system
oD.reset := (rOut.asBool || iD.reset.asBool).asAsyncReset
rIn := iD.reset
}
}
}
}
}
}
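// Register layout note, derived from the regmap call above: tile i's 1-bit reset
// control sits in the word at (address + 4*i). Harts listed in initResetHarts come
// out of system reset with that bit set (held in reset) and are released by writing
// 0 to their word over the control interconnect.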
File MuxLiteral.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util.log2Ceil
import scala.reflect.ClassTag
/* MuxLiteral creates a lookup table from a key to a list of values.
* Unlike MuxLookup, the table keys must be exclusive literals.
*/
object MuxLiteral
{
def apply[T <: Data:ClassTag](index: UInt, default: T, first: (UInt, T), rest: (UInt, T)*): T =
apply(index, default, first :: rest.toList)
def apply[T <: Data:ClassTag](index: UInt, default: T, cases: Seq[(UInt, T)]): T =
MuxTable(index, default, cases.map { case (k, v) => (k.litValue, v) })
}
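// Usage sketch (hypothetical values): pick between a few literal-keyed constants
// with a fallback for everything else, e.g.
//
//   val out = MuxLiteral(sel, 0.U, 1.U -> 10.U, 3.U -> 30.U)
//
// MuxTable below then chooses between a dense Vec lookup and a sparse switch
// depending on how the keys are distributed.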
object MuxSeq
{
def apply[T <: Data:ClassTag](index: UInt, default: T, first: T, rest: T*): T =
apply(index, default, first :: rest.toList)
def apply[T <: Data:ClassTag](index: UInt, default: T, cases: Seq[T]): T =
MuxTable(index, default, cases.zipWithIndex.map { case (v, i) => (BigInt(i), v) })
}
object MuxTable
{
def apply[T <: Data:ClassTag](index: UInt, default: T, first: (BigInt, T), rest: (BigInt, T)*): T =
apply(index, default, first :: rest.toList)
def apply[T <: Data:ClassTag](index: UInt, default: T, cases: Seq[(BigInt, T)]): T = {
/* All keys must be >= 0 and distinct */
cases.foreach { case (k, _) => require (k >= 0) }
require (cases.map(_._1).distinct.size == cases.size)
/* Filter out any cases identical to the default */
val simple = cases.filter { case (k, v) => !default.isLit || !v.isLit || v.litValue != default.litValue }
val maxKey = (BigInt(0) +: simple.map(_._1)).max
val endIndex = BigInt(1) << log2Ceil(maxKey+1)
if (simple.isEmpty) {
default
} else if (endIndex <= 2*simple.size) {
/* The dense encoding case uses a Vec */
val table = Array.fill(endIndex.toInt) { default }
simple.foreach { case (k, v) => table(k.toInt) = v }
Mux(index >= endIndex.U, default, VecInit(table)(index))
} else {
/* The sparse encoding case uses switch */
val out = WireDefault(default)
simple.foldLeft(new chisel3.util.SwitchContext(index, None, Set.empty)) { case (acc, (k, v)) =>
acc.is (k.U) { out := v }
}
out
}
}
}
File LazyModuleImp.scala:
package org.chipsalliance.diplomacy.lazymodule
import chisel3.{withClockAndReset, Module, RawModule, Reset, _}
import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo}
import firrtl.passes.InlineAnnotation
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.nodes.Dangle
import scala.collection.immutable.SortedMap
/** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]].
*
* This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy.
*/
sealed trait LazyModuleImpLike extends RawModule {
/** [[LazyModule]] that contains this instance. */
val wrapper: LazyModule
/** IOs that will be automatically "punched" for this instance. */
val auto: AutoBundle
/** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */
protected[diplomacy] val dangles: Seq[Dangle]
// [[wrapper.module]] had better not be accessed while LazyModules are still being built!
require(
LazyModule.scope.isEmpty,
s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}"
)
/** Set module name. Defaults to the containing LazyModule's desiredName. */
override def desiredName: String = wrapper.desiredName
suggestName(wrapper.suggestedName)
/** [[Parameters]] for chisel [[Module]]s. */
implicit val p: Parameters = wrapper.p
/** Instantiate this [[LazyModule]] and return its [[AutoBundle]] along with the unconnected [[Dangle]]s from this
* module and its submodules.
*/
protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = {
// 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]],
// 2. return [[Dangle]]s from each module.
val childDangles = wrapper.children.reverse.flatMap { c =>
implicit val sourceInfo: SourceInfo = c.info
c.cloneProto.map { cp =>
// If the child is a clone, then recursively set cloneProto of its children as well
def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = {
require(bases.size == clones.size)
(bases.zip(clones)).map { case (l, r) =>
require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}")
l.cloneProto = Some(r)
assignCloneProtos(l.children, r.children)
}
}
assignCloneProtos(c.children, cp.children)
// Clone the child module as a record, and get its [[AutoBundle]]
val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName)
val clonedAuto = clone("auto").asInstanceOf[AutoBundle]
// Get the empty [[Dangle]]'s of the cloned child
val rawDangles = c.cloneDangles()
require(rawDangles.size == clonedAuto.elements.size)
// Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s
val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) }
dangles
}.getOrElse {
// For non-clones, instantiate the child module
val mod = try {
Module(c.module)
} catch {
case e: ChiselException => {
println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}")
throw e
}
}
mod.dangles
}
}
// Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]].
// This will result in a sequence of [[Dangle]] from these [[BaseNode]]s.
val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate())
// Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]]
val allDangles = nodeDangles ++ childDangles
// Group [[allDangles]] by their [[source]].
val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*)
// For each [[source]] set of [[Dangle]]s of size 2, ensure that these
// can be connected as a source-sink pair (have opposite flipped value).
// Make the connection and mark them as [[done]].
val done = Set() ++ pairing.values.filter(_.size == 2).map {
case Seq(a, b) =>
require(a.flipped != b.flipped)
// @todo <> in chisel3 makes directionless connection.
if (a.flipped) {
a.data <> b.data
} else {
b.data <> a.data
}
a.source
case _ => None
}
// Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module.
val forward = allDangles.filter(d => !done(d.source))
// Generate [[AutoBundle]] IO from [[forward]].
val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*))
// Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]]
val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) =>
if (d.flipped) {
d.data <> io
} else {
io <> d.data
}
d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name)
}
// Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]].
wrapper.inModuleBody.reverse.foreach {
_()
}
if (wrapper.shouldBeInlined) {
chisel3.experimental.annotate(new ChiselAnnotation {
def toFirrtl = InlineAnnotation(toNamed)
})
}
// Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]].
(auto, dangles)
}
}
/** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike {
/** Instantiate hardware of this `Module`. */
val (auto, dangles) = instantiate()
}
/** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike {
// These wires are the default clock+reset for all LazyModule children.
// It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the
// [[LazyRawModuleImp]] children.
// Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly.
/** drive clock explicitly. */
val childClock: Clock = Wire(Clock())
/** drive reset explicitly. */
val childReset: Reset = Wire(Reset())
// the default is that these are disabled
childClock := false.B.asClock
childReset := chisel3.DontCare
def provideImplicitClockToLazyChildren: Boolean = false
val (auto, dangles) =
if (provideImplicitClockToLazyChildren) {
withClockAndReset(childClock, childReset) { instantiate() }
} else {
instantiate()
}
}
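// A minimal usage sketch (the `ExampleClockedDomain` wrapper below is hypothetical, not part of
// this file's API): a LazyRawModuleImp receives no implicit clock/reset, so it typically forwards
// an externally supplied pair to its lazy children by driving childClock/childReset explicitly.
class ExampleClockedDomain(implicit p: Parameters) extends LazyModule {
  lazy val module = new LazyRawModuleImp(this) {
    override def provideImplicitClockToLazyChildren: Boolean = true
    val io = IO(new Bundle {
      val clock = Input(Clock())
      val reset = Input(AsyncReset())
    })
    // Children instantiated by this LazyModule are now clocked from io.clock/io.reset.
    childClock := io.clock
    childReset := io.reset
  }
}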
File MixedNode.scala:
package org.chipsalliance.diplomacy.nodes
import chisel3.{Data, DontCare, Wire}
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.{Field, Parameters}
import org.chipsalliance.diplomacy.ValName
import org.chipsalliance.diplomacy.sourceLine
/** One side metadata of a [[Dangle]].
*
* Describes one side of an edge going into or out of a [[BaseNode]].
*
* @param serial
* the global [[BaseNode.serial]] number of the [[BaseNode]] that this [[HalfEdge]] connects to.
* @param index
* the `index` in the [[BaseNode]]'s input or output port list that this [[HalfEdge]] belongs to.
*/
case class HalfEdge(serial: Int, index: Int) extends Ordered[HalfEdge] {
import scala.math.Ordered.orderingToOrdered
def compare(that: HalfEdge): Int = HalfEdge.unapply(this).compare(HalfEdge.unapply(that))
}
/** [[Dangle]] captures the `IO` information of a [[LazyModule]] and which two [[BaseNode]]s the [[Edges]]/[[Bundle]]
* connects.
*
* [[Dangle]]s are generated by [[BaseNode.instantiate]] using [[MixedNode.danglesOut]] and [[MixedNode.danglesIn]] ,
* [[LazyModuleImp.instantiate]] connects those that go to internal or explicit IO connections in a [[LazyModule]].
*
* @param source
* the source [[HalfEdge]] of this [[Dangle]], which captures the source [[BaseNode]] and the port `index` within
* that [[BaseNode]].
* @param sink
* sink [[HalfEdge]] of this [[Dangle]], which captures the sink [[BaseNode]] and the port `index` within that
* [[BaseNode]].
* @param flipped
* flip or not in [[AutoBundle.makeElements]]. If true this corresponds to `danglesOut`, if false it corresponds to
* `danglesIn`.
* @param dataOpt
* actual [[Data]] for the hardware connection. Can be empty if this belongs to a cloned module
*/
case class Dangle(source: HalfEdge, sink: HalfEdge, flipped: Boolean, name: String, dataOpt: Option[Data]) {
def data = dataOpt.get
}
/** [[Edges]] is a collection of parameters describing the functionality and connection for an interface, which is often
* derived from the interconnection protocol and can inform the parameterization of the hardware bundles that actually
* implement the protocol.
*/
case class Edges[EI, EO](in: Seq[EI], out: Seq[EO])
/** A field available in [[Parameters]] used to determine whether [[InwardNodeImp.monitor]] will be called. */
case object MonitorsEnabled extends Field[Boolean](true)
/** When rendering the edge in a graphical format, flip the order in which the edges' source and sink are presented.
*
* For example, when rendering graphML, yEd by default tries to put the source node vertically above the sink node, but
* [[RenderFlipped]] inverts this relationship. When a particular [[LazyModule]] contains both source nodes and sink
* nodes, flipping the rendering of one node's edge will usually produce a more concise visual layout for the
* [[LazyModule]].
*/
case object RenderFlipped extends Field[Boolean](false)
/** The sealed node class in the package; all nodes are derived from it.
*
* @param inner
* Sink interface implementation.
* @param outer
* Source interface implementation.
* @param valName
* val name of this node.
* @tparam DI
* Downward-flowing parameters received on the inner side of the node. This is usually a set of parameters
* describing the protocol parameters from a source. For an [[InwardNode]], it is determined by the connected
* [[OutwardNode]]. Since the node can be connected to multiple sources, this parameter is always a Seq of source
* port parameters.
* @tparam UI
* Upward-flowing parameters generated by the inner side of the node. This is usually a set of parameters describing
* the protocol parameters of a sink. For an [[InwardNode]], it is determined by the node itself.
* @tparam EI
* Edge parameters describing a connection on the inner side of the node. This is usually a set of transfers
* specified for a sink according to the protocol.
* @tparam BI
* Bundle type used when connecting to the inner side of the node. It is the hardware interface of this sink
* interface. It should extend [[chisel3.Data]], which represents the real hardware.
* @tparam DO
* Downward-flowing parameters generated on the outer side of the node. This is usually a set of parameters
* describing the protocol parameters of a source. For an [[OutwardNode]], it is determined by the node itself.
* @tparam UO
* Upward-flowing parameters received by the outer side of the node. This is usually a set of parameters describing
* the protocol parameters from a sink. For an [[OutwardNode]], it is determined by the connected [[InwardNode]].
* Since the node can be connected to multiple sinks, this parameter is always a Seq of sink port parameters.
* @tparam EO
* Edge parameters describing a connection on the outer side of the node. This is usually a set of transfers
* specified for a source according to the protocol.
* @tparam BO
* Bundle type used when connecting to the outer side of the node. It is the hardware interface of this source
* interface. It should extend [[chisel3.Data]], which represents the real hardware.
*
* @note
* Call Graph of [[MixedNode]]
* - line `─`: the source is processed by a function and the result is passed on to others
* - arrow `→`: the target of the arrow is generated by the source
*
* {{{
* (from the other node)
* ┌─────────────────────────────────────────────────────────[[InwardNode.uiParams]]─────────────┐
* ↓ │
* (binding node when elaboration) [[OutwardNode.uoParams]]────────────────────────[[MixedNode.mapParamsU]]→──────────┐ │
* [[InwardNode.accPI]] │ │ │
* │ │ (based on protocol) │
* │ │ [[MixedNode.inner.edgeI]] │
* │ │ ↓ │
* ↓ │ │ │
* (immobilize after elaboration) (inward port from [[OutwardNode]]) │ ↓ │
* [[InwardNode.iBindings]]──┐ [[MixedNode.iDirectPorts]]────────────────────→[[MixedNode.iPorts]] [[InwardNode.uiParams]] │
* │ │ ↑ │ │ │
* │ │ │ [[OutwardNode.doParams]] │ │
* │ │ │ (from the other node) │ │
* │ │ │ │ │ │
* │ │ │ │ │ │
* │ │ │ └────────┬──────────────┤ │
* │ │ │ │ │ │
* │ │ │ │ (based on protocol) │
* │ │ │ │ [[MixedNode.inner.edgeI]] │
* │ │ │ │ │ │
* │ │ (from the other node) │ ↓ │
* │ └───[[OutwardNode.oPortMapping]] [[OutwardNode.oStar]] │ [[MixedNode.edgesIn]]───┐ │
* │ ↑ ↑ │ │ ↓ │
* │ │ │ │ │ [[MixedNode.in]] │
* │ │ │ │ ↓ ↑ │
* │ (solve star connection) │ │ │ [[MixedNode.bundleIn]]──┘ │
* ├───[[MixedNode.resolveStar]]→─┼─────────────────────────────┤ └────────────────────────────────────┐ │
* │ │ │ [[MixedNode.bundleOut]]─┐ │ │
* │ │ │ ↑ ↓ │ │
* │ │ │ │ [[MixedNode.out]] │ │
* │ ↓ ↓ │ ↑ │ │
* │ ┌─────[[InwardNode.iPortMapping]] [[InwardNode.iStar]] [[MixedNode.edgesOut]]──┘ │ │
* │ │ (from the other node) ↑ │ │
* │ │ │ │ │ │
* │ │ │ [[MixedNode.outer.edgeO]] │ │
* │ │ │ (based on protocol) │ │
* │ │ │ │ │ │
* │ │ │ ┌────────────────────────────────────────┤ │ │
* │ │ │ │ │ │ │
* │ │ │ │ │ │ │
* │ │ │ │ │ │ │
* (immobilize after elaboration)│ ↓ │ │ │ │
* [[OutwardNode.oBindings]]─┘ [[MixedNode.oDirectPorts]]───→[[MixedNode.oPorts]] [[OutwardNode.doParams]] │ │
* ↑ (inward port from [[OutwardNode]]) │ │ │ │
* │ ┌─────────────────────────────────────────┤ │ │ │
* │ │ │ │ │ │
* │ │ │ │ │ │
* [[OutwardNode.accPO]] │ ↓ │ │ │
* (binding node when elaboration) │ [[InwardNode.diParams]]─────→[[MixedNode.mapParamsD]]────────────────────────────┘ │ │
* │ ↑ │ │
* │ └──────────────────────────────────────────────────────────────────────────────────────────┘ │
* └──────────────────────────────────────────────────────────────────────────────────────────────────────────┘
* }}}
*/
abstract class MixedNode[DI, UI, EI, BI <: Data, DO, UO, EO, BO <: Data](
val inner: InwardNodeImp[DI, UI, EI, BI],
val outer: OutwardNodeImp[DO, UO, EO, BO]
)(
implicit valName: ValName)
extends BaseNode
with NodeHandle[DI, UI, EI, BI, DO, UO, EO, BO]
with InwardNode[DI, UI, BI]
with OutwardNode[DO, UO, BO] {
// Generate a [[NodeHandle]] with inward and outward node are both this node.
val inward = this
val outward = this
/** Debug info of nodes binding. */
def bindingInfo: String = s"""$iBindingInfo
|$oBindingInfo
|""".stripMargin
/** Debug info of ports connecting. */
def connectedPortsInfo: String = s"""${oPorts.size} outward ports connected: [${oPorts.map(_._2.name).mkString(",")}]
|${iPorts.size} inward ports connected: [${iPorts.map(_._2.name).mkString(",")}]
|""".stripMargin
/** Debug info of parameters propagations. */
def parametersInfo: String = s"""${doParams.size} downstream outward parameters: [${doParams.mkString(",")}]
|${uoParams.size} upstream outward parameters: [${uoParams.mkString(",")}]
|${diParams.size} downstream inward parameters: [${diParams.mkString(",")}]
|${uiParams.size} upstream inward parameters: [${uiParams.mkString(",")}]
|""".stripMargin
/** For a given node, converts [[OutwardNode.accPO]] and [[InwardNode.accPI]] to [[MixedNode.oPortMapping]] and
* [[MixedNode.iPortMapping]].
*
* Given counts of known inward and outward binding and inward and outward star bindings, return the resolved inward
* stars and outward stars.
*
* This method will also validate the arguments and throw a runtime error if the values are unsuitable for this type
* of node.
*
* @param iKnown
* Number of known-size ([[BIND_ONCE]]) input bindings.
* @param oKnown
* Number of known-size ([[BIND_ONCE]]) output bindings.
* @param iStar
* Number of unknown size ([[BIND_STAR]]) input bindings.
* @param oStar
* Number of unknown size ([[BIND_STAR]]) output bindings.
* @return
* A Tuple of the resolved number of input and output connections.
*/
protected[diplomacy] def resolveStar(iKnown: Int, oKnown: Int, iStar: Int, oStar: Int): (Int, Int)
/** Function to generate downward-flowing outward params from the downward-flowing input params and the current output
* ports.
*
* @param n
* The size of the output sequence to generate.
* @param p
* Sequence of downward-flowing input parameters of this node.
* @return
* A `n`-sized sequence of downward-flowing output edge parameters.
*/
protected[diplomacy] def mapParamsD(n: Int, p: Seq[DI]): Seq[DO]
/** Function to generate upward-flowing input parameters from the upward-flowing output parameters [[uiParams]].
*
* @param n
* Size of the output sequence.
* @param p
* Upward-flowing output edge parameters.
* @return
* A n-sized sequence of upward-flowing input edge parameters.
*/
protected[diplomacy] def mapParamsU(n: Int, p: Seq[UO]): Seq[UI]
/** @return
* The sink cardinality of the node, the number of outputs bound with [[BIND_QUERY]] summed with inputs bound with
* [[BIND_STAR]].
*/
protected[diplomacy] lazy val sinkCard: Int = oBindings.count(_._3 == BIND_QUERY) + iBindings.count(_._3 == BIND_STAR)
/** @return
* The source cardinality of this node, the number of inputs bound with [[BIND_QUERY]] summed with the number of
* output bindings bound with [[BIND_STAR]].
*/
protected[diplomacy] lazy val sourceCard: Int =
iBindings.count(_._3 == BIND_QUERY) + oBindings.count(_._3 == BIND_STAR)
/** @return list of nodes involved in flex bindings with this node. */
protected[diplomacy] lazy val flexes: Seq[BaseNode] =
oBindings.filter(_._3 == BIND_FLEX).map(_._2) ++ iBindings.filter(_._3 == BIND_FLEX).map(_._2)
/** Resolves the flex to be either source or sink and returns the offset where the [[BIND_STAR]] operators begin
* greedily taking up the remaining connections.
*
* @return
* A value >= 0 if it is sink cardinality, a negative value for source cardinality. The magnitude of the return
* value is not relevant.
*/
protected[diplomacy] lazy val flexOffset: Int = {
/** Recursively performs a depth-first search of the [[flexes]], [[BaseNode]]s connected to this node with flex
* operators. The algorithm bottoms out when we either get to a node we have already visited or when we get to a
* connection that is not a flex and can set the direction for us. Otherwise, recurse by visiting the `flexes` of
* each node in the current set and decide whether they should be added to the set or not.
*
* @return
* the mapping of [[BaseNode]] indexed by their serial numbers.
*/
def DFS(v: BaseNode, visited: Map[Int, BaseNode]): Map[Int, BaseNode] = {
if (visited.contains(v.serial) || !v.flexibleArityDirection) {
visited
} else {
v.flexes.foldLeft(visited + (v.serial -> v))((sum, n) => DFS(n, sum))
}
}
/** Determine which [[BaseNode]] are involved in resolving the flex connections to/from this node.
*
* @example
* {{{
* a :*=* b :*=* c
* d :*=* b
* e :*=* f
* }}}
*
* `flexSet` for `a`, `b`, `c`, or `d` will be `Set(a, b, c, d)`; `flexSet` for `e` or `f` will be `Set(e, f)`.
*/
val flexSet = DFS(this, Map()).values
/** The total number of :*= operators where we're on the left. */
val allSink = flexSet.map(_.sinkCard).sum
/** The total number of :=* operators used when we're on the right. */
val allSource = flexSet.map(_.sourceCard).sum
require(
allSink == 0 || allSource == 0,
s"The nodes ${flexSet.map(_.name)} which are inter-connected by :*=* have ${allSink} :*= operators and ${allSource} :=* operators connected to them, making it impossible to determine cardinality inference direction."
)
allSink - allSource
}
/** @return A value >= 0 if it is sink cardinality, a negative value for source cardinality. */
protected[diplomacy] def edgeArityDirection(n: BaseNode): Int = {
if (flexibleArityDirection) flexOffset
else if (n.flexibleArityDirection) n.flexOffset
else 0
}
/** For a node which is connected between two nodes, select the one that will influence the direction of the flex
* resolution.
*/
protected[diplomacy] def edgeAritySelect(n: BaseNode, l: => Int, r: => Int): Int = {
val dir = edgeArityDirection(n)
if (dir < 0) l
else if (dir > 0) r
else 1
}
/** Ensure that the same node is not visited twice in resolving `:*=`, etc operators. */
private var starCycleGuard = false
/** Resolve all the star operators into concrete indices. As connections are being made, some may be "star"
* connections which need to be resolved to determine how many actual edges they correspond to. We also need to
* build up the ranges of edges which correspond to each binding operator, so that we can apply the correct
* edge parameters and later build up correct bundle connections.
*
* [[oPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that oPort (binding
* operator). [[iPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that iPort
* (binding operator). [[oStar]]: `Int` the value to return for this node `N` for any `N :*= foo` or `N :*=* foo :*=
* bar` [[iStar]]: `Int` the value to return for this node `N` for any `foo :=* N` or `bar :=* foo :*=* N`
*/
protected[diplomacy] lazy val (
oPortMapping: Seq[(Int, Int)],
iPortMapping: Seq[(Int, Int)],
oStar: Int,
iStar: Int
) = {
try {
if (starCycleGuard) throw StarCycleException()
starCycleGuard = true
// For a given node N...
// Number of foo :=* N
// + Number of bar :=* foo :*=* N
val oStars = oBindings.count { case (_, n, b, _, _) =>
b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) < 0)
}
// Number of N :*= foo
// + Number of N :*=* foo :*= bar
val iStars = iBindings.count { case (_, n, b, _, _) =>
b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) > 0)
}
// 1 for foo := N
// + bar.iStar for bar :*= foo :*=* N
// + foo.iStar for foo :*= N
// + 0 for foo :=* N
val oKnown = oBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, 0, n.iStar)
case BIND_QUERY => n.iStar
case BIND_STAR => 0
}
}.sum
// 1 for N := foo
// + bar.oStar for N :*=* foo :=* bar
// + foo.oStar for N :=* foo
// + 0 for N :*= foo
val iKnown = iBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, n.oStar, 0)
case BIND_QUERY => n.oStar
case BIND_STAR => 0
}
}.sum
// Resolve star depends on the node subclass to implement the algorithm for this.
val (iStar, oStar) = resolveStar(iKnown, oKnown, iStars, oStars)
// Cumulative list of resolved outward binding range starting points
val oSum = oBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, oStar, n.iStar)
case BIND_QUERY => n.iStar
case BIND_STAR => oStar
}
}.scanLeft(0)(_ + _)
// Cumulative list of resolved inward binding range starting points
val iSum = iBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, n.oStar, iStar)
case BIND_QUERY => n.oStar
case BIND_STAR => iStar
}
}.scanLeft(0)(_ + _)
// Create ranges for each binding based on the running sums and return
// those along with resolved values for the star operations.
(oSum.init.zip(oSum.tail), iSum.init.zip(iSum.tail), oStar, iStar)
} catch {
case c: StarCycleException => throw c.copy(loop = context +: c.loop)
}
}
/** Sequence of this node's outward (direct) ports, each paired with the [[InwardNode]] on the other end.
*
* This should be called after all star bindings are resolved.
*
* Each element is: `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding.
* `n` Instance of inward node. `p` View of [[Parameters]] where this connection was made. `s` Source info where this
* connection was made in the source code.
*/
protected[diplomacy] lazy val oDirectPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] =
oBindings.flatMap { case (i, n, _, p, s) =>
// for each binding operator in this node, look at what it connects to
val (start, end) = n.iPortMapping(i)
(start until end).map { j => (j, n, p, s) }
}
/** Sequence of this node's inward (direct) ports, each paired with the [[OutwardNode]] on the other end.
*
* This should be called after all star bindings are resolved.
*
* `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding. `n` Instance of
* outward node. `p` View of [[Parameters]] where this connection was made. `s` [[SourceInfo]] where this connection
* was made in the source code.
*/
protected[diplomacy] lazy val iDirectPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] =
iBindings.flatMap { case (i, n, _, p, s) =>
// query this port index range of this node in the other side of node.
val (start, end) = n.oPortMapping(i)
(start until end).map { j => (j, n, p, s) }
}
// Ephemeral nodes (which have non-None iForward/oForward) have in_degree = out_degree
// Thus, there must exist an Eulerian path and the below algorithms terminate
@scala.annotation.tailrec
private def oTrace(
tuple: (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)
): (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo) = tuple match {
case (i, n, p, s) => n.iForward(i) match {
case None => (i, n, p, s)
case Some((j, m)) => oTrace((j, m, p, s))
}
}
@scala.annotation.tailrec
private def iTrace(
tuple: (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)
): (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo) = tuple match {
case (i, n, p, s) => n.oForward(i) match {
case None => (i, n, p, s)
case Some((j, m)) => iTrace((j, m, p, s))
}
}
/** Final output ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved.
*
* Each Port is a tuple of:
* - Numeric index of this binding in the [[InwardNode]] on the other end.
* - [[InwardNode]] on the other end of this binding.
* - A view of [[Parameters]] where the binding occurred.
* - [[SourceInfo]] for source-level error reporting.
*/
lazy val oPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] = oDirectPorts.map(oTrace)
/** Final input ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved.
*
* Each Port is a tuple of:
* - numeric index of this binding in [[OutwardNode]] on the other end.
* - [[OutwardNode]] on the other end of this binding.
* - a view of [[Parameters]] where the binding occurred.
* - [[SourceInfo]] for source-level error reporting.
*/
lazy val iPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] = iDirectPorts.map(iTrace)
private var oParamsCycleGuard = false
protected[diplomacy] lazy val diParams: Seq[DI] = iPorts.map { case (i, n, _, _) => n.doParams(i) }
protected[diplomacy] lazy val doParams: Seq[DO] = {
try {
if (oParamsCycleGuard) throw DownwardCycleException()
oParamsCycleGuard = true
val o = mapParamsD(oPorts.size, diParams)
require(
o.size == oPorts.size,
s"""Diplomacy has detected a problem with your graph:
|At the following node, the number of outward ports should equal the number of produced outward parameters.
|$context
|$connectedPortsInfo
|Downstreamed inward parameters: [${diParams.mkString(",")}]
|Produced outward parameters: [${o.mkString(",")}]
|""".stripMargin
)
o.map(outer.mixO(_, this))
} catch {
case c: DownwardCycleException => throw c.copy(loop = context +: c.loop)
}
}
private var iParamsCycleGuard = false
protected[diplomacy] lazy val uoParams: Seq[UO] = oPorts.map { case (o, n, _, _) => n.uiParams(o) }
protected[diplomacy] lazy val uiParams: Seq[UI] = {
try {
if (iParamsCycleGuard) throw UpwardCycleException()
iParamsCycleGuard = true
val i = mapParamsU(iPorts.size, uoParams)
require(
i.size == iPorts.size,
s"""Diplomacy has detected a problem with your graph:
|At the following node, the number of inward ports should equal the number of produced inward parameters.
|$context
|$connectedPortsInfo
|Upstreamed outward parameters: [${uoParams.mkString(",")}]
|Produced inward parameters: [${i.mkString(",")}]
|""".stripMargin
)
i.map(inner.mixI(_, this))
} catch {
case c: UpwardCycleException => throw c.copy(loop = context +: c.loop)
}
}
/** Outward edge parameters. */
protected[diplomacy] lazy val edgesOut: Seq[EO] =
(oPorts.zip(doParams)).map { case ((i, n, p, s), o) => outer.edgeO(o, n.uiParams(i), p, s) }
/** Inward edge parameters. */
protected[diplomacy] lazy val edgesIn: Seq[EI] =
(iPorts.zip(uiParams)).map { case ((o, n, p, s), i) => inner.edgeI(n.doParams(o), i, p, s) }
/** A tuple of the input edge parameters and output edge parameters for the edges bound to this node.
*
* If you need to access the edges of a foreign Node, use this method ([[in]]/[[out]] create bundles).
*/
lazy val edges: Edges[EI, EO] = Edges(edgesIn, edgesOut)
/** Create actual Wires corresponding to the Bundles parameterized by the outward edges of this node. */
protected[diplomacy] lazy val bundleOut: Seq[BO] = edgesOut.map { e =>
val x = Wire(outer.bundleO(e)).suggestName(s"${valName.value}Out")
// TODO: Don't care unconnected forwarded diplomatic signals for compatibility issue,
// In the future, we should add an option to decide whether allowing unconnected in the LazyModule
x := DontCare
x
}
/** Create actual Wires corresponding to the Bundles parameterized by the inward edges of this node. */
protected[diplomacy] lazy val bundleIn: Seq[BI] = edgesIn.map { e =>
val x = Wire(inner.bundleI(e)).suggestName(s"${valName.value}In")
// TODO: Don't care unconnected forwarded diplomatic signals for compatibility issue,
// In the future, we should add an option to decide whether allowing unconnected in the LazyModule
x := DontCare
x
}
private def emptyDanglesOut: Seq[Dangle] = oPorts.zipWithIndex.map { case ((j, n, _, _), i) =>
Dangle(
source = HalfEdge(serial, i),
sink = HalfEdge(n.serial, j),
flipped = false,
name = wirePrefix + "out",
dataOpt = None
)
}
private def emptyDanglesIn: Seq[Dangle] = iPorts.zipWithIndex.map { case ((j, n, _, _), i) =>
Dangle(
source = HalfEdge(n.serial, j),
sink = HalfEdge(serial, i),
flipped = true,
name = wirePrefix + "in",
dataOpt = None
)
}
/** Create the [[Dangle]]s which describe the connections from this node output to other nodes inputs. */
protected[diplomacy] def danglesOut: Seq[Dangle] = emptyDanglesOut.zipWithIndex.map { case (d, i) =>
d.copy(dataOpt = Some(bundleOut(i)))
}
/** Create the [[Dangle]]s which describe the connections from this node input from other nodes outputs. */
protected[diplomacy] def danglesIn: Seq[Dangle] = emptyDanglesIn.zipWithIndex.map { case (d, i) =>
d.copy(dataOpt = Some(bundleIn(i)))
}
private[diplomacy] var instantiated = false
/** Gather Bundle and edge parameters of outward ports.
*
* Accessors to the result of negotiation, to be used within [[LazyModuleImp]] code. Should only be used within
* [[LazyModuleImp]] code or after its instantiation has completed.
*/
def out: Seq[(BO, EO)] = {
require(
instantiated,
s"$name.out should not be called until after instantiation of its parent LazyModule.module has begun"
)
bundleOut.zip(edgesOut)
}
/** Gather Bundle and edge parameters of inward ports.
*
* Accessors to the result of negotiation, to be used within [[LazyModuleImp]] code. Should only be used within
* [[LazyModuleImp]] code or after its instantiation has completed.
*/
def in: Seq[(BI, EI)] = {
require(
instantiated,
s"$name.in should not be called until after instantiation of its parent LazyModule.module has begun"
)
bundleIn.zip(edgesIn)
}
/** Actually instantiate this node during [[LazyModuleImp]] evaluation. Mark that it's safe to use the Bundle wires,
* instantiate monitors on all input ports if appropriate, and return all the dangles of this node.
*/
protected[diplomacy] def instantiate(): Seq[Dangle] = {
instantiated = true
if (!circuitIdentity) {
(iPorts.zip(in)).foreach { case ((_, _, p, _), (b, e)) => if (p(MonitorsEnabled)) inner.monitor(b, e) }
}
danglesOut ++ danglesIn
}
protected[diplomacy] def cloneDangles(): Seq[Dangle] = emptyDanglesOut ++ emptyDanglesIn
/** Connects the outward part of a node with the inward part of this node. */
protected[diplomacy] def bind(
h: OutwardNode[DI, UI, BI],
binding: NodeBinding
)(
implicit p: Parameters,
sourceInfo: SourceInfo
): Unit = {
val x = this // x := y
val y = h
sourceLine(sourceInfo, " at ", "")
val i = x.iPushed
val o = y.oPushed
y.oPush(
i,
x,
binding match {
case BIND_ONCE => BIND_ONCE
case BIND_FLEX => BIND_FLEX
case BIND_STAR => BIND_QUERY
case BIND_QUERY => BIND_STAR
}
)
x.iPush(o, y, binding)
}
/* Metadata for printing the node graph. */
def inputs: Seq[(OutwardNode[DI, UI, BI], RenderedEdge)] = (iPorts.zip(edgesIn)).map { case ((_, n, p, _), e) =>
val re = inner.render(e)
(n, re.copy(flipped = re.flipped != p(RenderFlipped)))
}
/** Metadata for printing the node graph */
def outputs: Seq[(InwardNode[DO, UO, BO], RenderedEdge)] = oPorts.map { case (i, n, _, _) => (n, n.inputs(i)._2) }
}
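// A minimal usage sketch (names such as `node` and the surrounding LazyModule are hypothetical,
// not part of this file): inside the owning LazyModule's implementation, `out` and `in` expose the
// negotiated (bundle, edge) pairs of a node once elaboration of the module has begun, e.g.:
//
//   lazy val module = new LazyModuleImp(this) {
//     val (bundle, edge) = node.out(0) // hardware interface + negotiated edge parameters
//     bundle := DontCare               // drive the bundle like any other Chisel IO
//   }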
File Edges.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.util._
class TLEdge(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdgeParameters(client, manager, params, sourceInfo)
{
def isAligned(address: UInt, lgSize: UInt): Bool = {
if (maxLgSize == 0) true.B else {
val mask = UIntToOH1(lgSize, maxLgSize)
(address & mask) === 0.U
}
}
def mask(address: UInt, lgSize: UInt): UInt =
MaskGen(address, lgSize, manager.beatBytes)
def staticHasData(bundle: TLChannel): Option[Boolean] = {
bundle match {
case _:TLBundleA => {
// Do there exist A messages with Data?
val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial
// Do there exist A messages without Data?
val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint
// Statically optimize the case where hasData is a constant
if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None
}
case _:TLBundleB => {
// Do there exist B messages with Data?
val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial
// Do there exist B messages without Data?
val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint
// Statically optimize the case where hasData is a constant
if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None
}
case _:TLBundleC => {
// Do there exist C messages with Data?
val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe
// Do there exist C messages without Data?
val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe
if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None
}
case _:TLBundleD => {
// Do there exist D messages with Data?
val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB
// Do there exist D messages without Data?
val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT
if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None
}
case _:TLBundleE => Some(false)
}
}
def isRequest(x: TLChannel): Bool = {
x match {
case a: TLBundleA => true.B
case b: TLBundleB => true.B
case c: TLBundleC => c.opcode(2) && c.opcode(1)
// opcode === TLMessages.Release ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(2) && !d.opcode(1)
// opcode === TLMessages.Grant ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
}
def isResponse(x: TLChannel): Bool = {
x match {
case a: TLBundleA => false.B
case b: TLBundleB => false.B
case c: TLBundleC => !c.opcode(2) || !c.opcode(1)
// opcode =/= TLMessages.Release &&
// opcode =/= TLMessages.ReleaseData
case d: TLBundleD => true.B // Grant isResponse + isRequest
case e: TLBundleE => true.B
}
}
def hasData(x: TLChannel): Bool = {
val opdata = x match {
case a: TLBundleA => !a.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case b: TLBundleB => !b.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case c: TLBundleC => c.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.ProbeAckData ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
staticHasData(x).map(_.B).getOrElse(opdata)
}
def opcode(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.opcode
case b: TLBundleB => b.opcode
case c: TLBundleC => c.opcode
case d: TLBundleD => d.opcode
}
}
def param(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.param
case b: TLBundleB => b.param
case c: TLBundleC => c.param
case d: TLBundleD => d.param
}
}
def size(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.size
case b: TLBundleB => b.size
case c: TLBundleC => c.size
case d: TLBundleD => d.size
}
}
def data(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.data
case b: TLBundleB => b.data
case c: TLBundleC => c.data
case d: TLBundleD => d.data
}
}
def corrupt(x: TLDataChannel): Bool = {
x match {
case a: TLBundleA => a.corrupt
case b: TLBundleB => b.corrupt
case c: TLBundleC => c.corrupt
case d: TLBundleD => d.corrupt
}
}
def mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.mask
case b: TLBundleB => b.mask
case c: TLBundleC => mask(c.address, c.size)
}
}
def full_mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => mask(a.address, a.size)
case b: TLBundleB => mask(b.address, b.size)
case c: TLBundleC => mask(c.address, c.size)
}
}
def address(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.address
case b: TLBundleB => b.address
case c: TLBundleC => c.address
}
}
def source(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.source
case b: TLBundleB => b.source
case c: TLBundleC => c.source
case d: TLBundleD => d.source
}
}
def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes)
def addr_lo(x: UInt): UInt =
if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0)
def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x))
def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x))
def numBeats(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 1.U
case bundle: TLDataChannel => {
val hasData = this.hasData(bundle)
val size = this.size(bundle)
val cutoff = log2Ceil(manager.beatBytes)
val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U
val decode = UIntToOH(size, maxLgSize+1) >> cutoff
Mux(hasData, decode | small.asUInt, 1.U)
}
}
}
def numBeats1(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 0.U
case bundle: TLDataChannel => {
if (maxLgSize == 0) {
0.U
} else {
val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes)
Mux(hasData(bundle), decode, 0.U)
}
}
}
}
def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val beats1 = numBeats1(bits)
val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W))
val counter1 = counter - 1.U
val first = counter === 0.U
val last = counter === 1.U || beats1 === 0.U
val done = last && fire
val count = (beats1 & ~counter1)
when (fire) {
counter := Mux(first, beats1, counter1)
}
(first, last, done, count)
}
def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1
def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire)
def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid)
def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2
def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire)
def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid)
def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3
def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire)
def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid)
def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3)
}
def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire)
def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid)
def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4)
}
def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire)
def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid)
def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes))
}
def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire)
def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid)
// Does the request need T permissions to be executed?
def needT(a: TLBundleA): Bool = {
val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLPermissions.NtoB -> false.B,
TLPermissions.NtoT -> true.B,
TLPermissions.BtoT -> true.B))
MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array(
TLMessages.PutFullData -> true.B,
TLMessages.PutPartialData -> true.B,
TLMessages.ArithmeticData -> true.B,
TLMessages.LogicalData -> true.B,
TLMessages.Get -> false.B,
TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLHints.PREFETCH_READ -> false.B,
TLHints.PREFETCH_WRITE -> true.B)),
TLMessages.AcquireBlock -> acq_needT,
TLMessages.AcquirePerm -> acq_needT))
}
// This is a very expensive circuit; use only if you really mean it!
def inFlight(x: TLBundle): (UInt, UInt) = {
val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W))
val bce = manager.anySupportAcquireB && client.anySupportProbe
val (a_first, a_last, _) = firstlast(x.a)
val (b_first, b_last, _) = firstlast(x.b)
val (c_first, c_last, _) = firstlast(x.c)
val (d_first, d_last, _) = firstlast(x.d)
val (e_first, e_last, _) = firstlast(x.e)
val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits))
val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits))
val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits))
val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits))
val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits))
val a_inc = x.a.fire && a_first && a_request
val b_inc = x.b.fire && b_first && b_request
val c_inc = x.c.fire && c_first && c_request
val d_inc = x.d.fire && d_first && d_request
val e_inc = x.e.fire && e_first && e_request
val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil))
val a_dec = x.a.fire && a_last && a_response
val b_dec = x.b.fire && b_last && b_response
val c_dec = x.c.fire && c_last && c_response
val d_dec = x.d.fire && d_last && d_response
val e_dec = x.e.fire && e_last && e_response
val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil))
val next_flight = flight + PopCount(inc) - PopCount(dec)
flight := next_flight
(flight, next_flight)
}
def prettySourceMapping(context: String): String = {
s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n"
}
}
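// A minimal usage sketch (the `ExampleBeatCounter` module below is hypothetical, not part of this
// file's API): given an already-negotiated edge, the first/last/done/count helpers above track a
// channel's position within a multi-beat burst.
class ExampleBeatCounter(edge: TLEdge) extends Module {
  val io = IO(new Bundle {
    val a = Flipped(Decoupled(new TLBundleA(edge.bundle)))
    val beat = Output(UInt())
  })
  io.a.ready := true.B // sink every beat; real users would apply backpressure
  val (first, last, done, count) = edge.count(io.a)
  io.beat := count // beat index within the current burst (0 on the first beat)
}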
class TLEdgeOut(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
// Transfers
def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquireBlock
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquirePerm
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.Release
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ReleaseData
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) =
Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B)
def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAck
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions, data)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAckData
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B)
def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink)
def GrantAck(toSink: UInt): TLBundleE = {
val e = Wire(new TLBundleE(bundle))
e.sink := toSink
e
}
// Accesses
def Get(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsGetFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Get
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutFullFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutFullData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, mask, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutPartialFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutPartialData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = {
require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsArithmeticFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.ArithmeticData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsLogicalFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.LogicalData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = {
require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsHintFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Hint
a.param := param
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data)
def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAckData
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size)
def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.HintAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
}
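// A minimal usage sketch (the `ExampleGetIssuer` module below is hypothetical, not part of this
// file's API; it assumes the managers reachable over this edge support Get): turn a simple
// address request into a TileLink Get message using the constructors above.
class ExampleGetIssuer(edge: TLEdgeOut) extends Module {
  val io = IO(new Bundle {
    val addr = Flipped(Decoupled(UInt(edge.bundle.addressBits.W)))
    val a = Decoupled(new TLBundleA(edge.bundle))
  })
  // Issue a 4-byte Get (lgSize = 2) from source 0 at the requested address.
  val (legal, get) = edge.Get(fromSource = 0.U, toAddress = io.addr.bits, lgSize = 2.U)
  io.a.valid := io.addr.valid && legal
  io.a.bits := get
  io.addr.ready := io.a.ready && legal
}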
class TLEdgeIn(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = {
val todo = x.filter(!_.isEmpty)
val heads = todo.map(_.head)
val tails = todo.map(_.tail)
if (todo.isEmpty) Nil else { heads +: myTranspose(tails) }
}
// Transfers
def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = {
require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}")
val legal = client.supportsProbe(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Probe
b.param := capPermissions
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.Grant
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.GrantData
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B)
def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.ReleaseAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
// Accesses
def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = {
require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsGet(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Get
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsPutFull(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutFullData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, mask, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsPutPartial(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutPartialData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsArithmetic(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.ArithmeticData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsLogical(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.LogicalData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = {
require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsHint(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Hint
b.param := param
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size)
def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied)
def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
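  // The data-carrying AccessAck overloads below emit an AccessAckData opcode.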
def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data)
def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAckData
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B)
def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied)
def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B)
def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.HintAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
}
| module TileResetSetter( // @[TileResetSetter.scala:26:25]
input clock, // @[TileResetSetter.scala:26:25]
input reset, // @[TileResetSetter.scala:26:25]
input auto_clock_in_member_allClocks_uncore_clock, // @[LazyModuleImp.scala:107:25]
input auto_clock_in_member_allClocks_uncore_reset, // @[LazyModuleImp.scala:107:25]
output auto_clock_out_member_allClocks_uncore_clock, // @[LazyModuleImp.scala:107:25]
output auto_clock_out_member_allClocks_uncore_reset, // @[LazyModuleImp.scala:107:25]
output auto_tl_in_a_ready, // @[LazyModuleImp.scala:107:25]
input auto_tl_in_a_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_tl_in_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_tl_in_a_bits_param, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_tl_in_a_bits_size, // @[LazyModuleImp.scala:107:25]
input [10:0] auto_tl_in_a_bits_source, // @[LazyModuleImp.scala:107:25]
input [20:0] auto_tl_in_a_bits_address, // @[LazyModuleImp.scala:107:25]
input [7:0] auto_tl_in_a_bits_mask, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_tl_in_a_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_tl_in_a_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input auto_tl_in_d_ready, // @[LazyModuleImp.scala:107:25]
output auto_tl_in_d_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_tl_in_d_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_tl_in_d_bits_size, // @[LazyModuleImp.scala:107:25]
output [10:0] auto_tl_in_d_bits_source // @[LazyModuleImp.scala:107:25]
);
wire out_front_valid; // @[RegisterRouter.scala:87:24]
wire out_front_ready; // @[RegisterRouter.scala:87:24]
wire out_bits_read; // @[RegisterRouter.scala:87:24]
wire [10:0] out_bits_extra_tlrr_extra_source; // @[RegisterRouter.scala:87:24]
wire [8:0] in_bits_index; // @[RegisterRouter.scala:73:18]
wire in_bits_read; // @[RegisterRouter.scala:73:18]
wire auto_clock_in_member_allClocks_uncore_clock_0 = auto_clock_in_member_allClocks_uncore_clock; // @[TileResetSetter.scala:26:25]
wire auto_clock_in_member_allClocks_uncore_reset_0 = auto_clock_in_member_allClocks_uncore_reset; // @[TileResetSetter.scala:26:25]
wire auto_tl_in_a_valid_0 = auto_tl_in_a_valid; // @[TileResetSetter.scala:26:25]
wire [2:0] auto_tl_in_a_bits_opcode_0 = auto_tl_in_a_bits_opcode; // @[TileResetSetter.scala:26:25]
wire [2:0] auto_tl_in_a_bits_param_0 = auto_tl_in_a_bits_param; // @[TileResetSetter.scala:26:25]
wire [1:0] auto_tl_in_a_bits_size_0 = auto_tl_in_a_bits_size; // @[TileResetSetter.scala:26:25]
wire [10:0] auto_tl_in_a_bits_source_0 = auto_tl_in_a_bits_source; // @[TileResetSetter.scala:26:25]
wire [20:0] auto_tl_in_a_bits_address_0 = auto_tl_in_a_bits_address; // @[TileResetSetter.scala:26:25]
wire [7:0] auto_tl_in_a_bits_mask_0 = auto_tl_in_a_bits_mask; // @[TileResetSetter.scala:26:25]
wire [63:0] auto_tl_in_a_bits_data_0 = auto_tl_in_a_bits_data; // @[TileResetSetter.scala:26:25]
wire auto_tl_in_a_bits_corrupt_0 = auto_tl_in_a_bits_corrupt; // @[TileResetSetter.scala:26:25]
wire auto_tl_in_d_ready_0 = auto_tl_in_d_ready; // @[TileResetSetter.scala:26:25]
wire [1:0] _out_frontSel_T = 2'h1; // @[OneHot.scala:58:35]
wire [1:0] _out_backSel_T = 2'h1; // @[OneHot.scala:58:35]
wire [8:0] out_maskMatch = 9'h1FF; // @[RegisterRouter.scala:87:24]
wire tile_async_resets_0 = 1'h1; // @[TileResetSetter.scala:29:33]
wire _tile_async_resets_0_T = 1'h1; // @[TileResetSetter.scala:31:38]
wire out_frontSel_0 = 1'h1; // @[RegisterRouter.scala:87:24]
wire out_backSel_0 = 1'h1; // @[RegisterRouter.scala:87:24]
wire out_rifireMux_out = 1'h1; // @[RegisterRouter.scala:87:24]
wire _out_rifireMux_T_5 = 1'h1; // @[RegisterRouter.scala:87:24]
wire _out_rifireMux_WIRE_0 = 1'h1; // @[MuxLiteral.scala:49:48]
wire out_rifireMux = 1'h1; // @[MuxLiteral.scala:49:10]
wire out_wifireMux_out = 1'h1; // @[RegisterRouter.scala:87:24]
wire _out_wifireMux_T_6 = 1'h1; // @[RegisterRouter.scala:87:24]
wire _out_wifireMux_WIRE_0 = 1'h1; // @[MuxLiteral.scala:49:48]
wire out_wifireMux = 1'h1; // @[MuxLiteral.scala:49:10]
wire out_rofireMux_out = 1'h1; // @[RegisterRouter.scala:87:24]
wire _out_rofireMux_T_5 = 1'h1; // @[RegisterRouter.scala:87:24]
wire _out_rofireMux_WIRE_0 = 1'h1; // @[MuxLiteral.scala:49:48]
wire out_rofireMux = 1'h1; // @[MuxLiteral.scala:49:10]
wire out_wofireMux_out = 1'h1; // @[RegisterRouter.scala:87:24]
wire _out_wofireMux_T_6 = 1'h1; // @[RegisterRouter.scala:87:24]
wire _out_wofireMux_WIRE_0 = 1'h1; // @[MuxLiteral.scala:49:48]
wire out_wofireMux = 1'h1; // @[MuxLiteral.scala:49:10]
wire out_iready = 1'h1; // @[RegisterRouter.scala:87:24]
wire out_oready = 1'h1; // @[RegisterRouter.scala:87:24]
wire [2:0] tlNodeIn_d_bits_d_opcode = 3'h0; // @[Edges.scala:792:17]
wire [1:0] auto_tl_in_d_bits_param = 2'h0; // @[TileResetSetter.scala:26:25]
wire [1:0] tlNodeIn_d_bits_param = 2'h0; // @[MixedNode.scala:551:17]
wire [1:0] tlNodeIn_d_bits_d_param = 2'h0; // @[Edges.scala:792:17]
wire auto_tl_in_d_bits_sink = 1'h0; // @[TileResetSetter.scala:26:25]
wire auto_tl_in_d_bits_denied = 1'h0; // @[TileResetSetter.scala:26:25]
wire auto_tl_in_d_bits_corrupt = 1'h0; // @[TileResetSetter.scala:26:25]
wire tlNodeIn_d_bits_sink = 1'h0; // @[MixedNode.scala:551:17]
wire tlNodeIn_d_bits_denied = 1'h0; // @[MixedNode.scala:551:17]
wire tlNodeIn_d_bits_corrupt = 1'h0; // @[MixedNode.scala:551:17]
wire _out_T_7 = 1'h0; // @[RegisterRouter.scala:87:24]
wire _out_T_8 = 1'h0; // @[RegisterRouter.scala:87:24]
wire out_frontSel_1 = 1'h0; // @[RegisterRouter.scala:87:24]
wire out_backSel_1 = 1'h0; // @[RegisterRouter.scala:87:24]
wire _out_rifireMux_T_6 = 1'h0; // @[MuxLiteral.scala:49:17]
wire _out_wifireMux_T_7 = 1'h0; // @[MuxLiteral.scala:49:17]
wire _out_rofireMux_T_6 = 1'h0; // @[MuxLiteral.scala:49:17]
wire _out_wofireMux_T_7 = 1'h0; // @[MuxLiteral.scala:49:17]
wire _out_out_bits_data_T = 1'h0; // @[MuxLiteral.scala:49:17]
wire _out_out_bits_data_T_2 = 1'h0; // @[MuxLiteral.scala:49:17]
wire _out_out_bits_data_WIRE_1_0 = 1'h0; // @[MuxLiteral.scala:49:48]
wire _out_out_bits_data_T_3 = 1'h0; // @[MuxLiteral.scala:49:10]
wire _out_out_bits_data_T_4 = 1'h0; // @[RegisterRouter.scala:87:24]
wire tlNodeIn_d_bits_d_sink = 1'h0; // @[Edges.scala:792:17]
wire tlNodeIn_d_bits_d_denied = 1'h0; // @[Edges.scala:792:17]
wire tlNodeIn_d_bits_d_corrupt = 1'h0; // @[Edges.scala:792:17]
wire [63:0] auto_tl_in_d_bits_data = 64'h0; // @[TileResetSetter.scala:26:25]
wire [63:0] tlNodeIn_d_bits_data = 64'h0; // @[MixedNode.scala:551:17]
wire [63:0] out_bits_data = 64'h0; // @[RegisterRouter.scala:87:24]
wire [63:0] tlNodeIn_d_bits_d_data = 64'h0; // @[Edges.scala:792:17]
wire clockNodeIn_member_allClocks_uncore_clock = auto_clock_in_member_allClocks_uncore_clock_0; // @[MixedNode.scala:551:17]
wire clockNodeOut_member_allClocks_uncore_clock; // @[MixedNode.scala:542:17]
wire clockNodeIn_member_allClocks_uncore_reset = auto_clock_in_member_allClocks_uncore_reset_0; // @[MixedNode.scala:551:17]
wire clockNodeOut_member_allClocks_uncore_reset; // @[MixedNode.scala:542:17]
wire tlNodeIn_a_ready; // @[MixedNode.scala:551:17]
wire tlNodeIn_a_valid = auto_tl_in_a_valid_0; // @[MixedNode.scala:551:17]
wire [2:0] tlNodeIn_a_bits_opcode = auto_tl_in_a_bits_opcode_0; // @[MixedNode.scala:551:17]
wire [2:0] tlNodeIn_a_bits_param = auto_tl_in_a_bits_param_0; // @[MixedNode.scala:551:17]
wire [1:0] tlNodeIn_a_bits_size = auto_tl_in_a_bits_size_0; // @[MixedNode.scala:551:17]
wire [10:0] tlNodeIn_a_bits_source = auto_tl_in_a_bits_source_0; // @[MixedNode.scala:551:17]
wire [20:0] tlNodeIn_a_bits_address = auto_tl_in_a_bits_address_0; // @[MixedNode.scala:551:17]
wire [7:0] tlNodeIn_a_bits_mask = auto_tl_in_a_bits_mask_0; // @[MixedNode.scala:551:17]
wire [63:0] tlNodeIn_a_bits_data = auto_tl_in_a_bits_data_0; // @[MixedNode.scala:551:17]
wire tlNodeIn_a_bits_corrupt = auto_tl_in_a_bits_corrupt_0; // @[MixedNode.scala:551:17]
wire tlNodeIn_d_ready = auto_tl_in_d_ready_0; // @[MixedNode.scala:551:17]
wire tlNodeIn_d_valid; // @[MixedNode.scala:551:17]
wire [2:0] tlNodeIn_d_bits_opcode; // @[MixedNode.scala:551:17]
wire [1:0] tlNodeIn_d_bits_size; // @[MixedNode.scala:551:17]
wire [10:0] tlNodeIn_d_bits_source; // @[MixedNode.scala:551:17]
wire auto_clock_out_member_allClocks_uncore_clock_0; // @[TileResetSetter.scala:26:25]
wire auto_clock_out_member_allClocks_uncore_reset_0; // @[TileResetSetter.scala:26:25]
wire auto_tl_in_a_ready_0; // @[TileResetSetter.scala:26:25]
wire [2:0] auto_tl_in_d_bits_opcode_0; // @[TileResetSetter.scala:26:25]
wire [1:0] auto_tl_in_d_bits_size_0; // @[TileResetSetter.scala:26:25]
wire [10:0] auto_tl_in_d_bits_source_0; // @[TileResetSetter.scala:26:25]
wire auto_tl_in_d_valid_0; // @[TileResetSetter.scala:26:25]
wire in_ready; // @[RegisterRouter.scala:73:18]
assign auto_tl_in_a_ready_0 = tlNodeIn_a_ready; // @[MixedNode.scala:551:17]
wire in_valid = tlNodeIn_a_valid; // @[RegisterRouter.scala:73:18]
wire [1:0] in_bits_extra_tlrr_extra_size = tlNodeIn_a_bits_size; // @[RegisterRouter.scala:73:18]
wire [10:0] in_bits_extra_tlrr_extra_source = tlNodeIn_a_bits_source; // @[RegisterRouter.scala:73:18]
wire [7:0] in_bits_mask = tlNodeIn_a_bits_mask; // @[RegisterRouter.scala:73:18]
wire [63:0] in_bits_data = tlNodeIn_a_bits_data; // @[RegisterRouter.scala:73:18]
wire out_ready = tlNodeIn_d_ready; // @[RegisterRouter.scala:87:24]
wire out_valid; // @[RegisterRouter.scala:87:24]
assign auto_tl_in_d_valid_0 = tlNodeIn_d_valid; // @[MixedNode.scala:551:17]
assign auto_tl_in_d_bits_opcode_0 = tlNodeIn_d_bits_opcode; // @[MixedNode.scala:551:17]
wire [1:0] tlNodeIn_d_bits_d_size; // @[Edges.scala:792:17]
assign auto_tl_in_d_bits_size_0 = tlNodeIn_d_bits_size; // @[MixedNode.scala:551:17]
wire [10:0] tlNodeIn_d_bits_d_source; // @[Edges.scala:792:17]
assign auto_tl_in_d_bits_source_0 = tlNodeIn_d_bits_source; // @[MixedNode.scala:551:17]
assign auto_clock_out_member_allClocks_uncore_clock_0 = clockNodeOut_member_allClocks_uncore_clock; // @[MixedNode.scala:542:17]
assign auto_clock_out_member_allClocks_uncore_reset_0 = clockNodeOut_member_allClocks_uncore_reset; // @[MixedNode.scala:542:17]
assign clockNodeOut_member_allClocks_uncore_clock = clockNodeIn_member_allClocks_uncore_clock; // @[MixedNode.scala:542:17, :551:17]
assign clockNodeOut_member_allClocks_uncore_reset = clockNodeIn_member_allClocks_uncore_reset; // @[MixedNode.scala:542:17, :551:17]
wire _out_in_ready_T; // @[RegisterRouter.scala:87:24]
assign tlNodeIn_a_ready = in_ready; // @[RegisterRouter.scala:73:18]
wire _in_bits_read_T; // @[RegisterRouter.scala:74:36]
wire _out_front_valid_T = in_valid; // @[RegisterRouter.scala:73:18, :87:24]
wire out_front_bits_read = in_bits_read; // @[RegisterRouter.scala:73:18, :87:24]
wire [8:0] out_front_bits_index = in_bits_index; // @[RegisterRouter.scala:73:18, :87:24]
wire [63:0] out_front_bits_data = in_bits_data; // @[RegisterRouter.scala:73:18, :87:24]
wire [7:0] out_front_bits_mask = in_bits_mask; // @[RegisterRouter.scala:73:18, :87:24]
wire [10:0] out_front_bits_extra_tlrr_extra_source = in_bits_extra_tlrr_extra_source; // @[RegisterRouter.scala:73:18, :87:24]
wire [1:0] out_front_bits_extra_tlrr_extra_size = in_bits_extra_tlrr_extra_size; // @[RegisterRouter.scala:73:18, :87:24]
assign _in_bits_read_T = tlNodeIn_a_bits_opcode == 3'h4; // @[RegisterRouter.scala:74:36]
assign in_bits_read = _in_bits_read_T; // @[RegisterRouter.scala:73:18, :74:36]
wire [17:0] _in_bits_index_T = tlNodeIn_a_bits_address[20:3]; // @[Edges.scala:192:34]
assign in_bits_index = _in_bits_index_T[8:0]; // @[RegisterRouter.scala:73:18, :75:19]
wire _out_front_ready_T = out_ready; // @[RegisterRouter.scala:87:24]
wire _out_out_valid_T; // @[RegisterRouter.scala:87:24]
assign tlNodeIn_d_valid = out_valid; // @[RegisterRouter.scala:87:24]
wire _tlNodeIn_d_bits_opcode_T = out_bits_read; // @[RegisterRouter.scala:87:24, :105:25]
assign tlNodeIn_d_bits_d_source = out_bits_extra_tlrr_extra_source; // @[RegisterRouter.scala:87:24]
wire [1:0] out_bits_extra_tlrr_extra_size; // @[RegisterRouter.scala:87:24]
assign tlNodeIn_d_bits_d_size = out_bits_extra_tlrr_extra_size; // @[RegisterRouter.scala:87:24]
assign _out_in_ready_T = out_front_ready; // @[RegisterRouter.scala:87:24]
assign _out_out_valid_T = out_front_valid; // @[RegisterRouter.scala:87:24]
assign out_bits_read = out_front_bits_read; // @[RegisterRouter.scala:87:24]
wire [8:0] out_findex = out_front_bits_index; // @[RegisterRouter.scala:87:24]
wire [8:0] out_bindex = out_front_bits_index; // @[RegisterRouter.scala:87:24]
assign out_bits_extra_tlrr_extra_source = out_front_bits_extra_tlrr_extra_source; // @[RegisterRouter.scala:87:24]
assign out_bits_extra_tlrr_extra_size = out_front_bits_extra_tlrr_extra_size; // @[RegisterRouter.scala:87:24]
wire _out_T = out_findex == 9'h0; // @[RegisterRouter.scala:87:24]
wire _out_T_1 = out_bindex == 9'h0; // @[RegisterRouter.scala:87:24]
wire _out_rifireMux_T_3; // @[RegisterRouter.scala:87:24]
wire _out_out_bits_data_WIRE_0 = _out_T_1; // @[MuxLiteral.scala:49:48]
wire out_rivalid_0; // @[RegisterRouter.scala:87:24]
wire _out_wifireMux_T_4; // @[RegisterRouter.scala:87:24]
wire out_wivalid_0; // @[RegisterRouter.scala:87:24]
wire _out_rofireMux_T_3; // @[RegisterRouter.scala:87:24]
wire out_roready_0; // @[RegisterRouter.scala:87:24]
wire _out_wofireMux_T_4; // @[RegisterRouter.scala:87:24]
wire out_woready_0; // @[RegisterRouter.scala:87:24]
wire _out_frontMask_T = out_front_bits_mask[0]; // @[RegisterRouter.scala:87:24]
wire _out_backMask_T = out_front_bits_mask[0]; // @[RegisterRouter.scala:87:24]
wire _out_frontMask_T_1 = out_front_bits_mask[1]; // @[RegisterRouter.scala:87:24]
wire _out_backMask_T_1 = out_front_bits_mask[1]; // @[RegisterRouter.scala:87:24]
wire _out_frontMask_T_2 = out_front_bits_mask[2]; // @[RegisterRouter.scala:87:24]
wire _out_backMask_T_2 = out_front_bits_mask[2]; // @[RegisterRouter.scala:87:24]
wire _out_frontMask_T_3 = out_front_bits_mask[3]; // @[RegisterRouter.scala:87:24]
wire _out_backMask_T_3 = out_front_bits_mask[3]; // @[RegisterRouter.scala:87:24]
wire _out_frontMask_T_4 = out_front_bits_mask[4]; // @[RegisterRouter.scala:87:24]
wire _out_backMask_T_4 = out_front_bits_mask[4]; // @[RegisterRouter.scala:87:24]
wire _out_frontMask_T_5 = out_front_bits_mask[5]; // @[RegisterRouter.scala:87:24]
wire _out_backMask_T_5 = out_front_bits_mask[5]; // @[RegisterRouter.scala:87:24]
wire _out_frontMask_T_6 = out_front_bits_mask[6]; // @[RegisterRouter.scala:87:24]
wire _out_backMask_T_6 = out_front_bits_mask[6]; // @[RegisterRouter.scala:87:24]
wire _out_frontMask_T_7 = out_front_bits_mask[7]; // @[RegisterRouter.scala:87:24]
wire _out_backMask_T_7 = out_front_bits_mask[7]; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_frontMask_T_8 = {8{_out_frontMask_T}}; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_frontMask_T_9 = {8{_out_frontMask_T_1}}; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_frontMask_T_10 = {8{_out_frontMask_T_2}}; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_frontMask_T_11 = {8{_out_frontMask_T_3}}; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_frontMask_T_12 = {8{_out_frontMask_T_4}}; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_frontMask_T_13 = {8{_out_frontMask_T_5}}; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_frontMask_T_14 = {8{_out_frontMask_T_6}}; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_frontMask_T_15 = {8{_out_frontMask_T_7}}; // @[RegisterRouter.scala:87:24]
wire [15:0] out_frontMask_lo_lo = {_out_frontMask_T_9, _out_frontMask_T_8}; // @[RegisterRouter.scala:87:24]
wire [15:0] out_frontMask_lo_hi = {_out_frontMask_T_11, _out_frontMask_T_10}; // @[RegisterRouter.scala:87:24]
wire [31:0] out_frontMask_lo = {out_frontMask_lo_hi, out_frontMask_lo_lo}; // @[RegisterRouter.scala:87:24]
wire [15:0] out_frontMask_hi_lo = {_out_frontMask_T_13, _out_frontMask_T_12}; // @[RegisterRouter.scala:87:24]
wire [15:0] out_frontMask_hi_hi = {_out_frontMask_T_15, _out_frontMask_T_14}; // @[RegisterRouter.scala:87:24]
wire [31:0] out_frontMask_hi = {out_frontMask_hi_hi, out_frontMask_hi_lo}; // @[RegisterRouter.scala:87:24]
wire [63:0] out_frontMask = {out_frontMask_hi, out_frontMask_lo}; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_backMask_T_8 = {8{_out_backMask_T}}; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_backMask_T_9 = {8{_out_backMask_T_1}}; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_backMask_T_10 = {8{_out_backMask_T_2}}; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_backMask_T_11 = {8{_out_backMask_T_3}}; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_backMask_T_12 = {8{_out_backMask_T_4}}; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_backMask_T_13 = {8{_out_backMask_T_5}}; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_backMask_T_14 = {8{_out_backMask_T_6}}; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_backMask_T_15 = {8{_out_backMask_T_7}}; // @[RegisterRouter.scala:87:24]
wire [15:0] out_backMask_lo_lo = {_out_backMask_T_9, _out_backMask_T_8}; // @[RegisterRouter.scala:87:24]
wire [15:0] out_backMask_lo_hi = {_out_backMask_T_11, _out_backMask_T_10}; // @[RegisterRouter.scala:87:24]
wire [31:0] out_backMask_lo = {out_backMask_lo_hi, out_backMask_lo_lo}; // @[RegisterRouter.scala:87:24]
wire [15:0] out_backMask_hi_lo = {_out_backMask_T_13, _out_backMask_T_12}; // @[RegisterRouter.scala:87:24]
wire [15:0] out_backMask_hi_hi = {_out_backMask_T_15, _out_backMask_T_14}; // @[RegisterRouter.scala:87:24]
wire [31:0] out_backMask_hi = {out_backMask_hi_hi, out_backMask_hi_lo}; // @[RegisterRouter.scala:87:24]
wire [63:0] out_backMask = {out_backMask_hi, out_backMask_lo}; // @[RegisterRouter.scala:87:24]
wire _out_rimask_T = out_frontMask[0]; // @[RegisterRouter.scala:87:24]
wire _out_wimask_T = out_frontMask[0]; // @[RegisterRouter.scala:87:24]
wire out_rimask = _out_rimask_T; // @[RegisterRouter.scala:87:24]
wire out_wimask = _out_wimask_T; // @[RegisterRouter.scala:87:24]
wire _out_romask_T = out_backMask[0]; // @[RegisterRouter.scala:87:24]
wire _out_womask_T = out_backMask[0]; // @[RegisterRouter.scala:87:24]
wire out_romask = _out_romask_T; // @[RegisterRouter.scala:87:24]
wire out_womask = _out_womask_T; // @[RegisterRouter.scala:87:24]
wire out_f_rivalid = out_rivalid_0 & out_rimask; // @[RegisterRouter.scala:87:24]
wire out_f_roready = out_roready_0 & out_romask; // @[RegisterRouter.scala:87:24]
wire out_f_wivalid = out_wivalid_0 & out_wimask; // @[RegisterRouter.scala:87:24]
wire out_f_woready = out_woready_0 & out_womask; // @[RegisterRouter.scala:87:24]
wire _out_T_2 = out_front_bits_data[0]; // @[RegisterRouter.scala:87:24]
wire _out_T_3 = ~out_rimask; // @[RegisterRouter.scala:87:24]
wire _out_T_4 = ~out_wimask; // @[RegisterRouter.scala:87:24]
wire _out_T_5 = ~out_romask; // @[RegisterRouter.scala:87:24]
wire _out_T_6 = ~out_womask; // @[RegisterRouter.scala:87:24]
wire _GEN = in_valid & out_front_ready; // @[RegisterRouter.scala:73:18, :87:24]
wire _out_rifireMux_T; // @[RegisterRouter.scala:87:24]
assign _out_rifireMux_T = _GEN; // @[RegisterRouter.scala:87:24]
wire _out_wifireMux_T; // @[RegisterRouter.scala:87:24]
assign _out_wifireMux_T = _GEN; // @[RegisterRouter.scala:87:24]
wire _out_rifireMux_T_1 = _out_rifireMux_T & out_front_bits_read; // @[RegisterRouter.scala:87:24]
wire _out_rifireMux_T_2 = _out_rifireMux_T_1; // @[RegisterRouter.scala:87:24]
assign _out_rifireMux_T_3 = _out_rifireMux_T_2 & _out_T; // @[RegisterRouter.scala:87:24]
assign out_rivalid_0 = _out_rifireMux_T_3; // @[RegisterRouter.scala:87:24]
wire _out_rifireMux_T_4 = ~_out_T; // @[RegisterRouter.scala:87:24]
wire _out_wifireMux_T_1 = ~out_front_bits_read; // @[RegisterRouter.scala:87:24]
wire _out_wifireMux_T_2 = _out_wifireMux_T & _out_wifireMux_T_1; // @[RegisterRouter.scala:87:24]
wire _out_wifireMux_T_3 = _out_wifireMux_T_2; // @[RegisterRouter.scala:87:24]
assign _out_wifireMux_T_4 = _out_wifireMux_T_3 & _out_T; // @[RegisterRouter.scala:87:24]
assign out_wivalid_0 = _out_wifireMux_T_4; // @[RegisterRouter.scala:87:24]
wire _out_wifireMux_T_5 = ~_out_T; // @[RegisterRouter.scala:87:24]
wire _GEN_0 = out_front_valid & out_ready; // @[RegisterRouter.scala:87:24]
wire _out_rofireMux_T; // @[RegisterRouter.scala:87:24]
assign _out_rofireMux_T = _GEN_0; // @[RegisterRouter.scala:87:24]
wire _out_wofireMux_T; // @[RegisterRouter.scala:87:24]
assign _out_wofireMux_T = _GEN_0; // @[RegisterRouter.scala:87:24]
wire _out_rofireMux_T_1 = _out_rofireMux_T & out_front_bits_read; // @[RegisterRouter.scala:87:24]
wire _out_rofireMux_T_2 = _out_rofireMux_T_1; // @[RegisterRouter.scala:87:24]
assign _out_rofireMux_T_3 = _out_rofireMux_T_2 & _out_T_1; // @[RegisterRouter.scala:87:24]
assign out_roready_0 = _out_rofireMux_T_3; // @[RegisterRouter.scala:87:24]
wire _out_rofireMux_T_4 = ~_out_T_1; // @[RegisterRouter.scala:87:24]
wire _out_wofireMux_T_1 = ~out_front_bits_read; // @[RegisterRouter.scala:87:24]
wire _out_wofireMux_T_2 = _out_wofireMux_T & _out_wofireMux_T_1; // @[RegisterRouter.scala:87:24]
wire _out_wofireMux_T_3 = _out_wofireMux_T_2; // @[RegisterRouter.scala:87:24]
assign _out_wofireMux_T_4 = _out_wofireMux_T_3 & _out_T_1; // @[RegisterRouter.scala:87:24]
assign out_woready_0 = _out_wofireMux_T_4; // @[RegisterRouter.scala:87:24]
wire _out_wofireMux_T_5 = ~_out_T_1; // @[RegisterRouter.scala:87:24]
assign in_ready = _out_in_ready_T; // @[RegisterRouter.scala:73:18, :87:24]
assign out_front_valid = _out_front_valid_T; // @[RegisterRouter.scala:87:24]
assign out_front_ready = _out_front_ready_T; // @[RegisterRouter.scala:87:24]
assign out_valid = _out_out_valid_T; // @[RegisterRouter.scala:87:24]
wire _out_out_bits_data_T_1 = _out_out_bits_data_WIRE_0; // @[MuxLiteral.scala:49:{10,48}]
assign tlNodeIn_d_bits_size = tlNodeIn_d_bits_d_size; // @[Edges.scala:792:17]
assign tlNodeIn_d_bits_source = tlNodeIn_d_bits_d_source; // @[Edges.scala:792:17]
assign tlNodeIn_d_bits_opcode = {2'h0, _tlNodeIn_d_bits_opcode_T}; // @[RegisterRouter.scala:105:{19,25}]
TLMonitor_73 monitor ( // @[Nodes.scala:27:25]
.clock (clock),
.reset (reset),
.io_in_a_ready (tlNodeIn_a_ready), // @[MixedNode.scala:551:17]
.io_in_a_valid (tlNodeIn_a_valid), // @[MixedNode.scala:551:17]
.io_in_a_bits_opcode (tlNodeIn_a_bits_opcode), // @[MixedNode.scala:551:17]
.io_in_a_bits_param (tlNodeIn_a_bits_param), // @[MixedNode.scala:551:17]
.io_in_a_bits_size (tlNodeIn_a_bits_size), // @[MixedNode.scala:551:17]
.io_in_a_bits_source (tlNodeIn_a_bits_source), // @[MixedNode.scala:551:17]
.io_in_a_bits_address (tlNodeIn_a_bits_address), // @[MixedNode.scala:551:17]
.io_in_a_bits_mask (tlNodeIn_a_bits_mask), // @[MixedNode.scala:551:17]
.io_in_a_bits_data (tlNodeIn_a_bits_data), // @[MixedNode.scala:551:17]
.io_in_a_bits_corrupt (tlNodeIn_a_bits_corrupt), // @[MixedNode.scala:551:17]
.io_in_d_ready (tlNodeIn_d_ready), // @[MixedNode.scala:551:17]
.io_in_d_valid (tlNodeIn_d_valid), // @[MixedNode.scala:551:17]
.io_in_d_bits_opcode (tlNodeIn_d_bits_opcode), // @[MixedNode.scala:551:17]
.io_in_d_bits_size (tlNodeIn_d_bits_size), // @[MixedNode.scala:551:17]
.io_in_d_bits_source (tlNodeIn_d_bits_source) // @[MixedNode.scala:551:17]
); // @[Nodes.scala:27:25]
AsyncResetRegVec_w1_i0_6 r_tile_resets_0 ( // @[TileResetSetter.scala:33:15]
.clock (clock),
.io_d (_out_T_2), // @[RegisterRouter.scala:87:24]
.io_en (out_f_woready) // @[RegisterRouter.scala:87:24]
); // @[TileResetSetter.scala:33:15]
assign auto_clock_out_member_allClocks_uncore_clock = auto_clock_out_member_allClocks_uncore_clock_0; // @[TileResetSetter.scala:26:25]
assign auto_clock_out_member_allClocks_uncore_reset = auto_clock_out_member_allClocks_uncore_reset_0; // @[TileResetSetter.scala:26:25]
assign auto_tl_in_a_ready = auto_tl_in_a_ready_0; // @[TileResetSetter.scala:26:25]
assign auto_tl_in_d_valid = auto_tl_in_d_valid_0; // @[TileResetSetter.scala:26:25]
assign auto_tl_in_d_bits_opcode = auto_tl_in_d_bits_opcode_0; // @[TileResetSetter.scala:26:25]
assign auto_tl_in_d_bits_size = auto_tl_in_d_bits_size_0; // @[TileResetSetter.scala:26:25]
assign auto_tl_in_d_bits_source = auto_tl_in_d_bits_source_0; // @[TileResetSetter.scala:26:25]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File MulRecFN.scala:
/*============================================================================
This Chisel source file is part of a pre-release version of the HardFloat IEEE
Floating-Point Arithmetic Package, by John R. Hauser (ported from Verilog to
Chisel by Andrew Waterman).
Copyright 2019, 2020 The Regents of the University of California. All rights
reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions, and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions, and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the University nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
=============================================================================*/
package hardfloat
import chisel3._
import chisel3.util._
import consts._
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
class MulFullRawFN(expWidth: Int, sigWidth: Int) extends chisel3.RawModule
{
val io = IO(new Bundle {
val a = Input(new RawFloat(expWidth, sigWidth))
val b = Input(new RawFloat(expWidth, sigWidth))
val invalidExc = Output(Bool())
val rawOut = Output(new RawFloat(expWidth, sigWidth*2 - 1))
})
/*------------------------------------------------------------------------
*------------------------------------------------------------------------*/
val notSigNaN_invalidExc = (io.a.isInf && io.b.isZero) || (io.a.isZero && io.b.isInf)
val notNaN_isInfOut = io.a.isInf || io.b.isInf
val notNaN_isZeroOut = io.a.isZero || io.b.isZero
val notNaN_signOut = io.a.sign ^ io.b.sign
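    // Sum the biased input exponents and subtract a constant so the product exponent stays in the biased form expected by the rounding stage.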
val common_sExpOut = io.a.sExp + io.b.sExp - (1<<expWidth).S
val common_sigOut = (io.a.sig * io.b.sig)(sigWidth*2 - 1, 0)
/*------------------------------------------------------------------------
*------------------------------------------------------------------------*/
io.invalidExc := isSigNaNRawFloat(io.a) || isSigNaNRawFloat(io.b) || notSigNaN_invalidExc
io.rawOut.isInf := notNaN_isInfOut
io.rawOut.isZero := notNaN_isZeroOut
io.rawOut.sExp := common_sExpOut
io.rawOut.isNaN := io.a.isNaN || io.b.isNaN
io.rawOut.sign := notNaN_signOut
io.rawOut.sig := common_sigOut
}
class MulRawFN(expWidth: Int, sigWidth: Int) extends chisel3.RawModule
{
val io = IO(new Bundle {
val a = Input(new RawFloat(expWidth, sigWidth))
val b = Input(new RawFloat(expWidth, sigWidth))
val invalidExc = Output(Bool())
val rawOut = Output(new RawFloat(expWidth, sigWidth + 2))
})
val mulFullRaw = Module(new MulFullRawFN(expWidth, sigWidth))
mulFullRaw.io.a := io.a
mulFullRaw.io.b := io.b
io.invalidExc := mulFullRaw.io.invalidExc
io.rawOut := mulFullRaw.io.rawOut
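    // Keep the upper bits of the double-width product significand and OR the discarded low bits into a single sticky bit so rounding information is preserved.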
io.rawOut.sig := {
val sig = mulFullRaw.io.rawOut.sig
Cat(sig >> (sigWidth - 2), sig(sigWidth - 3, 0).orR)
}
}
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
class MulRecFN(expWidth: Int, sigWidth: Int) extends chisel3.RawModule
{
val io = IO(new Bundle {
val a = Input(UInt((expWidth + sigWidth + 1).W))
val b = Input(UInt((expWidth + sigWidth + 1).W))
val roundingMode = Input(UInt(3.W))
val detectTininess = Input(Bool())
val out = Output(UInt((expWidth + sigWidth + 1).W))
val exceptionFlags = Output(UInt(5.W))
})
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val mulRawFN = Module(new MulRawFN(expWidth, sigWidth))
mulRawFN.io.a := rawFloatFromRecFN(expWidth, sigWidth, io.a)
mulRawFN.io.b := rawFloatFromRecFN(expWidth, sigWidth, io.b)
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val roundRawFNToRecFN =
Module(new RoundRawFNToRecFN(expWidth, sigWidth, 0))
roundRawFNToRecFN.io.invalidExc := mulRawFN.io.invalidExc
roundRawFNToRecFN.io.infiniteExc := false.B
roundRawFNToRecFN.io.in := mulRawFN.io.rawOut
roundRawFNToRecFN.io.roundingMode := io.roundingMode
roundRawFNToRecFN.io.detectTininess := io.detectTininess
io.out := roundRawFNToRecFN.io.out
io.exceptionFlags := roundRawFNToRecFN.io.exceptionFlags
}
File rawFloatFromRecFN.scala:
/*============================================================================
This Chisel source file is part of a pre-release version of the HardFloat IEEE
Floating-Point Arithmetic Package, by John R. Hauser (with some contributions
from Yunsup Lee and Andrew Waterman, mainly concerning testing).
Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the
University of California. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions, and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions, and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the University nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
=============================================================================*/
package hardfloat
import chisel3._
import chisel3.util._
/*----------------------------------------------------------------------------
| In the result, no more than one of 'isNaN', 'isInf', and 'isZero' will be
| set.
*----------------------------------------------------------------------------*/
object rawFloatFromRecFN
{
def apply(expWidth: Int, sigWidth: Int, in: Bits): RawFloat =
{
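        // In the recoded format the exponent field is expWidth+1 bits wide: a value whose top three exponent bits are zero is zero, and one whose top two bits are both set is Inf or NaN.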
val exp = in(expWidth + sigWidth - 1, sigWidth - 1)
val isZero = exp(expWidth, expWidth - 2) === 0.U
val isSpecial = exp(expWidth, expWidth - 1) === 3.U
val out = Wire(new RawFloat(expWidth, sigWidth))
out.isNaN := isSpecial && exp(expWidth - 2)
out.isInf := isSpecial && ! exp(expWidth - 2)
out.isZero := isZero
out.sign := in(expWidth + sigWidth)
out.sExp := exp.zext
out.sig := 0.U(1.W) ## ! isZero ## in(sigWidth - 2, 0)
out
}
}
| module MulRecFN_25( // @[MulRecFN.scala:100:7]
input [32:0] io_a, // @[MulRecFN.scala:102:16]
input [32:0] io_b, // @[MulRecFN.scala:102:16]
output [32:0] io_out // @[MulRecFN.scala:102:16]
);
wire _mulRawFN_io_invalidExc; // @[MulRecFN.scala:113:26]
wire _mulRawFN_io_rawOut_isNaN; // @[MulRecFN.scala:113:26]
wire _mulRawFN_io_rawOut_isInf; // @[MulRecFN.scala:113:26]
wire _mulRawFN_io_rawOut_isZero; // @[MulRecFN.scala:113:26]
wire _mulRawFN_io_rawOut_sign; // @[MulRecFN.scala:113:26]
wire [9:0] _mulRawFN_io_rawOut_sExp; // @[MulRecFN.scala:113:26]
wire [26:0] _mulRawFN_io_rawOut_sig; // @[MulRecFN.scala:113:26]
wire [32:0] io_a_0 = io_a; // @[MulRecFN.scala:100:7]
wire [32:0] io_b_0 = io_b; // @[MulRecFN.scala:100:7]
wire io_detectTininess = 1'h1; // @[MulRecFN.scala:100:7, :102:16, :121:15]
wire [2:0] io_roundingMode = 3'h0; // @[MulRecFN.scala:100:7, :102:16, :121:15]
wire [32:0] io_out_0; // @[MulRecFN.scala:100:7]
wire [4:0] io_exceptionFlags; // @[MulRecFN.scala:100:7]
wire [8:0] mulRawFN_io_a_exp = io_a_0[31:23]; // @[rawFloatFromRecFN.scala:51:21]
wire [2:0] _mulRawFN_io_a_isZero_T = mulRawFN_io_a_exp[8:6]; // @[rawFloatFromRecFN.scala:51:21, :52:28]
wire mulRawFN_io_a_isZero = _mulRawFN_io_a_isZero_T == 3'h0; // @[rawFloatFromRecFN.scala:52:{28,53}]
wire mulRawFN_io_a_out_isZero = mulRawFN_io_a_isZero; // @[rawFloatFromRecFN.scala:52:53, :55:23]
wire [1:0] _mulRawFN_io_a_isSpecial_T = mulRawFN_io_a_exp[8:7]; // @[rawFloatFromRecFN.scala:51:21, :53:28]
wire mulRawFN_io_a_isSpecial = &_mulRawFN_io_a_isSpecial_T; // @[rawFloatFromRecFN.scala:53:{28,53}]
wire _mulRawFN_io_a_out_isNaN_T_1; // @[rawFloatFromRecFN.scala:56:33]
wire _mulRawFN_io_a_out_isInf_T_2; // @[rawFloatFromRecFN.scala:57:33]
wire _mulRawFN_io_a_out_sign_T; // @[rawFloatFromRecFN.scala:59:25]
wire [9:0] _mulRawFN_io_a_out_sExp_T; // @[rawFloatFromRecFN.scala:60:27]
wire [24:0] _mulRawFN_io_a_out_sig_T_3; // @[rawFloatFromRecFN.scala:61:44]
wire mulRawFN_io_a_out_isNaN; // @[rawFloatFromRecFN.scala:55:23]
wire mulRawFN_io_a_out_isInf; // @[rawFloatFromRecFN.scala:55:23]
wire mulRawFN_io_a_out_sign; // @[rawFloatFromRecFN.scala:55:23]
wire [9:0] mulRawFN_io_a_out_sExp; // @[rawFloatFromRecFN.scala:55:23]
wire [24:0] mulRawFN_io_a_out_sig; // @[rawFloatFromRecFN.scala:55:23]
wire _mulRawFN_io_a_out_isNaN_T = mulRawFN_io_a_exp[6]; // @[rawFloatFromRecFN.scala:51:21, :56:41]
wire _mulRawFN_io_a_out_isInf_T = mulRawFN_io_a_exp[6]; // @[rawFloatFromRecFN.scala:51:21, :56:41, :57:41]
assign _mulRawFN_io_a_out_isNaN_T_1 = mulRawFN_io_a_isSpecial & _mulRawFN_io_a_out_isNaN_T; // @[rawFloatFromRecFN.scala:53:53, :56:{33,41}]
assign mulRawFN_io_a_out_isNaN = _mulRawFN_io_a_out_isNaN_T_1; // @[rawFloatFromRecFN.scala:55:23, :56:33]
wire _mulRawFN_io_a_out_isInf_T_1 = ~_mulRawFN_io_a_out_isInf_T; // @[rawFloatFromRecFN.scala:57:{36,41}]
assign _mulRawFN_io_a_out_isInf_T_2 = mulRawFN_io_a_isSpecial & _mulRawFN_io_a_out_isInf_T_1; // @[rawFloatFromRecFN.scala:53:53, :57:{33,36}]
assign mulRawFN_io_a_out_isInf = _mulRawFN_io_a_out_isInf_T_2; // @[rawFloatFromRecFN.scala:55:23, :57:33]
assign _mulRawFN_io_a_out_sign_T = io_a_0[32]; // @[rawFloatFromRecFN.scala:59:25]
assign mulRawFN_io_a_out_sign = _mulRawFN_io_a_out_sign_T; // @[rawFloatFromRecFN.scala:55:23, :59:25]
assign _mulRawFN_io_a_out_sExp_T = {1'h0, mulRawFN_io_a_exp}; // @[rawFloatFromRecFN.scala:51:21, :60:27]
assign mulRawFN_io_a_out_sExp = _mulRawFN_io_a_out_sExp_T; // @[rawFloatFromRecFN.scala:55:23, :60:27]
wire _mulRawFN_io_a_out_sig_T = ~mulRawFN_io_a_isZero; // @[rawFloatFromRecFN.scala:52:53, :61:35]
wire [1:0] _mulRawFN_io_a_out_sig_T_1 = {1'h0, _mulRawFN_io_a_out_sig_T}; // @[rawFloatFromRecFN.scala:61:{32,35}]
wire [22:0] _mulRawFN_io_a_out_sig_T_2 = io_a_0[22:0]; // @[rawFloatFromRecFN.scala:61:49]
assign _mulRawFN_io_a_out_sig_T_3 = {_mulRawFN_io_a_out_sig_T_1, _mulRawFN_io_a_out_sig_T_2}; // @[rawFloatFromRecFN.scala:61:{32,44,49}]
assign mulRawFN_io_a_out_sig = _mulRawFN_io_a_out_sig_T_3; // @[rawFloatFromRecFN.scala:55:23, :61:44]
wire [8:0] mulRawFN_io_b_exp = io_b_0[31:23]; // @[rawFloatFromRecFN.scala:51:21]
wire [2:0] _mulRawFN_io_b_isZero_T = mulRawFN_io_b_exp[8:6]; // @[rawFloatFromRecFN.scala:51:21, :52:28]
wire mulRawFN_io_b_isZero = _mulRawFN_io_b_isZero_T == 3'h0; // @[rawFloatFromRecFN.scala:52:{28,53}]
wire mulRawFN_io_b_out_isZero = mulRawFN_io_b_isZero; // @[rawFloatFromRecFN.scala:52:53, :55:23]
wire [1:0] _mulRawFN_io_b_isSpecial_T = mulRawFN_io_b_exp[8:7]; // @[rawFloatFromRecFN.scala:51:21, :53:28]
wire mulRawFN_io_b_isSpecial = &_mulRawFN_io_b_isSpecial_T; // @[rawFloatFromRecFN.scala:53:{28,53}]
wire _mulRawFN_io_b_out_isNaN_T_1; // @[rawFloatFromRecFN.scala:56:33]
wire _mulRawFN_io_b_out_isInf_T_2; // @[rawFloatFromRecFN.scala:57:33]
wire _mulRawFN_io_b_out_sign_T; // @[rawFloatFromRecFN.scala:59:25]
wire [9:0] _mulRawFN_io_b_out_sExp_T; // @[rawFloatFromRecFN.scala:60:27]
wire [24:0] _mulRawFN_io_b_out_sig_T_3; // @[rawFloatFromRecFN.scala:61:44]
wire mulRawFN_io_b_out_isNaN; // @[rawFloatFromRecFN.scala:55:23]
wire mulRawFN_io_b_out_isInf; // @[rawFloatFromRecFN.scala:55:23]
wire mulRawFN_io_b_out_sign; // @[rawFloatFromRecFN.scala:55:23]
wire [9:0] mulRawFN_io_b_out_sExp; // @[rawFloatFromRecFN.scala:55:23]
wire [24:0] mulRawFN_io_b_out_sig; // @[rawFloatFromRecFN.scala:55:23]
wire _mulRawFN_io_b_out_isNaN_T = mulRawFN_io_b_exp[6]; // @[rawFloatFromRecFN.scala:51:21, :56:41]
wire _mulRawFN_io_b_out_isInf_T = mulRawFN_io_b_exp[6]; // @[rawFloatFromRecFN.scala:51:21, :56:41, :57:41]
assign _mulRawFN_io_b_out_isNaN_T_1 = mulRawFN_io_b_isSpecial & _mulRawFN_io_b_out_isNaN_T; // @[rawFloatFromRecFN.scala:53:53, :56:{33,41}]
assign mulRawFN_io_b_out_isNaN = _mulRawFN_io_b_out_isNaN_T_1; // @[rawFloatFromRecFN.scala:55:23, :56:33]
wire _mulRawFN_io_b_out_isInf_T_1 = ~_mulRawFN_io_b_out_isInf_T; // @[rawFloatFromRecFN.scala:57:{36,41}]
assign _mulRawFN_io_b_out_isInf_T_2 = mulRawFN_io_b_isSpecial & _mulRawFN_io_b_out_isInf_T_1; // @[rawFloatFromRecFN.scala:53:53, :57:{33,36}]
assign mulRawFN_io_b_out_isInf = _mulRawFN_io_b_out_isInf_T_2; // @[rawFloatFromRecFN.scala:55:23, :57:33]
assign _mulRawFN_io_b_out_sign_T = io_b_0[32]; // @[rawFloatFromRecFN.scala:59:25]
assign mulRawFN_io_b_out_sign = _mulRawFN_io_b_out_sign_T; // @[rawFloatFromRecFN.scala:55:23, :59:25]
assign _mulRawFN_io_b_out_sExp_T = {1'h0, mulRawFN_io_b_exp}; // @[rawFloatFromRecFN.scala:51:21, :60:27]
assign mulRawFN_io_b_out_sExp = _mulRawFN_io_b_out_sExp_T; // @[rawFloatFromRecFN.scala:55:23, :60:27]
wire _mulRawFN_io_b_out_sig_T = ~mulRawFN_io_b_isZero; // @[rawFloatFromRecFN.scala:52:53, :61:35]
wire [1:0] _mulRawFN_io_b_out_sig_T_1 = {1'h0, _mulRawFN_io_b_out_sig_T}; // @[rawFloatFromRecFN.scala:61:{32,35}]
wire [22:0] _mulRawFN_io_b_out_sig_T_2 = io_b_0[22:0]; // @[rawFloatFromRecFN.scala:61:49]
assign _mulRawFN_io_b_out_sig_T_3 = {_mulRawFN_io_b_out_sig_T_1, _mulRawFN_io_b_out_sig_T_2}; // @[rawFloatFromRecFN.scala:61:{32,44,49}]
assign mulRawFN_io_b_out_sig = _mulRawFN_io_b_out_sig_T_3; // @[rawFloatFromRecFN.scala:55:23, :61:44]
MulRawFN_25 mulRawFN ( // @[MulRecFN.scala:113:26]
.io_a_isNaN (mulRawFN_io_a_out_isNaN), // @[rawFloatFromRecFN.scala:55:23]
.io_a_isInf (mulRawFN_io_a_out_isInf), // @[rawFloatFromRecFN.scala:55:23]
.io_a_isZero (mulRawFN_io_a_out_isZero), // @[rawFloatFromRecFN.scala:55:23]
.io_a_sign (mulRawFN_io_a_out_sign), // @[rawFloatFromRecFN.scala:55:23]
.io_a_sExp (mulRawFN_io_a_out_sExp), // @[rawFloatFromRecFN.scala:55:23]
.io_a_sig (mulRawFN_io_a_out_sig), // @[rawFloatFromRecFN.scala:55:23]
.io_b_isNaN (mulRawFN_io_b_out_isNaN), // @[rawFloatFromRecFN.scala:55:23]
.io_b_isInf (mulRawFN_io_b_out_isInf), // @[rawFloatFromRecFN.scala:55:23]
.io_b_isZero (mulRawFN_io_b_out_isZero), // @[rawFloatFromRecFN.scala:55:23]
.io_b_sign (mulRawFN_io_b_out_sign), // @[rawFloatFromRecFN.scala:55:23]
.io_b_sExp (mulRawFN_io_b_out_sExp), // @[rawFloatFromRecFN.scala:55:23]
.io_b_sig (mulRawFN_io_b_out_sig), // @[rawFloatFromRecFN.scala:55:23]
.io_invalidExc (_mulRawFN_io_invalidExc),
.io_rawOut_isNaN (_mulRawFN_io_rawOut_isNaN),
.io_rawOut_isInf (_mulRawFN_io_rawOut_isInf),
.io_rawOut_isZero (_mulRawFN_io_rawOut_isZero),
.io_rawOut_sign (_mulRawFN_io_rawOut_sign),
.io_rawOut_sExp (_mulRawFN_io_rawOut_sExp),
.io_rawOut_sig (_mulRawFN_io_rawOut_sig)
); // @[MulRecFN.scala:113:26]
RoundRawFNToRecFN_e8_s24_90 roundRawFNToRecFN ( // @[MulRecFN.scala:121:15]
.io_invalidExc (_mulRawFN_io_invalidExc), // @[MulRecFN.scala:113:26]
.io_in_isNaN (_mulRawFN_io_rawOut_isNaN), // @[MulRecFN.scala:113:26]
.io_in_isInf (_mulRawFN_io_rawOut_isInf), // @[MulRecFN.scala:113:26]
.io_in_isZero (_mulRawFN_io_rawOut_isZero), // @[MulRecFN.scala:113:26]
.io_in_sign (_mulRawFN_io_rawOut_sign), // @[MulRecFN.scala:113:26]
.io_in_sExp (_mulRawFN_io_rawOut_sExp), // @[MulRecFN.scala:113:26]
.io_in_sig (_mulRawFN_io_rawOut_sig), // @[MulRecFN.scala:113:26]
.io_out (io_out_0),
.io_exceptionFlags (io_exceptionFlags)
); // @[MulRecFN.scala:121:15]
assign io_out = io_out_0; // @[MulRecFN.scala:100:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File StoreSequencer.scala:
package saturn.backend
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config._
import saturn.common._
class StoreSequencer(implicit p: Parameters) extends PipeSequencer(new StoreDataMicroOp)(p) {
def accepts(inst: VectorIssueInst) = inst.vmu && inst.opcode(5)
val valid = RegInit(false.B)
val inst = Reg(new VectorIssueInst)
val eidx = Reg(UInt(log2Ceil(maxVLMax).W))
val sidx = Reg(UInt(3.W))
val rvd_mask = Reg(UInt(egsTotal.W))
val rvm_mask = Reg(UInt(egsPerVReg.W))
val sub_dlen = Reg(UInt(2.W))
val head = Reg(Bool())
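  // v0 is read by the store sequencer only for masked unit-stride stores; masked strided/indexed stores get their mask through the permute sequencer (see the vpissq renvm setting in Backend.scala).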
val renvm = !inst.vm && inst.mop === mopUnit
val next_eidx = get_next_eidx(inst.vconfig.vl, eidx, inst.mem_elem_size, sub_dlen, false.B, false.B)
val tail = next_eidx === inst.vconfig.vl && sidx === inst.seg_nf
io.dis.ready := !valid || (tail && io.iss.fire) && !io.dis_stall
when (io.dis.fire) {
val iss_inst = io.dis.bits
valid := true.B
inst := iss_inst
eidx := iss_inst.vstart
sidx := 0.U
val rvd_arch_mask = Wire(Vec(32, Bool()))
for (i <- 0 until 32) {
val group = i.U >> iss_inst.emul
val rd_group = iss_inst.rd >> iss_inst.emul
rvd_arch_mask(i) := group >= rd_group && group <= (rd_group + iss_inst.nf)
}
rvd_mask := FillInterleaved(egsPerVReg, rvd_arch_mask.asUInt)
rvm_mask := Mux(!iss_inst.vm, ~(0.U(egsPerVReg.W)), 0.U)
sub_dlen := Mux(iss_inst.seg_nf =/= 0.U && (dLenOffBits.U > (3.U +& iss_inst.mem_elem_size)),
dLenOffBits.U - 3.U - iss_inst.mem_elem_size,
0.U)
head := true.B
} .elsewhen (io.iss.fire) {
valid := !tail
head := false.B
}
io.vat := inst.vat
io.seq_hazard.valid := valid
io.seq_hazard.bits.rintent := hazardMultiply(rvd_mask | rvm_mask)
io.seq_hazard.bits.wintent := 0.U
io.seq_hazard.bits.vat := inst.vat
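  // Block issue while an older in-flight instruction still intends to write an element group this store is about to read (RAW hazard on vd or the v0 mask).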
val vd_read_oh = UIntToOH(io.rvd.req.bits.eg)
val vm_read_oh = Mux(renvm, UIntToOH(io.rvm.req.bits.eg), 0.U)
val raw_hazard = ((vm_read_oh | vd_read_oh) & io.older_writes) =/= 0.U
val data_hazard = raw_hazard
val oldest = inst.vat === io.vat_head
io.rvd.req.valid := valid && io.iss.ready
io.rvd.req.bits.eg := getEgId(inst.rd + (sidx << inst.emul), eidx, inst.mem_elem_size, false.B)
io.rvd.req.bits.oldest := oldest
io.rvm.req.valid := valid && renvm && io.iss.ready
io.rvm.req.bits.eg := getEgId(0.U, eidx, 0.U, true.B)
io.rvm.req.bits.oldest := oldest
io.iss.valid := valid && !data_hazard && (!renvm || io.rvm.req.ready) && io.rvd.req.ready
io.iss.bits.stdata := io.rvd.resp
val head_mask = get_head_mask(~(0.U(dLenB.W)), eidx , inst.mem_elem_size)
val tail_mask = get_tail_mask(~(0.U(dLenB.W)), next_eidx, inst.mem_elem_size)
val vm_mask = Mux(!renvm, ~(0.U(dLenB.W)), get_vm_mask(io.rvm.resp, eidx, inst.mem_elem_size))
io.iss.bits.stmask := vm_mask
io.iss.bits.debug_id := inst.debug_id
io.iss.bits.tail := tail
io.iss.bits.vat := inst.vat
when (io.iss.fire && !tail) {
when (next_is_new_eg(eidx, next_eidx, inst.mem_elem_size, false.B) && vParams.enableChaining.B) {
rvd_mask := rvd_mask & ~UIntToOH(io.rvd.req.bits.eg)
}
when (next_is_new_eg(eidx, next_eidx, 0.U, true.B) && vParams.enableChaining.B) {
rvm_mask := rvm_mask & ~UIntToOH(io.rvm.req.bits.eg)
}
when (sidx === inst.seg_nf) {
sidx := 0.U
eidx := next_eidx
} .otherwise {
sidx := sidx + 1.U
}
}
io.busy := valid
io.head := head
}
File LoadSequencer.scala:
package saturn.backend
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config._
import saturn.common._
class LoadSequencer(implicit p: Parameters) extends PipeSequencer(new LoadRespMicroOp)(p) {
def accepts(inst: VectorIssueInst) = inst.vmu && !inst.opcode(5)
val valid = RegInit(false.B)
val inst = Reg(new BackendIssueInst)
val eidx = Reg(UInt(log2Ceil(maxVLMax).W))
val sidx = Reg(UInt(3.W))
val wvd_mask = Reg(UInt(egsTotal.W))
val rvm_mask = Reg(UInt(egsPerVReg.W))
val head = Reg(Bool())
val renvm = !inst.vm
val next_eidx = get_next_eidx(inst.vconfig.vl, eidx, inst.mem_elem_size, 0.U, false.B, false.B)
val tail = next_eidx === inst.vconfig.vl && sidx === inst.seg_nf
io.dis.ready := !valid || (tail && io.iss.fire) && !io.dis_stall
when (io.dis.fire) {
val iss_inst = io.dis.bits
valid := true.B
inst := iss_inst
eidx := iss_inst.vstart
sidx := iss_inst.segstart
val wvd_arch_mask = Wire(Vec(32, Bool()))
for (i <- 0 until 32) {
val group = i.U >> iss_inst.emul
val rd_group = iss_inst.rd >> iss_inst.emul
wvd_arch_mask(i) := group >= rd_group && group <= (rd_group + iss_inst.nf)
}
wvd_mask := FillInterleaved(egsPerVReg, wvd_arch_mask.asUInt)
rvm_mask := Mux(!iss_inst.vm, ~(0.U(egsPerVReg.W)), 0.U)
head := true.B
} .elsewhen (io.iss.fire) {
valid := !tail
head := false.B
}
io.vat := inst.vat
io.seq_hazard.valid := valid
io.seq_hazard.bits.rintent := hazardMultiply(rvm_mask)
io.seq_hazard.bits.wintent := hazardMultiply(wvd_mask)
io.seq_hazard.bits.vat := inst.vat
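  // Loads check a RAW hazard on the v0 mask read and WAW/WAR hazards on the destination element group against older writes and reads.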
val vm_read_oh = Mux(renvm, UIntToOH(io.rvm.req.bits.eg), 0.U)
val vd_write_oh = UIntToOH(io.iss.bits.wvd_eg)
val raw_hazard = (vm_read_oh & io.older_writes) =/= 0.U
val waw_hazard = (vd_write_oh & io.older_writes) =/= 0.U
val war_hazard = (vd_write_oh & io.older_reads) =/= 0.U
val data_hazard = raw_hazard || waw_hazard || war_hazard
io.rvm.req.valid := valid && renvm
io.rvm.req.bits.eg := getEgId(0.U, eidx, 0.U, true.B)
io.rvm.req.bits.oldest := inst.vat === io.vat_head
io.iss.valid := valid && !data_hazard && (!renvm || io.rvm.req.ready)
io.iss.bits.wvd_eg := getEgId(inst.rd + (sidx << inst.emul), eidx, inst.mem_elem_size, false.B)
io.iss.bits.tail := tail
io.iss.bits.vat := inst.vat
io.iss.bits.debug_id := inst.debug_id
val head_mask = get_head_mask(~(0.U(dLenB.W)), eidx , inst.mem_elem_size)
val tail_mask = get_tail_mask(~(0.U(dLenB.W)), next_eidx, inst.mem_elem_size)
val vm_mask = Mux(!renvm, ~(0.U(dLenB.W)), get_vm_mask(io.rvm.resp, eidx, inst.mem_elem_size))
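  // For segmented loads (seg_nf =/= 0), segment indices past inst.segend produce no write (zero mask).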
io.iss.bits.wmask := Mux(sidx > inst.segend && inst.seg_nf =/= 0.U, 0.U, head_mask & tail_mask & vm_mask)
when (io.iss.fire && !tail) {
when (next_is_new_eg(eidx, next_eidx, inst.mem_elem_size, false.B) && vParams.enableChaining.B) {
wvd_mask := wvd_mask & ~vd_write_oh
}
when (next_is_new_eg(eidx, next_eidx, 0.U, true.B) && vParams.enableChaining.B) {
rvm_mask := rvm_mask & ~UIntToOH(io.rvm.req.bits.eg)
}
when (sidx === inst.seg_nf) {
sidx := 0.U
eidx := next_eidx
} .otherwise {
sidx := sidx + 1.U
}
}
io.busy := valid
io.head := head
}
File Backend.scala:
package saturn.backend
import chisel3._
import chisel3.util._
import chisel3.experimental.dataview._
import org.chipsalliance.cde.config._
import freechips.rocketchip.tile._
import freechips.rocketchip.util._
import saturn.mem._
import saturn.exu._
import saturn.common._
import saturn.insns._
class VectorBackend(implicit p: Parameters) extends CoreModule()(p) with HasVectorParams {
val io = IO(new Bundle {
val dis = Flipped(Decoupled(new VectorIssueInst))
val vmu = Flipped(new VectorMemDatapathIO)
val busy = Output(Bool())
val index_access = new VectorIndexAccessIO
val mask_access = new VectorMaskAccessIO
val scalar_resp = Decoupled(new ScalarWrite)
val set_vxsat = Output(Bool())
val set_fflags = Output(Valid(UInt(5.W)))
val fp_req = Decoupled(new FPInput())
val fp_resp = Flipped(Valid(new FPResult()))
val vat_tail = Input(UInt(vParams.vatSz.W))
val vat_head = Input(UInt(vParams.vatSz.W))
val vat_release = Output(Vec(nRelease, Valid(UInt(vParams.vatSz.W))))
})
require(vLen >= 64)
require(xLen == 64)
require(vLen >= dLen)
require(vLen % dLen == 0)
def vatOlder(i0: UInt, i1: UInt) = cqOlder(i0, i1, io.vat_tail)
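  // Instruction age is tracked with vector age tags (VATs); vatOlder compares two tags as circular-queue indices relative to the current tail pointer.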
val vdq = Module(new DCEQueue(new VectorIssueInst, vParams.vdqEntries))
vdq.io.enq <> io.dis
val perm_buffer = Module(new Compactor(dLenB, dLenB, UInt(8.W), false))
val xissParams = vParams.issStructure.generate(vParams)
val all_supported_insns = xissParams.map(_.insns).flatten
val vlissq = Module(new IssueQueue(vParams.vlissqEntries, 1))
val vsissq = Module(new IssueQueue(vParams.vsissqEntries, 1))
val vpissq = Module(new IssueQueue(vParams.vpissqEntries, 1))
val vxissqs = xissParams.map(q => Module(new IssueQueue(q.depth, q.seqs.size)).suggestName(s"vxissq_${q.name}"))
val vls = Module(new LoadSequencer)
val vss = Module(new StoreSequencer)
val vps = Module(new PermuteSequencer(xissParams.map(_.insns).flatten))
val vxs = xissParams.map(q => q.seqs.map(s =>
Module(new ExecuteSequencer(s.insns)).suggestName(s"vxs${s.name}")
))
val allSeqs = Seq(vls, vss, vps) ++ vxs.flatten
val allIssQs = Seq(vlissq, vsissq, vpissq) ++ vxissqs
val vxus = xissParams.map(_.seqs.map(s => Module(new ExecutionUnit(s.fus)).suggestName(s"vxu${s.name}")))
io.fp_req.valid := false.B
io.fp_req.bits := DontCare
vxus.foreach(_.foreach(_.io.shared_fp_req := DontCare))
vxus.foreach(_.foreach(_.io.shared_fp_resp := DontCare))
val shared_fp_vxu = vxus.flatten.filter(_.hasSharedFPUnits)
require(shared_fp_vxu.size <= 1)
shared_fp_vxu.headOption.foreach { vxu =>
io.fp_req <> vxu.io.shared_fp_req
vxu.io.shared_fp_resp <> io.fp_resp
}
case class IssueGroup(
issq: IssueQueue,
seqs: Seq[PipeSequencer[_]])
val issGroups = Seq(
IssueGroup(vlissq, Seq(vls)),
IssueGroup(vsissq, Seq(vss)),
IssueGroup(vpissq, Seq(vps))
) ++ (vxissqs.zip(vxs).map { case (q, seqs) =>
IssueGroup(q, seqs)
})
vlissq.io.enq.bits.reduction := false.B
vlissq.io.enq.bits.wide_vd := false.B
vlissq.io.enq.bits.wide_vs2 := false.B
vlissq.io.enq.bits.writes_mask := false.B
vlissq.io.enq.bits.reads_vs1_mask := false.B
vlissq.io.enq.bits.reads_vs2_mask := false.B
vlissq.io.enq.bits.nf_log2 := log2_up(vdq.io.deq.bits.nf, 8)
vlissq.io.enq.bits.renv1 := false.B
vlissq.io.enq.bits.renv2 := false.B
vlissq.io.enq.bits.renvd := false.B
vlissq.io.enq.bits.renvm := !vdq.io.deq.bits.vm
vlissq.io.enq.bits.wvd := true.B
vlissq.io.enq.bits.scalar_to_vd0 := false.B
vlissq.io.enq.bits.rs1_is_rs2 := false.B
vsissq.io.enq.bits.reduction := false.B
vsissq.io.enq.bits.wide_vd := false.B
vsissq.io.enq.bits.wide_vs2 := false.B
vsissq.io.enq.bits.writes_mask := false.B
vsissq.io.enq.bits.reads_vs1_mask := false.B
vsissq.io.enq.bits.reads_vs2_mask := false.B
vsissq.io.enq.bits.nf_log2 := log2_up(vdq.io.deq.bits.nf, 8)
vsissq.io.enq.bits.renv1 := false.B
vsissq.io.enq.bits.renv2 := false.B
vsissq.io.enq.bits.renvd := true.B
vsissq.io.enq.bits.renvm := !vdq.io.deq.bits.vm && vdq.io.deq.bits.mop === mopUnit
vsissq.io.enq.bits.wvd := false.B
vsissq.io.enq.bits.scalar_to_vd0 := false.B
vsissq.io.enq.bits.rs1_is_rs2 := false.B
vpissq.io.enq.bits.reduction := false.B
vpissq.io.enq.bits.wide_vd := false.B
vpissq.io.enq.bits.wide_vs2 := false.B
vpissq.io.enq.bits.writes_mask := false.B
vpissq.io.enq.bits.reads_vs1_mask := false.B
vpissq.io.enq.bits.reads_vs2_mask := false.B
vpissq.io.enq.bits.nf_log2 := 0.U
vpissq.io.enq.bits.renv1 := false.B
vpissq.io.enq.bits.renv2 := vdq.io.deq.bits.mop(0) || !vdq.io.deq.bits.vmu
vpissq.io.enq.bits.renvd := true.B
vpissq.io.enq.bits.renvm := !vdq.io.deq.bits.vm && vdq.io.deq.bits.mop =/= mopUnit && vdq.io.deq.bits.vmu
vpissq.io.enq.bits.wvd := false.B
vpissq.io.enq.bits.scalar_to_vd0 := false.B
vpissq.io.enq.bits.rs1_is_rs2 := !vdq.io.deq.bits.vmu && (vdq.io.deq.bits.opif6 === OPIFunct6.rgather || (vdq.io.deq.bits.funct3 === OPIVV && vdq.io.deq.bits.opif6 === OPIFunct6.rgatherei16))
val xdis_ctrl = new VectorDecoder(vdq.io.deq.bits.funct3, vdq.io.deq.bits.funct6, vdq.io.deq.bits.rs1, vdq.io.deq.bits.rs2, all_supported_insns,
Seq(Reduction, Wide2VD, Wide2VS2, WritesAsMask, ReadsVS1AsMask, ReadsVS2AsMask, ReadsVS1, ReadsVS2, ReadsVD,
VMBitReadsVM, AlwaysReadsVM, WritesVD, WritesScalar, ScalarToVD0))
vxissqs.foreach { vxissq =>
vxissq.io.enq.bits.wide_vd := xdis_ctrl.bool(Wide2VD)
vxissq.io.enq.bits.wide_vs2 := xdis_ctrl.bool(Wide2VS2)
vxissq.io.enq.bits.writes_mask := xdis_ctrl.bool(WritesAsMask)
vxissq.io.enq.bits.reads_vs1_mask := xdis_ctrl.bool(ReadsVS1AsMask)
vxissq.io.enq.bits.reads_vs2_mask := xdis_ctrl.bool(ReadsVS2AsMask)
vxissq.io.enq.bits.nf_log2 := 0.U
vxissq.io.enq.bits.renv1 := xdis_ctrl.bool(ReadsVS1)
vxissq.io.enq.bits.renv2 := xdis_ctrl.bool(ReadsVS2)
vxissq.io.enq.bits.renvd := xdis_ctrl.bool(ReadsVD)
vxissq.io.enq.bits.renvm := (!vdq.io.deq.bits.vm && xdis_ctrl.bool(VMBitReadsVM)) || xdis_ctrl.bool(AlwaysReadsVM)
vxissq.io.enq.bits.wvd := !xdis_ctrl.bool(WritesScalar)
vxissq.io.enq.bits.scalar_to_vd0 := xdis_ctrl.bool(ScalarToVD0)
vxissq.io.enq.bits.reduction := xdis_ctrl.bool(Reduction)
vxissq.io.enq.bits.rs1_is_rs2 := false.B
}
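// Dispatch stalls when an instruction's accepting issue queue is full; the vdq only dequeues when no accepting queue stalls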
val issq_stall = Wire(Vec(issGroups.size, Bool()))
vdq.io.deq.ready := !issq_stall.orR
for ((group, i) <- issGroups.zipWithIndex) {
val otherIssGroups = issGroups.zipWithIndex.filter(_._2 != i).map(_._1)
val otherIssqs = otherIssGroups.map(_.issq)
val otherIssqSeqs = otherIssGroups.map(_.seqs).flatten
for ((seq, j) <- group.seqs.zipWithIndex) {
val otherSameIssqSeqs = group.seqs.zipWithIndex.filter(_._2 != j).map(_._1)
val otherSeqs = otherIssqSeqs ++ otherSameIssqSeqs
val vat = seq.io.vat
seq.io.rvs1 := DontCare
seq.io.rvs2 := DontCare
seq.io.rvd := DontCare
seq.io.rvm := DontCare
seq.io.perm := DontCare
seq.io.acc.valid := false.B
seq.io.acc.bits := DontCare
seq.io.vat_head := io.vat_head
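// Collect write/read intents of strictly older (by vat) instructions from the other issue queues and sequencers to form this sequencer's hazard inputs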
val older_issq_wintents = FillInterleaved(egsPerVReg, otherIssqs.map { i =>
i.io.hazards.map(h => Mux(vatOlder(h.bits.vat, vat) && h.valid, h.bits.wintent, 0.U))
}.flatten.foldLeft(0.U)(_|_))
val older_seq_wintents = otherSeqs.map { s =>
Mux(vatOlder(s.io.seq_hazard.bits.vat, vat) && s.io.seq_hazard.valid, s.io.seq_hazard.bits.wintent, 0.U)
}.reduce(_|_)
val older_wintents = older_issq_wintents | older_seq_wintents
val older_issq_rintents = FillInterleaved(egsPerVReg, otherIssqs.map { i =>
i.io.hazards.map(h => Mux(vatOlder(h.bits.vat, vat) && h.valid, h.bits.rintent, 0.U))
}.flatten.foldLeft(0.U)(_|_))
val older_seq_rintents = otherSeqs.map { s =>
Mux(vatOlder(s.io.seq_hazard.bits.vat, vat) && s.io.seq_hazard.valid, s.io.seq_hazard.bits.rintent, 0.U)
}.reduce(_|_)
val older_rintents = older_issq_rintents | older_seq_rintents
val older_pipe_writes = vxus.flatten.map(_.io.pipe_hazards.toSeq).flatten.map { h =>
Mux(h.valid, h.bits.eg_oh, 0.U)
}.reduce(_|_)
val older_iter_writes = vxus.flatten.map(_.io.iter_hazards.toSeq).flatten.map { h =>
Mux(h.valid, h.bits.eg_oh, 0.U)
}.reduce(_|_)
seq.io.older_writes := older_pipe_writes | older_iter_writes | older_wintents
seq.io.older_reads := older_rintents
if (!vParams.enableOOO) {
// stall dispatch if any other sequencers are at the head and stalled
seq.io.dis_stall := otherSeqs.map { s =>
s.io.busy && s.io.head && !(s.io.iss.valid && s.io.iss.ready)
}.orR
} else {
seq.io.dis_stall := false.B // never stall dispatch
}
}
val accepts = group.seqs.map(_.accepts(vdq.io.deq.bits))
issq_stall(i) := !group.issq.io.enq.ready && accepts.orR
group.issq.io.enq.valid := vdq.io.deq.valid && !issq_stall.orR && accepts.orR
group.issq.io.enq.bits.viewAsSupertype(new VectorIssueInst) := vdq.io.deq.bits
group.issq.io.enq.bits.seq := VecInit(accepts).asUInt
// In case of multiple available sequencers, select the first ready one
val valid_seqs = group.issq.io.deq.bits.seq
val ready_seqs = VecInit(group.seqs.map(_.io.dis.ready)).asUInt
val chosen_seq = PriorityEncoder(valid_seqs & ready_seqs)
group.seqs.zipWithIndex.foreach{ case(s, j) =>
s.io.dis.valid := group.issq.io.deq.valid && chosen_seq === j.U
s.io.dis.bits := group.issq.io.deq.bits.viewAsSupertype(new BackendIssueInst)
}
group.issq.io.deq.ready := (valid_seqs & ready_seqs) =/= 0.U
}
val flat_vxs = vxs.flatten
val flat_vxus = vxus.flatten
require(flat_vxs.size == flat_vxus.size)
// Hazard checking for multi-VXS
// Check if there is a VRF write port hazard against the in-flight insns in other VXUs
// Check if there is a VRF write port hazard against a simultaneously issuing insn
// from another VXS (check that it's actually a valid hazard)
val inflight_hazards = WireInit(VecInit(Seq.fill(flat_vxs.length)(false.B)))
for (i <- 0 until flat_vxs.length) {
val other_vxu_idx = (0 until flat_vxs.length).filter(_ != i)
val inflight_hazard = other_vxu_idx.map(flat_vxus(_).io.pipe_hazards).flatten.map { hazard =>
hazard.valid &&
(hazard.bits.latency === flat_vxus(i).io.issue_pipe_latency) &&
(hazard.bits.eg(vrfBankBits-1,0) === flat_vxs(i).io.iss.bits.wvd_eg(vrfBankBits-1,0))
}.reduceOption(_ || _).getOrElse(false.B)
inflight_hazards(i) := inflight_hazard
val issue_hazard = other_vxu_idx.map { other_iss =>
(flat_vxus(other_iss).io.issue_pipe_latency === flat_vxus(i).io.issue_pipe_latency) &&
(flat_vxs(other_iss).io.iss.bits.wvd_eg(vrfBankBits-1,0) === flat_vxs(i).io.iss.bits.wvd_eg(vrfBankBits-1,0)) &&
vatOlder(flat_vxs(other_iss).io.iss.bits.vat, flat_vxs(i).io.iss.bits.vat) &&
!inflight_hazards(other_iss) &&
flat_vxs(other_iss).io.iss.valid &&
flat_vxus(other_iss).io.iss.ready
}.reduceOption(_ || _).getOrElse(false.B)
flat_vxus(i).io.iss.valid := flat_vxs(i).io.iss.valid && !inflight_hazard && !issue_hazard
flat_vxs(i).io.iss.ready := flat_vxus(i).io.iss.ready && !inflight_hazard && !issue_hazard
flat_vxus(i).io.iss.bits := flat_vxs(i).io.iss.bits
flat_vxs(i).io.acc := flat_vxus(i).io.acc_write
}
// Read ports (three banks) are
// vxs0-vrs1, vxs1-vrs1, vmu-index, frontend-index
// vxs0-vrs2, vxs1-vrs2
// vxs0-vrs3, vxs1-vrs3, vss-vrd
// Mask read ports (one bank) are
// vxs0-mask, vxs1-mask, vls-mask, vss-mask, vps-mask, frontend-mask
val vrf = Module(new RegisterFile(
reads = Seq(2 + flat_vxs.size, flat_vxs.size, 1 + flat_vxs.size),
maskReads = Seq(4 + flat_vxs.size),
pipeWrites = flat_vxus.size,
llWrites = flat_vxus.size + 2 // vxus + load + reset
))
val load_write = Wire(Decoupled(new VectorWrite(dLen)))
io.vmu.lresp.ready := vls.io.iss.valid && load_write.ready
vls.io.iss.ready := io.vmu.lresp.valid && load_write.ready
load_write.valid := vls.io.iss.valid && io.vmu.lresp.valid
load_write.bits.eg := vls.io.iss.bits.wvd_eg
load_write.bits.data := io.vmu.lresp.bits.data
load_write.bits.mask := FillInterleaved(8, vls.io.iss.bits.wmask)
when (io.vmu.lresp.fire) {
assert(io.vmu.lresp.bits.debug_id === vls.io.iss.bits.debug_id)
}
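// After reset, sweep every element group through a long-latency write port to zero-initialize the VRF; dispatch is blocked until the sweep finishes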
val resetting = RegInit(true.B)
val reset_ctr = RegInit(0.U(log2Ceil(egsTotal).W))
when (resetting) {
reset_ctr := reset_ctr + 1.U
io.dis.ready := false.B
}
when (~reset_ctr === 0.U) { resetting := false.B }
// Write ports
vrf.io.pipe_writes.zip(vxus.flatten).foreach { case (w,vxu) =>
w := vxu.io.pipe_write
}
vrf.io.ll_writes(0) <> load_write
vrf.io.ll_writes(1).valid := resetting
vrf.io.ll_writes(1).bits.eg := reset_ctr
vrf.io.ll_writes(1).bits.data := 0.U
vrf.io.ll_writes(1).bits.mask := ~(0.U(dLen.W))
vxus.flatten.zipWithIndex.foreach { case (vxu,i) =>
vrf.io.ll_writes(2+i) <> vxu.io.iter_write
}
flat_vxs.zipWithIndex.foreach { case(xs, i) =>
vrf.io.read(0)(i) <> xs.io.rvs1
vrf.io.read(1)(i) <> xs.io.rvs2
vrf.io.read(2)(i) <> xs.io.rvd
vrf.io.mask_read(0)(i) <> xs.io.rvm
}
vrf.io.read(0)(flat_vxs.length) <> vps.io.rvs2
vps.io.rvs1.req.ready := true.B
val index_access_eg = getEgId(io.index_access.vrs, io.index_access.eidx, io.index_access.eew, false.B)
val index_access_eg_oh = UIntToOH(index_access_eg)
val index_access_hazard = (allSeqs.map(_.io.seq_hazard).map { h =>
h.valid && ((h.bits.wintent & index_access_eg_oh) =/= 0.U)
} ++ allIssQs.map(_.io.hazards).flatten.map { h =>
h.valid && h.bits.wintent(io.index_access.vrs)
} ++ vxus.flatten.map(_.io.pipe_hazards).flatten.map { h =>
h.valid && h.bits.eg === index_access_eg
} ++ vxus.flatten.map(_.io.iter_hazards).flatten.map { h =>
h.valid && h.bits.eg === index_access_eg
}).orR || vdq.io.peek.map(i => i.valid && !(i.bits.vmu && i.bits.store)).orR
// TODO: this conservatively assumes an index-data hazard against anything in the vdq
vrf.io.read(0)(flat_vxs.size+1).req.valid := io.index_access.valid && !index_access_hazard
io.index_access.ready := vrf.io.read(0)(flat_vxs.size+1).req.ready && !index_access_hazard
vrf.io.read(0)(flat_vxs.size+1).req.bits.eg := index_access_eg
vrf.io.read(0)(flat_vxs.size+1).req.bits.oldest := false.B
io.index_access.idx := vrf.io.read(0)(flat_vxs.size+1).resp >> ((io.index_access.eidx << io.index_access.eew)(dLenOffBits-1,0) << 3) & eewBitMask(io.index_access.eew)
vrf.io.read(2)(flat_vxs.size) <> vss.io.rvd
io.vmu.sdata.valid := vss.io.iss.valid
io.vmu.sdata.bits := vss.io.iss.bits
vss.io.iss.ready := io.vmu.sdata.ready
vrf.io.mask_read(0)(flat_vxs.length) <> vls.io.rvm
vrf.io.mask_read(0)(flat_vxs.length+1) <> vss.io.rvm
vrf.io.mask_read(0)(flat_vxs.length+2) <> vps.io.rvm
val vm_busy = Wire(Bool())
vrf.io.mask_read(0)(flat_vxs.length+3).req.valid := io.mask_access.valid && !vm_busy
vrf.io.mask_read(0)(flat_vxs.length+3).req.bits.eg := getEgId(0.U, io.mask_access.eidx, 0.U, true.B)
vrf.io.mask_read(0)(flat_vxs.length+3).req.bits.oldest := false.B
io.mask_access.ready := vrf.io.mask_read(0)(flat_vxs.length+3).req.ready && !vm_busy
io.mask_access.mask := vrf.io.mask_read(0)(flat_vxs.length+3).resp >> io.mask_access.eidx(log2Ceil(dLen)-1,0)
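// Compact the permute sequencer's index bytes and mask bits into dense streams for the VMU; non-VMU permutes instead flow through perm_q into the permute buffer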
val vmu_index_q = Module(new Compactor(dLenB, dLenB, UInt(8.W), false))
val vmu_mask_q = Module(new Compactor(dLenB, dLenB, Bool(), false))
val perm_q = Module(new DCEQueue(new PermuteMicroOp, 2))
vmu_index_q.io.push_data := vps.io.iss.bits.rvs2_data.asTypeOf(Vec(dLenB, UInt(8.W)))
vmu_index_q.io.push.bits.head := vps.io.iss.bits.eidx << vps.io.iss.bits.rvs2_eew
vmu_index_q.io.push.bits.tail := Mux(vps.io.iss.bits.tail,
vps.io.iss.bits.vl << vps.io.iss.bits.rvs2_eew,
0.U)
vmu_mask_q.io.push_data := (vps.io.iss.bits.rvm_data >> vps.io.iss.bits.eidx(log2Ceil(dLen)-1,0))(dLenB-1,0).asBools
vmu_mask_q.io.push.bits.head := 0.U
vmu_mask_q.io.push.bits.tail := Mux(vps.io.iss.bits.tail, vps.io.iss.bits.vl, 0.U) - vps.io.iss.bits.eidx
vps.io.iss.ready := Mux(vps.io.iss.bits.vmu,
vmu_index_q.io.push.ready && vmu_mask_q.io.push.ready,
perm_q.io.enq.ready)
vmu_index_q.io.push.valid := vps.io.iss.valid && vps.io.iss.bits.vmu && vps.io.iss.bits.renv2 && vps.io.iss.ready
vmu_mask_q.io.push.valid := vps.io.iss.valid && vps.io.iss.bits.vmu && vps.io.iss.bits.renvm && vps.io.iss.ready
io.vmu.mask_pop <> vmu_mask_q.io.pop
io.vmu.mask_data := vmu_mask_q.io.pop_data
io.vmu.index_pop <> vmu_index_q.io.pop
io.vmu.index_data := vmu_index_q.io.pop_data
perm_q.io.enq.valid := vps.io.iss.valid && !vps.io.iss.bits.vmu
perm_q.io.enq.bits := vps.io.iss.bits
perm_q.io.deq.ready := perm_buffer.io.push.ready
perm_buffer.io.push.valid := perm_q.io.deq.valid
perm_buffer.io.push.bits.head := perm_q.io.deq.bits.eidx << perm_q.io.deq.bits.rvs2_eew
perm_buffer.io.push.bits.tail := Mux(perm_q.io.deq.bits.tail,
perm_q.io.deq.bits.vl << perm_q.io.deq.bits.rvs2_eew,
0.U)
perm_buffer.io.push_data := perm_q.io.deq.bits.rvs2_data.asTypeOf(Vec(dLenB, UInt(8.W)))
perm_buffer.io.pop <> vxs.head.head.io.perm.req
vxs.head.head.io.perm.data := perm_buffer.io.pop_data.asUInt
// Clear the age tags
var r_idx = 0
def clearVat(fire: Bool, tag: UInt) = {
assert(r_idx < nRelease)
io.vat_release(r_idx).valid := fire
io.vat_release(r_idx).bits := tag
r_idx += 1
}
clearVat(vls.io.iss.fire && vls.io.iss.bits.tail, vls.io.iss.bits.vat)
clearVat(vss.io.iss.fire && vss.io.iss.bits.tail, vss.io.iss.bits.vat)
vxs.flatten.foreach(xs => clearVat(xs.io.iss.fire && xs.io.iss.bits.tail, xs.io.iss.bits.vat))
// Signalling to frontend
val seq_inflight_wv0 = (allSeqs.map(_.io.seq_hazard).map { h =>
h.valid && ((h.bits.wintent & ~(0.U(egsPerVReg.W))) =/= 0.U)
} ++ allIssQs.map(_.io.hazards).flatten.map { h =>
h.valid && h.bits.wintent(0)
} ++ vxus.flatten.map(_.io.pipe_hazards).flatten.map { h =>
h.valid && (h.bits.eg < egsPerVReg.U)
} ++ vxus.flatten.map(_.io.iter_hazards).flatten.map { h =>
h.valid && (h.bits.eg < egsPerVReg.U)
}).orR
val vdq_inflight_wv0 = vdq.io.peek.map { h =>
h.valid && h.bits.may_write_v0
}.orR
vm_busy := seq_inflight_wv0 || vdq_inflight_wv0
io.busy := vdq.io.deq.valid || allSeqs.map(_.io.busy).orR || vxus.flatten.map(_.io.busy).asUInt.orR || resetting
io.set_vxsat := vxus.flatten.map(_.io.set_vxsat).asUInt.orR
io.set_fflags.valid := vxus.flatten.map(_.io.set_fflags.valid).asUInt.orR
io.set_fflags.bits := vxus.flatten.map( xu => Mux(xu.io.set_fflags.valid, xu.io.set_fflags.bits, 0.U)).reduce(_|_)
// Only one of these should actually be connected
val scalar_write_arb = Module(new Arbiter(new ScalarWrite, flat_vxus.size))
vxus.flatten.map(_.io.scalar_write).zip(scalar_write_arb.io.in).foreach { case (i,o) => o <> i }
io.scalar_resp <> scalar_write_arb.io.out
}
File ExecuteSequencer.scala:
package saturn.backend
import chisel3._
import chisel3.util._
import chisel3.experimental.dataview._
import org.chipsalliance.cde.config._
import freechips.rocketchip.rocket._
import freechips.rocketchip.util._
import freechips.rocketchip.tile._
import saturn.common._
import saturn.insns._
class ExecuteSequencer(supported_insns: Seq[VectorInstruction])(implicit p: Parameters) extends PipeSequencer(new ExecuteMicroOp)(p) {
def accepts(inst: VectorIssueInst) = !inst.vmu && new VectorDecoder(inst.funct3, inst.funct6, inst.rs1, inst.rs2, supported_insns, Nil).matched
val valid = RegInit(false.B)
val inst = Reg(new BackendIssueInst)
val head = Reg(Bool())
val reduction_head = Reg(Bool())
val wvd_mask = Reg(UInt(egsTotal.W))
val rvs1_mask = Reg(UInt(egsTotal.W))
val rvs2_mask = Reg(UInt(egsTotal.W))
val rvd_mask = Reg(UInt(egsTotal.W))
val rvm_mask = Reg(UInt(egsPerVReg.W))
val slide = Reg(Bool())
val slide_up = Reg(Bool())
val slide1 = Reg(Bool())
val slide_offset = Reg(UInt((1+log2Ceil(maxVLMax)).W))
val perm_head = Reg(UInt(dLenOffBits.W))
val perm_tail = Reg(UInt(dLenOffBits.W))
val acc = Reg(Vec(dLenB, UInt(8.W)))
val acc_ready = Reg(Bool())
val acc_tail = Reg(Bool())
val acc_tail_id = Reg(UInt(log2Ceil(dLenB).W))
val ctrl = new VectorDecoder(inst.funct3, inst.funct6, inst.rs1, inst.rs2, supported_insns,
Seq(SetsWMask, UsesPermuteSeq, FPAdd, FPComp, Elementwise, UsesNarrowingSext, ZextImm5))
val mvnrr = inst.funct3 === OPIVI && inst.opif6 === OPIFunct6.mvnrr
val rgatherei16 = inst.funct3 === OPIVV && inst.opif6 === OPIFunct6.rgatherei16
val compress = inst.opmf6 === OPMFunct6.compress
val vs1_eew = Mux(rgatherei16, 1.U, inst.vconfig.vtype.vsew)
val vs2_eew = inst.vconfig.vtype.vsew + inst.wide_vs2 - Mux(ctrl.bool(UsesNarrowingSext), ~inst.rs1(2,1) + 1.U, 0.U)
val vs3_eew = inst.vconfig.vtype.vsew + inst.wide_vd
val vd_eew = inst.vconfig.vtype.vsew + inst.wide_vd
val incr_eew = Seq(
Mux(inst.renv1, vs1_eew, 0.U),
Mux(inst.renv2, vs2_eew, 0.U),
Mux(inst.renvd, vs3_eew, 0.U),
vd_eew).foldLeft(0.U(2.W)) { case (b, a) => Mux(a > b, a, b) }
val acc_elementwise_opcodes = (Seq(OPFFunct6.fredosum, OPFFunct6.fwredosum) ++
(if (vParams.useScalarFPMisc) Seq(OPFFunct6.fredmax, OPFFunct6.fredmin) else Nil) ++
(if (vParams.useScalarFPFMA) Seq(OPFFunct6.fredusum, OPFFunct6.fwredusum) else Nil)
)
val acc_copy = (vd_eew === 3.U && (dLenB == 8).B) || inst.opff6.isOneOf(acc_elementwise_opcodes)
val acc_last = acc_tail_id + 1.U === log2Ceil(dLenB).U - vd_eew || acc_copy
val uscalar = Mux(inst.funct3(2), inst.rs1_data, inst.imm5)
val sscalar = Mux(inst.funct3(2), inst.rs1_data, inst.imm5_sext)
val rgather = inst.opif6 === OPIFunct6.rgather
val rgather_ix = rgather && inst.funct3.isOneOf(OPIVX, OPIVI)
val rgather_v = rgather && inst.funct3.isOneOf(OPIVV)
val renv1 = Mux(inst.reduction, reduction_head, inst.renv1)
val renv2 = Mux(rgather_ix, head, Mux(inst.reduction, !reduction_head && !acc_tail, inst.renv2))
val renvd = inst.renvd
val renvm = inst.renvm
val renacc = inst.reduction
val use_wmask = !inst.vm && ctrl.bool(SetsWMask)
val eidx = Reg(UInt(log2Ceil(maxVLMax).W))
val eff_vl = Mux(mvnrr, ((vLen/8).U >> vd_eew) << inst.emul, Mux(inst.scalar_to_vd0, 1.U, inst.vconfig.vl))
val increments_as_mask = (!inst.renv1 || inst.reads_vs1_mask) && (!inst.renv2 || inst.reads_vs2_mask) && (!inst.wvd || inst.writes_mask)
val next_eidx = get_next_eidx(eff_vl, eidx, incr_eew, 0.U, increments_as_mask, ctrl.bool(Elementwise))
val eidx_tail = next_eidx === eff_vl
val tail = Mux(inst.reduction, acc_tail && acc_last, eidx_tail)
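// Accept a new instruction only when idle or when the current one issues its final beat, and dispatch is not externally stalled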
io.dis.ready := (!valid || (tail && io.iss.fire)) && !io.dis_stall
when (io.dis.fire) {
val dis_inst = io.dis.bits
valid := true.B
inst := io.dis.bits
assert(dis_inst.vstart === 0.U)
eidx := 0.U
val vd_arch_mask = get_arch_mask(dis_inst.rd , dis_inst.emul +& dis_inst.wide_vd)
val vs1_arch_mask = get_arch_mask(dis_inst.rs1, Mux(dis_inst.reads_vs1_mask, 0.U, dis_inst.emul))
val vs2_arch_mask = get_arch_mask(dis_inst.rs2, Mux(dis_inst.reads_vs2_mask, 0.U, dis_inst.emul +& dis_inst.wide_vs2))
wvd_mask := Mux(dis_inst.wvd , FillInterleaved(egsPerVReg, vd_arch_mask), 0.U)
rvs1_mask := Mux(dis_inst.renv1, FillInterleaved(egsPerVReg, vs1_arch_mask), 0.U)
rvs2_mask := Mux(dis_inst.renv2, FillInterleaved(egsPerVReg, vs2_arch_mask), 0.U)
rvd_mask := Mux(dis_inst.renvd, FillInterleaved(egsPerVReg, vd_arch_mask), 0.U)
rvm_mask := Mux(dis_inst.renvm, ~(0.U(egsPerVReg.W)), 0.U)
head := true.B
reduction_head := true.B
acc_tail := false.B
acc_tail_id := 0.U
acc_ready := true.B
val dis_slide = (dis_inst.funct6.isOneOf(OPIFunct6.slideup.litValue.U, OPIFunct6.slidedown.litValue.U)
&& dis_inst.funct3 =/= OPIVV)
val dis_slide_up = !dis_inst.funct6(0)
val dis_vl = dis_inst.vconfig.vl
val dis_sew = dis_inst.vconfig.vtype.vsew
val dis_vlmax = dis_inst.vconfig.vtype.vlMax
val dis_next_eidx = get_next_eidx(dis_vl, 0.U, dis_sew, 0.U, false.B, false.B)
val dis_slide1 = !dis_inst.isOpi
val dis_uscalar = Mux(dis_inst.funct3(2), dis_inst.rs1_data, dis_inst.imm5)
val dis_slide_offset = Mux(!dis_slide1, get_max_offset(dis_uscalar), 1.U)
val dis_tail = dis_next_eidx === dis_vl
val dis_rgather_eew = Mux(dis_inst.opif6 === OPIFunct6.rgatherei16, 1.U, dis_sew)
slide := dis_slide
when (dis_slide) {
slide_up := dis_slide_up
slide1 := dis_slide1
slide_offset := dis_slide_offset
}
perm_head := Mux(dis_slide && dis_slide_up,
(dis_slide_offset << dis_sew)(dLenOffBits-1,0),
0.U)
perm_tail := Mux(dis_slide,
Mux(dis_slide_up,
Mux(dis_tail, dis_vl << dis_sew, 0.U),
(Mux(dis_next_eidx + dis_slide_offset <= dis_vlmax, dis_next_eidx, dis_vlmax - dis_slide_offset) << dis_sew)(dLenOffBits-1,0)
),
1.U << dis_rgather_eew)
} .elsewhen (io.iss.fire) {
valid := !tail
head := false.B
}
when (io.acc.valid) {
acc_ready := true.B
for (i <- 0 until dLenB) when (io.acc.bits.mask(i*8)) { acc(i) := io.acc.bits.data >> (i*8) }
}
io.vat := inst.vat
io.seq_hazard.valid := valid
io.seq_hazard.bits.rintent := hazardMultiply(rvs1_mask | rvs2_mask | rvd_mask | rvm_mask)
io.seq_hazard.bits.wintent := hazardMultiply(wvd_mask)
io.seq_hazard.bits.vat := inst.vat
val vs1_read_oh = Mux(renv1 , UIntToOH(io.rvs1.req.bits.eg), 0.U)
val vs2_read_oh = Mux(renv2 , UIntToOH(io.rvs2.req.bits.eg), 0.U)
val vd_read_oh = Mux(renvd , UIntToOH(io.rvd.req.bits.eg ), 0.U)
val vm_read_oh = Mux(renvm , UIntToOH(io.rvm.req.bits.eg ), 0.U)
val vd_write_oh = Mux(inst.wvd, UIntToOH(io.iss.bits.wvd_eg), 0.U)
val raw_hazard = ((vs1_read_oh | vs2_read_oh | vd_read_oh | vm_read_oh) & io.older_writes) =/= 0.U
val waw_hazard = (vd_write_oh & io.older_writes) =/= 0.U
val war_hazard = (vd_write_oh & io.older_reads) =/= 0.U
val data_hazard = raw_hazard || waw_hazard || war_hazard
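// Reduction accumulator seed depends on the op: zeros, all-ones, or the most-positive/most-negative integer or FP encoding for min/max reductions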
val acc_insns = supported_insns.filter(_.props.contains(Reduction.Y))
val acc_ctrl = new VectorDecoder(inst.funct3, inst.funct6, inst.rs1, inst.rs2, acc_insns, Seq(AccInitZeros, AccInitOnes, AccInitPos, AccInitNeg))
val acc_init_fp_pos = inst.opff6 === OPFFunct6.fredmin
val acc_init_fp_neg = inst.opff6 === OPFFunct6.fredmax
val acc_init = Mux1H(Seq(
(acc_ctrl.bool(AccInitZeros) , 0.U(dLen.W)),
(acc_ctrl.bool(AccInitOnes) , ~(0.U(dLen.W))),
(acc_ctrl.bool(AccInitPos) , VecInit.tabulate(4)({sew => Fill(dLenB >> sew, maxPosUInt(sew))})(vd_eew)),
(acc_ctrl.bool(AccInitNeg) , VecInit.tabulate(4)({sew => Fill(dLenB >> sew, minNegUInt(sew))})(vd_eew)),
(acc_init_fp_pos, VecInit.tabulate(4)({sew => Fill(dLenB >> sew, maxPosFPUInt(sew))})(vd_eew)),
(acc_init_fp_neg, VecInit.tabulate(4)({sew => Fill(dLenB >> sew, minNegFPUInt(sew))})(vd_eew)),
))
val rgather_eidx = get_max_offset(Mux(rgather_ix && rgather, uscalar, io.perm.data & eewBitMask(vs1_eew)))
val rgather_zero = rgather_eidx >= inst.vconfig.vtype.vlMax
val rvs2_eidx = Mux(rgather || rgatherei16, rgather_eidx, eidx)
io.rvs1.req.bits.eg := getEgId(inst.rs1, eidx , vs1_eew, inst.reads_vs1_mask)
io.rvs2.req.bits.eg := getEgId(inst.rs2, rvs2_eidx, vs2_eew, inst.reads_vs2_mask)
io.rvd.req.bits.eg := getEgId(inst.rd , eidx , vs3_eew, false.B)
io.rvm.req.bits.eg := getEgId(0.U , eidx , 0.U , true.B)
io.rvs1.req.valid := valid && renv1
io.rvs2.req.valid := valid && renv2
io.rvd.req.valid := valid && renvd
io.rvm.req.valid := valid && renvm
val oldest = inst.vat === io.vat_head
io.rvs1.req.bits.oldest := oldest
io.rvs2.req.bits.oldest := oldest
io.rvd.req.bits.oldest := oldest
io.rvm.req.bits.oldest := oldest
val read_perm_buffer = ctrl.bool(UsesPermuteSeq) && (!slide || Mux(slide_up,
next_eidx > slide_offset,
eidx +& slide_offset < inst.vconfig.vtype.vlMax))
io.perm.req.bits.head := perm_head
io.perm.req.bits.tail := perm_tail
val slide_down_byte_mask = Mux(slide && !slide_up && next_eidx + slide_offset > inst.vconfig.vtype.vlMax,
Mux(eidx +& slide_offset >= inst.vconfig.vtype.vlMax,
0.U,
~(0.U(dLenB.W)) >> (0.U(dLenOffBits.W) - ((inst.vconfig.vtype.vlMax - slide_offset) << vs2_eew))(dLenOffBits-1,0)),
~(0.U(dLenB.W)))
val slide_down_bit_mask = FillInterleaved(8, slide_down_byte_mask)
val iss_valid = (valid &&
!data_hazard &&
!(renv1 && !io.rvs1.req.ready) &&
!(renv2 && !io.rvs2.req.ready) &&
!(renvd && !io.rvd.req.ready) &&
!(renvm && !io.rvm.req.ready) &&
!(read_perm_buffer && !io.perm.req.ready) &&
!(renacc && !acc_ready)
)
io.perm.req.valid := iss_valid && read_perm_buffer && io.iss.ready
io.iss.valid := iss_valid && !(inst.reduction && reduction_head)
io.iss.bits.rvs1_data := io.rvs1.resp
io.iss.bits.rvs2_data := io.rvs2.resp
io.iss.bits.rvd_data := io.rvd.resp
io.iss.bits.rvs1_elem := extractElem(io.rvs1.resp, vs1_eew, eidx)
io.iss.bits.rvs2_elem := extractElem(io.rvs2.resp, vs2_eew, eidx)
io.iss.bits.rvd_elem := extractElem(io.rvd.resp , vs3_eew, eidx)
io.iss.bits.rvs1_eew := vs1_eew
io.iss.bits.rvs2_eew := vs2_eew
io.iss.bits.rvd_eew := vs3_eew
io.iss.bits.vd_eew := vd_eew
io.iss.bits.eidx := eidx
io.iss.bits.vl := inst.vconfig.vl
io.iss.bits.wvd_eg := getEgId(inst.rd, Mux(inst.reduction, 0.U, eidx), vd_eew, inst.writes_mask)
io.iss.bits.rs1 := inst.rs1
io.iss.bits.rs2 := inst.rs2
io.iss.bits.rd := inst.rd
io.iss.bits.funct3 := inst.funct3
io.iss.bits.funct6 := inst.funct6
io.iss.bits.tail := tail
io.iss.bits.head := head
io.iss.bits.acc := inst.reduction
io.iss.bits.vat := inst.vat
io.iss.bits.vm := inst.vm
io.iss.bits.rm := inst.rm
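// Build the per-byte write mask for this beat from element head/tail bounds, the v0 mask, and slide-up boundaries; reduction tail beats use the accumulator fold mask instead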
val dlen_mask = ~(0.U(dLenB.W))
val head_mask = dlen_mask << (eidx << vd_eew)(dLenOffBits-1,0)
val tail_mask = dlen_mask >> (0.U(dLenOffBits.W) - (next_eidx << vd_eew)(dLenOffBits-1,0))
val slide1up_mask = Mux(head && !inst.isOpi, eewByteMask(vs2_eew), 0.U)
val slideup_mask = Mux(slide && slide_up && eidx < slide_offset,
Mux(next_eidx <= slide_offset, 0.U, dlen_mask << (slide_offset << vd_eew)(dLenOffBits-1,0)) | slide1up_mask,
dlen_mask)
val full_tail_mask = Mux(tail,
~(0.U(dLen.W)) >> (0.U(log2Ceil(dLen).W) - eff_vl(log2Ceil(dLen)-1,0)),
~(0.U(dLen.W))
)
val vm_off = ((1 << dLenOffBits) - 1).U(log2Ceil(dLen).W)
val vm_eidx = (eidx & ~(vm_off >> vd_eew))(log2Ceil(dLen)-1,0)
val vm_resp = (io.rvm.resp >> vm_eidx)(dLenB-1,0)
val vm_mask = Mux(use_wmask,
VecInit.tabulate(4)({ sew => FillInterleaved(1 << sew, vm_resp)(dLenB-1,0) })(vd_eew),
~(0.U(dLenB.W))
)
val acc_mask = Mux(acc_last,
eewByteMask(vd_eew),
VecInit.tabulate(log2Ceil(dLenB))(i => ~(0.U((dLen>>i).W)))(acc_tail_id))
io.iss.bits.wmask := Mux(inst.reduction && acc_tail,
acc_mask,
head_mask & tail_mask & vm_mask & slideup_mask)
io.iss.bits.rmask := Mux(inst.vm, ~(0.U(dLenB.W)), vm_resp)
io.iss.bits.rvm_data := Mux(inst.vm, ~(0.U(dLen.W)), io.rvm.resp)
io.iss.bits.full_tail_mask := full_tail_mask
when (inst.funct3.isOneOf(OPIVI, OPIVX, OPMVX, OPFVF)) {
io.iss.bits.rvs1_elem := sscalar
io.iss.bits.rvs1_data := dLenSplat(Mux(ctrl.bool(ZextImm5), uscalar, sscalar), vs1_eew)
}
when (inst.reduction) {
val acc_bits = acc.asUInt
val elementwise_acc = inst.opff6.isOneOf(OPFFunct6.fredosum, OPFFunct6.fwredosum) || (
vParams.useScalarFPMisc.B && ctrl.bool(FPComp) && inst.isOpf
) || (
vParams.useScalarFPFMA.B && ctrl.bool(FPAdd) && inst.isOpf
)
when (elementwise_acc && !acc_tail) {
io.iss.bits.rvs2_data := io.iss.bits.rvs2_elem
val mask_bit = Mux(use_wmask, (io.rvm.resp >> eidx(log2Ceil(dLen)-1,0))(0), true.B)
io.iss.bits.wmask := VecInit.tabulate(4)({sew => Fill(1 << sew, mask_bit)})(vd_eew)
}
when (acc_tail) {
val folded = VecInit.tabulate(log2Ceil(dLenB))(i => {
val start = dLen >> (1 + i)
acc_bits(2*start-1,start)
})(acc_tail_id)
io.iss.bits.rvs1_elem := Mux(acc_copy, acc_init, folded)
io.iss.bits.rvs1_data := Mux(acc_copy, acc_init, folded)
io.iss.bits.rvs1_eew := vd_eew
io.iss.bits.rvs2_elem := acc_bits
io.iss.bits.rvs2_data := acc_bits
io.iss.bits.rvs2_eew := vd_eew
} .otherwise {
io.iss.bits.rvs1_elem := acc_bits
io.iss.bits.rvs1_data := acc_bits
io.iss.bits.rvs1_eew := vd_eew
}
}
when (rgather_v || rgatherei16) {
io.iss.bits.rvs1_elem := rgather_eidx
io.iss.bits.rvs1_data := rgather_eidx
}
when (rgather_zero && (rgather || rgatherei16)) {
io.iss.bits.rvs2_elem := 0.U
io.iss.bits.rvs2_data := 0.U
}
when (slide) {
io.iss.bits.rvs2_elem := io.perm.data & slide_down_bit_mask
io.iss.bits.rvs2_data := io.perm.data & slide_down_bit_mask
}
when (iss_valid && inst.reduction && reduction_head) {
val v0_mask = eewBitMask(vd_eew)
acc := ((acc_init & ~v0_mask.pad(dLen)) | (io.rvs1.resp & v0_mask)).asTypeOf(Vec(dLenB, UInt(8.W)))
reduction_head := false.B
}
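// On each issued beat, retire hazard bits for element groups that are finished (when chaining is enabled), advance eidx, and update the accumulator fold and slide permute window state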
when (io.iss.fire && !tail) {
when (next_is_new_eg(eidx, next_eidx, vd_eew, inst.writes_mask) && !inst.reduction && !compress && vParams.enableChaining.B) {
val wvd_clr_mask = UIntToOH(io.iss.bits.wvd_eg)
wvd_mask := wvd_mask & ~wvd_clr_mask
}
when (next_is_new_eg(eidx, next_eidx, vs2_eew, inst.reads_vs2_mask) && !(inst.reduction && head) && !rgather_v && !rgatherei16 && vParams.enableChaining.B) {
rvs2_mask := rvs2_mask & ~UIntToOH(io.rvs2.req.bits.eg)
}
when (rgather_ix && vParams.enableChaining.B) {
rvs2_mask := 0.U
}
when (next_is_new_eg(eidx, next_eidx, vs1_eew, inst.reads_vs1_mask) && vParams.enableChaining.B) {
rvs1_mask := rvs1_mask & ~UIntToOH(io.rvs1.req.bits.eg)
}
when (next_is_new_eg(eidx, next_eidx, vs3_eew, false.B) && vParams.enableChaining.B) {
rvd_mask := rvd_mask & ~UIntToOH(io.rvd.req.bits.eg)
}
when (next_is_new_eg(eidx, next_eidx, 0.U , true.B) && vParams.enableChaining.B) {
rvm_mask := rvm_mask & ~UIntToOH(io.rvm.req.bits.eg)
}
acc_ready := false.B
when (eidx_tail) { acc_tail := true.B }
when (acc_tail) { acc_tail_id := acc_tail_id + 1.U }
eidx := next_eidx
when (ctrl.bool(UsesPermuteSeq) && slide) {
val next_next_eidx = get_next_eidx(eff_vl, next_eidx, incr_eew, 0.U, increments_as_mask, ctrl.bool(Elementwise))
val next_tail = next_next_eidx === eff_vl
perm_head := Mux(slide_up,
Mux(next_eidx < slide_offset, (slide_offset << vs2_eew)(dLenOffBits-1,0), 0.U),
next_eidx << vs2_eew)
perm_tail := Mux(slide_up,
Mux(next_tail, eff_vl << vs2_eew, 0.U),
(Mux(next_next_eidx + slide_offset <= inst.vconfig.vtype.vlMax, next_next_eidx, inst.vconfig.vtype.vlMax - slide_offset) << vs2_eew)(dLenOffBits-1,0))
}
}
io.busy := valid
io.head := head
}
| module VectorBackend( // @[Backend.scala:14:7]
input clock, // @[Backend.scala:14:7]
input reset, // @[Backend.scala:14:7]
output io_dis_ready, // @[Backend.scala:15:14]
input io_dis_valid, // @[Backend.scala:15:14]
input [31:0] io_dis_bits_bits, // @[Backend.scala:15:14]
input [8:0] io_dis_bits_vconfig_vl, // @[Backend.scala:15:14]
input [2:0] io_dis_bits_vconfig_vtype_vsew, // @[Backend.scala:15:14]
input io_dis_bits_vconfig_vtype_vlmul_sign, // @[Backend.scala:15:14]
input [1:0] io_dis_bits_vconfig_vtype_vlmul_mag, // @[Backend.scala:15:14]
input [7:0] io_dis_bits_vstart, // @[Backend.scala:15:14]
input [2:0] io_dis_bits_segstart, // @[Backend.scala:15:14]
input [2:0] io_dis_bits_segend, // @[Backend.scala:15:14]
input [63:0] io_dis_bits_rs1_data, // @[Backend.scala:15:14]
input [4:0] io_dis_bits_vat, // @[Backend.scala:15:14]
input [2:0] io_dis_bits_rm, // @[Backend.scala:15:14]
input [1:0] io_dis_bits_emul, // @[Backend.scala:15:14]
input [15:0] io_dis_bits_debug_id, // @[Backend.scala:15:14]
input [1:0] io_dis_bits_mop, // @[Backend.scala:15:14]
output io_vmu_lresp_ready, // @[Backend.scala:15:14]
input io_vmu_lresp_valid, // @[Backend.scala:15:14]
input [63:0] io_vmu_lresp_bits_data, // @[Backend.scala:15:14]
input [15:0] io_vmu_lresp_bits_debug_id, // @[Backend.scala:15:14]
input io_vmu_sdata_ready, // @[Backend.scala:15:14]
output io_vmu_sdata_valid, // @[Backend.scala:15:14]
output [63:0] io_vmu_sdata_bits_stdata, // @[Backend.scala:15:14]
output [7:0] io_vmu_sdata_bits_stmask, // @[Backend.scala:15:14]
output [15:0] io_vmu_sdata_bits_debug_id, // @[Backend.scala:15:14]
output io_vmu_mask_pop_ready, // @[Backend.scala:15:14]
input io_vmu_mask_pop_valid, // @[Backend.scala:15:14]
output io_vmu_mask_data_0, // @[Backend.scala:15:14]
output io_vmu_index_pop_ready, // @[Backend.scala:15:14]
input io_vmu_index_pop_valid, // @[Backend.scala:15:14]
input [2:0] io_vmu_index_pop_bits_tail, // @[Backend.scala:15:14]
output [7:0] io_vmu_index_data_0, // @[Backend.scala:15:14]
output [7:0] io_vmu_index_data_1, // @[Backend.scala:15:14]
output [7:0] io_vmu_index_data_2, // @[Backend.scala:15:14]
output [7:0] io_vmu_index_data_3, // @[Backend.scala:15:14]
output [7:0] io_vmu_index_data_4, // @[Backend.scala:15:14]
output [7:0] io_vmu_index_data_5, // @[Backend.scala:15:14]
output [7:0] io_vmu_index_data_6, // @[Backend.scala:15:14]
output [7:0] io_vmu_index_data_7, // @[Backend.scala:15:14]
output io_busy, // @[Backend.scala:15:14]
output io_index_access_ready, // @[Backend.scala:15:14]
input io_index_access_valid, // @[Backend.scala:15:14]
input [4:0] io_index_access_vrs, // @[Backend.scala:15:14]
input [8:0] io_index_access_eidx, // @[Backend.scala:15:14]
input [1:0] io_index_access_eew, // @[Backend.scala:15:14]
output [63:0] io_index_access_idx, // @[Backend.scala:15:14]
output io_mask_access_ready, // @[Backend.scala:15:14]
input io_mask_access_valid, // @[Backend.scala:15:14]
input [8:0] io_mask_access_eidx, // @[Backend.scala:15:14]
output io_mask_access_mask, // @[Backend.scala:15:14]
input io_scalar_resp_ready, // @[Backend.scala:15:14]
output io_scalar_resp_valid, // @[Backend.scala:15:14]
output [63:0] io_scalar_resp_bits_data, // @[Backend.scala:15:14]
output io_scalar_resp_bits_fp, // @[Backend.scala:15:14]
output [1:0] io_scalar_resp_bits_size, // @[Backend.scala:15:14]
output [4:0] io_scalar_resp_bits_rd, // @[Backend.scala:15:14]
output io_set_vxsat, // @[Backend.scala:15:14]
output io_set_fflags_valid, // @[Backend.scala:15:14]
output [4:0] io_set_fflags_bits, // @[Backend.scala:15:14]
input io_fp_req_ready, // @[Backend.scala:15:14]
output io_fp_req_valid, // @[Backend.scala:15:14]
output io_fp_req_bits_ren2, // @[Backend.scala:15:14]
output [1:0] io_fp_req_bits_typeTagIn, // @[Backend.scala:15:14]
output [1:0] io_fp_req_bits_typeTagOut, // @[Backend.scala:15:14]
output io_fp_req_bits_fromint, // @[Backend.scala:15:14]
output io_fp_req_bits_toint, // @[Backend.scala:15:14]
output io_fp_req_bits_fastpipe, // @[Backend.scala:15:14]
output io_fp_req_bits_div, // @[Backend.scala:15:14]
output io_fp_req_bits_sqrt, // @[Backend.scala:15:14]
output io_fp_req_bits_wflags, // @[Backend.scala:15:14]
output [2:0] io_fp_req_bits_rm, // @[Backend.scala:15:14]
output [1:0] io_fp_req_bits_typ, // @[Backend.scala:15:14]
output [64:0] io_fp_req_bits_in1, // @[Backend.scala:15:14]
output [64:0] io_fp_req_bits_in2, // @[Backend.scala:15:14]
input io_fp_resp_valid, // @[Backend.scala:15:14]
input [64:0] io_fp_resp_bits_data, // @[Backend.scala:15:14]
input [4:0] io_vat_tail, // @[Backend.scala:15:14]
input [4:0] io_vat_head, // @[Backend.scala:15:14]
output io_vat_release_0_valid, // @[Backend.scala:15:14]
output [4:0] io_vat_release_0_bits, // @[Backend.scala:15:14]
output io_vat_release_1_valid, // @[Backend.scala:15:14]
output [4:0] io_vat_release_1_bits, // @[Backend.scala:15:14]
output io_vat_release_2_valid, // @[Backend.scala:15:14]
output [4:0] io_vat_release_2_bits // @[Backend.scala:15:14]
);
wire vm_busy; // @[Backend.scala:436:31]
wire issq_stall_3; // @[Backend.scala:223:47]
wire issq_stall_2; // @[Backend.scala:223:47]
wire issq_stall_1; // @[Backend.scala:223:47]
wire issq_stall_0; // @[Backend.scala:223:47]
wire _scalar_write_arb_io_in_0_ready; // @[Backend.scala:443:32]
wire _perm_q_io_enq_ready; // @[Backend.scala:370:22]
wire _perm_q_io_deq_valid; // @[Backend.scala:370:22]
wire [63:0] _perm_q_io_deq_bits_rvs2_data; // @[Backend.scala:370:22]
wire [7:0] _perm_q_io_deq_bits_eidx; // @[Backend.scala:370:22]
wire [1:0] _perm_q_io_deq_bits_rvs2_eew; // @[Backend.scala:370:22]
wire [8:0] _perm_q_io_deq_bits_vl; // @[Backend.scala:370:22]
wire _perm_q_io_deq_bits_tail; // @[Backend.scala:370:22]
wire _vmu_mask_q_io_push_ready; // @[Backend.scala:369:26]
wire _vmu_index_q_io_push_ready; // @[Backend.scala:368:27]
wire _vrf_io_read_2_0_req_ready; // @[Backend.scala:283:19]
wire [63:0] _vrf_io_read_2_0_resp; // @[Backend.scala:283:19]
wire _vrf_io_read_2_1_req_ready; // @[Backend.scala:283:19]
wire [63:0] _vrf_io_read_2_1_resp; // @[Backend.scala:283:19]
wire _vrf_io_read_1_0_req_ready; // @[Backend.scala:283:19]
wire [63:0] _vrf_io_read_1_0_resp; // @[Backend.scala:283:19]
wire _vrf_io_read_0_0_req_ready; // @[Backend.scala:283:19]
wire [63:0] _vrf_io_read_0_0_resp; // @[Backend.scala:283:19]
wire _vrf_io_read_0_1_req_ready; // @[Backend.scala:283:19]
wire [63:0] _vrf_io_read_0_1_resp; // @[Backend.scala:283:19]
wire _vrf_io_read_0_2_req_ready; // @[Backend.scala:283:19]
wire [63:0] _vrf_io_read_0_2_resp; // @[Backend.scala:283:19]
wire _vrf_io_mask_read_0_0_req_ready; // @[Backend.scala:283:19]
wire [63:0] _vrf_io_mask_read_0_0_resp; // @[Backend.scala:283:19]
wire _vrf_io_mask_read_0_1_req_ready; // @[Backend.scala:283:19]
wire [63:0] _vrf_io_mask_read_0_1_resp; // @[Backend.scala:283:19]
wire _vrf_io_mask_read_0_2_req_ready; // @[Backend.scala:283:19]
wire [63:0] _vrf_io_mask_read_0_2_resp; // @[Backend.scala:283:19]
wire _vrf_io_mask_read_0_3_req_ready; // @[Backend.scala:283:19]
wire [63:0] _vrf_io_mask_read_0_3_resp; // @[Backend.scala:283:19]
wire _vrf_io_mask_read_0_4_req_ready; // @[Backend.scala:283:19]
wire [63:0] _vrf_io_mask_read_0_4_resp; // @[Backend.scala:283:19]
wire _vrf_io_ll_writes_0_ready; // @[Backend.scala:283:19]
wire _vrf_io_ll_writes_2_ready; // @[Backend.scala:283:19]
wire _vxufp_int_io_iss_ready; // @[Backend.scala:69:51]
wire _vxufp_int_io_iter_hazards_0_valid; // @[Backend.scala:69:51]
wire [6:0] _vxufp_int_io_iter_hazards_0_bits_eg; // @[Backend.scala:69:51]
wire _vxufp_int_io_iter_hazards_1_valid; // @[Backend.scala:69:51]
wire [6:0] _vxufp_int_io_iter_hazards_1_bits_eg; // @[Backend.scala:69:51]
wire _vxufp_int_io_iter_write_valid; // @[Backend.scala:69:51]
wire [6:0] _vxufp_int_io_iter_write_bits_eg; // @[Backend.scala:69:51]
wire [63:0] _vxufp_int_io_iter_write_bits_data; // @[Backend.scala:69:51]
wire [63:0] _vxufp_int_io_iter_write_bits_mask; // @[Backend.scala:69:51]
wire _vxufp_int_io_pipe_write_valid; // @[Backend.scala:69:51]
wire [6:0] _vxufp_int_io_pipe_write_bits_eg; // @[Backend.scala:69:51]
wire [63:0] _vxufp_int_io_pipe_write_bits_data; // @[Backend.scala:69:51]
wire [63:0] _vxufp_int_io_pipe_write_bits_mask; // @[Backend.scala:69:51]
wire _vxufp_int_io_acc_write_valid; // @[Backend.scala:69:51]
wire [63:0] _vxufp_int_io_acc_write_bits_data; // @[Backend.scala:69:51]
wire [63:0] _vxufp_int_io_acc_write_bits_mask; // @[Backend.scala:69:51]
wire _vxufp_int_io_scalar_write_valid; // @[Backend.scala:69:51]
wire [63:0] _vxufp_int_io_scalar_write_bits_data; // @[Backend.scala:69:51]
wire _vxufp_int_io_scalar_write_bits_fp; // @[Backend.scala:69:51]
wire [1:0] _vxufp_int_io_scalar_write_bits_size; // @[Backend.scala:69:51]
wire [4:0] _vxufp_int_io_scalar_write_bits_rd; // @[Backend.scala:69:51]
wire _vxufp_int_io_pipe_hazards_0_valid; // @[Backend.scala:69:51]
wire [6:0] _vxufp_int_io_pipe_hazards_0_bits_eg; // @[Backend.scala:69:51]
wire _vxufp_int_io_pipe_hazards_1_valid; // @[Backend.scala:69:51]
wire [6:0] _vxufp_int_io_pipe_hazards_1_bits_eg; // @[Backend.scala:69:51]
wire _vxufp_int_io_pipe_hazards_2_valid; // @[Backend.scala:69:51]
wire [6:0] _vxufp_int_io_pipe_hazards_2_bits_eg; // @[Backend.scala:69:51]
wire _vxufp_int_io_pipe_hazards_3_valid; // @[Backend.scala:69:51]
wire [6:0] _vxufp_int_io_pipe_hazards_3_bits_eg; // @[Backend.scala:69:51]
wire _vxufp_int_io_set_fflags_valid; // @[Backend.scala:69:51]
wire [4:0] _vxufp_int_io_set_fflags_bits; // @[Backend.scala:69:51]
wire _vxufp_int_io_busy; // @[Backend.scala:69:51]
wire _vxsfp_int_io_dis_ready; // @[Backend.scala:63:11]
wire _vxsfp_int_io_seq_hazard_valid; // @[Backend.scala:63:11]
wire [4:0] _vxsfp_int_io_seq_hazard_bits_vat; // @[Backend.scala:63:11]
wire [127:0] _vxsfp_int_io_seq_hazard_bits_rintent; // @[Backend.scala:63:11]
wire [127:0] _vxsfp_int_io_seq_hazard_bits_wintent; // @[Backend.scala:63:11]
wire [4:0] _vxsfp_int_io_vat; // @[Backend.scala:63:11]
wire _vxsfp_int_io_busy; // @[Backend.scala:63:11]
wire _vxsfp_int_io_rvs1_req_valid; // @[Backend.scala:63:11]
wire [6:0] _vxsfp_int_io_rvs1_req_bits_eg; // @[Backend.scala:63:11]
wire _vxsfp_int_io_rvs1_req_bits_oldest; // @[Backend.scala:63:11]
wire _vxsfp_int_io_rvs2_req_valid; // @[Backend.scala:63:11]
wire [6:0] _vxsfp_int_io_rvs2_req_bits_eg; // @[Backend.scala:63:11]
wire _vxsfp_int_io_rvs2_req_bits_oldest; // @[Backend.scala:63:11]
wire _vxsfp_int_io_rvd_req_valid; // @[Backend.scala:63:11]
wire [6:0] _vxsfp_int_io_rvd_req_bits_eg; // @[Backend.scala:63:11]
wire _vxsfp_int_io_rvd_req_bits_oldest; // @[Backend.scala:63:11]
wire _vxsfp_int_io_rvm_req_valid; // @[Backend.scala:63:11]
wire [6:0] _vxsfp_int_io_rvm_req_bits_eg; // @[Backend.scala:63:11]
wire _vxsfp_int_io_rvm_req_bits_oldest; // @[Backend.scala:63:11]
wire _vxsfp_int_io_perm_req_valid; // @[Backend.scala:63:11]
wire [2:0] _vxsfp_int_io_perm_req_bits_head; // @[Backend.scala:63:11]
wire [2:0] _vxsfp_int_io_perm_req_bits_tail; // @[Backend.scala:63:11]
wire _vxsfp_int_io_iss_valid; // @[Backend.scala:63:11]
wire [7:0] _vxsfp_int_io_iss_bits_eidx; // @[Backend.scala:63:11]
wire [8:0] _vxsfp_int_io_iss_bits_vl; // @[Backend.scala:63:11]
wire [63:0] _vxsfp_int_io_iss_bits_rvs1_data; // @[Backend.scala:63:11]
wire [63:0] _vxsfp_int_io_iss_bits_rvs2_data; // @[Backend.scala:63:11]
wire [63:0] _vxsfp_int_io_iss_bits_rvd_data; // @[Backend.scala:63:11]
wire [63:0] _vxsfp_int_io_iss_bits_rvm_data; // @[Backend.scala:63:11]
wire [63:0] _vxsfp_int_io_iss_bits_rvs1_elem; // @[Backend.scala:63:11]
wire [63:0] _vxsfp_int_io_iss_bits_rvs2_elem; // @[Backend.scala:63:11]
wire [1:0] _vxsfp_int_io_iss_bits_rvs1_eew; // @[Backend.scala:63:11]
wire [1:0] _vxsfp_int_io_iss_bits_rvs2_eew; // @[Backend.scala:63:11]
wire [1:0] _vxsfp_int_io_iss_bits_rvd_eew; // @[Backend.scala:63:11]
wire [1:0] _vxsfp_int_io_iss_bits_vd_eew; // @[Backend.scala:63:11]
wire [7:0] _vxsfp_int_io_iss_bits_rmask; // @[Backend.scala:63:11]
wire [7:0] _vxsfp_int_io_iss_bits_wmask; // @[Backend.scala:63:11]
wire [63:0] _vxsfp_int_io_iss_bits_full_tail_mask; // @[Backend.scala:63:11]
wire [6:0] _vxsfp_int_io_iss_bits_wvd_eg; // @[Backend.scala:63:11]
wire [2:0] _vxsfp_int_io_iss_bits_funct3; // @[Backend.scala:63:11]
wire [5:0] _vxsfp_int_io_iss_bits_funct6; // @[Backend.scala:63:11]
wire [4:0] _vxsfp_int_io_iss_bits_rs1; // @[Backend.scala:63:11]
wire [4:0] _vxsfp_int_io_iss_bits_rs2; // @[Backend.scala:63:11]
wire [4:0] _vxsfp_int_io_iss_bits_rd; // @[Backend.scala:63:11]
wire _vxsfp_int_io_iss_bits_vm; // @[Backend.scala:63:11]
wire _vxsfp_int_io_iss_bits_head; // @[Backend.scala:63:11]
wire _vxsfp_int_io_iss_bits_tail; // @[Backend.scala:63:11]
wire _vxsfp_int_io_iss_bits_acc; // @[Backend.scala:63:11]
wire [2:0] _vxsfp_int_io_iss_bits_rm; // @[Backend.scala:63:11]
wire _vps_io_dis_ready; // @[Backend.scala:61:19]
wire _vps_io_seq_hazard_valid; // @[Backend.scala:61:19]
wire [4:0] _vps_io_seq_hazard_bits_vat; // @[Backend.scala:61:19]
wire [127:0] _vps_io_seq_hazard_bits_rintent; // @[Backend.scala:61:19]
wire [4:0] _vps_io_vat; // @[Backend.scala:61:19]
wire _vps_io_busy; // @[Backend.scala:61:19]
wire _vps_io_rvs2_req_valid; // @[Backend.scala:61:19]
wire [6:0] _vps_io_rvs2_req_bits_eg; // @[Backend.scala:61:19]
wire _vps_io_rvs2_req_bits_oldest; // @[Backend.scala:61:19]
wire _vps_io_rvm_req_valid; // @[Backend.scala:61:19]
wire [6:0] _vps_io_rvm_req_bits_eg; // @[Backend.scala:61:19]
wire _vps_io_rvm_req_bits_oldest; // @[Backend.scala:61:19]
wire _vps_io_iss_valid; // @[Backend.scala:61:19]
wire _vps_io_iss_bits_renv2; // @[Backend.scala:61:19]
wire _vps_io_iss_bits_renvm; // @[Backend.scala:61:19]
wire [63:0] _vps_io_iss_bits_rvs2_data; // @[Backend.scala:61:19]
wire [7:0] _vps_io_iss_bits_eidx; // @[Backend.scala:61:19]
wire [1:0] _vps_io_iss_bits_rvs2_eew; // @[Backend.scala:61:19]
wire [63:0] _vps_io_iss_bits_rvm_data; // @[Backend.scala:61:19]
wire _vps_io_iss_bits_vmu; // @[Backend.scala:61:19]
wire [8:0] _vps_io_iss_bits_vl; // @[Backend.scala:61:19]
wire _vps_io_iss_bits_tail; // @[Backend.scala:61:19]
wire _vss_io_dis_ready; // @[Backend.scala:60:19]
wire _vss_io_seq_hazard_valid; // @[Backend.scala:60:19]
wire [4:0] _vss_io_seq_hazard_bits_vat; // @[Backend.scala:60:19]
wire [127:0] _vss_io_seq_hazard_bits_rintent; // @[Backend.scala:60:19]
wire [4:0] _vss_io_vat; // @[Backend.scala:60:19]
wire _vss_io_busy; // @[Backend.scala:60:19]
wire _vss_io_rvd_req_valid; // @[Backend.scala:60:19]
wire [6:0] _vss_io_rvd_req_bits_eg; // @[Backend.scala:60:19]
wire _vss_io_rvd_req_bits_oldest; // @[Backend.scala:60:19]
wire _vss_io_rvm_req_valid; // @[Backend.scala:60:19]
wire [6:0] _vss_io_rvm_req_bits_eg; // @[Backend.scala:60:19]
wire _vss_io_rvm_req_bits_oldest; // @[Backend.scala:60:19]
wire _vss_io_iss_valid; // @[Backend.scala:60:19]
wire _vss_io_iss_bits_tail; // @[Backend.scala:60:19]
wire _vls_io_dis_ready; // @[Backend.scala:59:19]
wire _vls_io_seq_hazard_valid; // @[Backend.scala:59:19]
wire [4:0] _vls_io_seq_hazard_bits_vat; // @[Backend.scala:59:19]
wire [127:0] _vls_io_seq_hazard_bits_rintent; // @[Backend.scala:59:19]
wire [127:0] _vls_io_seq_hazard_bits_wintent; // @[Backend.scala:59:19]
wire [4:0] _vls_io_vat; // @[Backend.scala:59:19]
wire _vls_io_busy; // @[Backend.scala:59:19]
wire _vls_io_rvm_req_valid; // @[Backend.scala:59:19]
wire [6:0] _vls_io_rvm_req_bits_eg; // @[Backend.scala:59:19]
wire _vls_io_rvm_req_bits_oldest; // @[Backend.scala:59:19]
wire _vls_io_iss_valid; // @[Backend.scala:59:19]
wire [6:0] _vls_io_iss_bits_wvd_eg; // @[Backend.scala:59:19]
wire [7:0] _vls_io_iss_bits_wmask; // @[Backend.scala:59:19]
wire _vls_io_iss_bits_tail; // @[Backend.scala:59:19]
wire [15:0] _vls_io_iss_bits_debug_id; // @[Backend.scala:59:19]
wire _vxissq_fp_int_io_enq_ready; // @[Backend.scala:57:43]
wire _vxissq_fp_int_io_deq_valid; // @[Backend.scala:57:43]
wire [31:0] _vxissq_fp_int_io_deq_bits_bits; // @[Backend.scala:57:43]
wire [8:0] _vxissq_fp_int_io_deq_bits_vconfig_vl; // @[Backend.scala:57:43]
wire [2:0] _vxissq_fp_int_io_deq_bits_vconfig_vtype_vsew; // @[Backend.scala:57:43]
wire _vxissq_fp_int_io_deq_bits_vconfig_vtype_vlmul_sign; // @[Backend.scala:57:43]
wire [1:0] _vxissq_fp_int_io_deq_bits_vconfig_vtype_vlmul_mag; // @[Backend.scala:57:43]
wire [7:0] _vxissq_fp_int_io_deq_bits_vstart; // @[Backend.scala:57:43]
wire [63:0] _vxissq_fp_int_io_deq_bits_rs1_data; // @[Backend.scala:57:43]
wire [4:0] _vxissq_fp_int_io_deq_bits_vat; // @[Backend.scala:57:43]
wire [2:0] _vxissq_fp_int_io_deq_bits_rm; // @[Backend.scala:57:43]
wire [1:0] _vxissq_fp_int_io_deq_bits_emul; // @[Backend.scala:57:43]
wire _vxissq_fp_int_io_deq_bits_reduction; // @[Backend.scala:57:43]
wire _vxissq_fp_int_io_deq_bits_scalar_to_vd0; // @[Backend.scala:57:43]
wire _vxissq_fp_int_io_deq_bits_wide_vd; // @[Backend.scala:57:43]
wire _vxissq_fp_int_io_deq_bits_wide_vs2; // @[Backend.scala:57:43]
wire _vxissq_fp_int_io_deq_bits_writes_mask; // @[Backend.scala:57:43]
wire _vxissq_fp_int_io_deq_bits_reads_vs1_mask; // @[Backend.scala:57:43]
wire _vxissq_fp_int_io_deq_bits_reads_vs2_mask; // @[Backend.scala:57:43]
wire _vxissq_fp_int_io_deq_bits_renv1; // @[Backend.scala:57:43]
wire _vxissq_fp_int_io_deq_bits_renv2; // @[Backend.scala:57:43]
wire _vxissq_fp_int_io_deq_bits_renvd; // @[Backend.scala:57:43]
wire _vxissq_fp_int_io_deq_bits_renvm; // @[Backend.scala:57:43]
wire _vxissq_fp_int_io_deq_bits_wvd; // @[Backend.scala:57:43]
wire _vxissq_fp_int_io_deq_bits_seq; // @[Backend.scala:57:43]
wire _vxissq_fp_int_io_hazards_0_valid; // @[Backend.scala:57:43]
wire [4:0] _vxissq_fp_int_io_hazards_0_bits_vat; // @[Backend.scala:57:43]
wire [31:0] _vxissq_fp_int_io_hazards_0_bits_rintent; // @[Backend.scala:57:43]
wire [31:0] _vxissq_fp_int_io_hazards_0_bits_wintent; // @[Backend.scala:57:43]
wire _vxissq_fp_int_io_hazards_1_valid; // @[Backend.scala:57:43]
wire [4:0] _vxissq_fp_int_io_hazards_1_bits_vat; // @[Backend.scala:57:43]
wire [31:0] _vxissq_fp_int_io_hazards_1_bits_rintent; // @[Backend.scala:57:43]
wire [31:0] _vxissq_fp_int_io_hazards_1_bits_wintent; // @[Backend.scala:57:43]
wire _vxissq_fp_int_io_hazards_2_valid; // @[Backend.scala:57:43]
wire [4:0] _vxissq_fp_int_io_hazards_2_bits_vat; // @[Backend.scala:57:43]
wire [31:0] _vxissq_fp_int_io_hazards_2_bits_rintent; // @[Backend.scala:57:43]
wire [31:0] _vxissq_fp_int_io_hazards_2_bits_wintent; // @[Backend.scala:57:43]
wire _vpissq_io_enq_ready; // @[Backend.scala:56:22]
wire _vpissq_io_deq_valid; // @[Backend.scala:56:22]
wire [31:0] _vpissq_io_deq_bits_bits; // @[Backend.scala:56:22]
wire [8:0] _vpissq_io_deq_bits_vconfig_vl; // @[Backend.scala:56:22]
wire [2:0] _vpissq_io_deq_bits_vconfig_vtype_vsew; // @[Backend.scala:56:22]
wire _vpissq_io_deq_bits_vconfig_vtype_vlmul_sign; // @[Backend.scala:56:22]
wire [1:0] _vpissq_io_deq_bits_vconfig_vtype_vlmul_mag; // @[Backend.scala:56:22]
wire [7:0] _vpissq_io_deq_bits_vstart; // @[Backend.scala:56:22]
wire [63:0] _vpissq_io_deq_bits_rs1_data; // @[Backend.scala:56:22]
wire [4:0] _vpissq_io_deq_bits_vat; // @[Backend.scala:56:22]
wire [1:0] _vpissq_io_deq_bits_emul; // @[Backend.scala:56:22]
wire _vpissq_io_deq_bits_rs1_is_rs2; // @[Backend.scala:56:22]
wire _vpissq_io_deq_bits_renv2; // @[Backend.scala:56:22]
wire _vpissq_io_deq_bits_renvm; // @[Backend.scala:56:22]
wire _vpissq_io_deq_bits_seq; // @[Backend.scala:56:22]
wire _vsissq_io_enq_ready; // @[Backend.scala:55:22]
wire _vsissq_io_deq_valid; // @[Backend.scala:55:22]
wire [31:0] _vsissq_io_deq_bits_bits; // @[Backend.scala:55:22]
wire [8:0] _vsissq_io_deq_bits_vconfig_vl; // @[Backend.scala:55:22]
wire [2:0] _vsissq_io_deq_bits_vconfig_vtype_vsew; // @[Backend.scala:55:22]
wire [7:0] _vsissq_io_deq_bits_vstart; // @[Backend.scala:55:22]
wire [4:0] _vsissq_io_deq_bits_vat; // @[Backend.scala:55:22]
wire [1:0] _vsissq_io_deq_bits_emul; // @[Backend.scala:55:22]
wire [15:0] _vsissq_io_deq_bits_debug_id; // @[Backend.scala:55:22]
wire [1:0] _vsissq_io_deq_bits_mop; // @[Backend.scala:55:22]
wire _vsissq_io_deq_bits_seq; // @[Backend.scala:55:22]
wire _vsissq_io_hazards_0_valid; // @[Backend.scala:55:22]
wire [4:0] _vsissq_io_hazards_0_bits_vat; // @[Backend.scala:55:22]
wire [31:0] _vsissq_io_hazards_0_bits_rintent; // @[Backend.scala:55:22]
wire [31:0] _vsissq_io_hazards_0_bits_wintent; // @[Backend.scala:55:22]
wire _vsissq_io_hazards_1_valid; // @[Backend.scala:55:22]
wire [4:0] _vsissq_io_hazards_1_bits_vat; // @[Backend.scala:55:22]
wire [31:0] _vsissq_io_hazards_1_bits_rintent; // @[Backend.scala:55:22]
wire [31:0] _vsissq_io_hazards_1_bits_wintent; // @[Backend.scala:55:22]
wire _vsissq_io_hazards_2_valid; // @[Backend.scala:55:22]
wire [4:0] _vsissq_io_hazards_2_bits_vat; // @[Backend.scala:55:22]
wire [31:0] _vsissq_io_hazards_2_bits_rintent; // @[Backend.scala:55:22]
wire [31:0] _vsissq_io_hazards_2_bits_wintent; // @[Backend.scala:55:22]
wire _vlissq_io_enq_ready; // @[Backend.scala:54:22]
wire _vlissq_io_deq_valid; // @[Backend.scala:54:22]
wire [31:0] _vlissq_io_deq_bits_bits; // @[Backend.scala:54:22]
wire [8:0] _vlissq_io_deq_bits_vconfig_vl; // @[Backend.scala:54:22]
wire [2:0] _vlissq_io_deq_bits_vconfig_vtype_vsew; // @[Backend.scala:54:22]
wire [7:0] _vlissq_io_deq_bits_vstart; // @[Backend.scala:54:22]
wire [2:0] _vlissq_io_deq_bits_segstart; // @[Backend.scala:54:22]
wire [2:0] _vlissq_io_deq_bits_segend; // @[Backend.scala:54:22]
wire [4:0] _vlissq_io_deq_bits_vat; // @[Backend.scala:54:22]
wire [1:0] _vlissq_io_deq_bits_emul; // @[Backend.scala:54:22]
wire [15:0] _vlissq_io_deq_bits_debug_id; // @[Backend.scala:54:22]
wire [1:0] _vlissq_io_deq_bits_mop; // @[Backend.scala:54:22]
wire _vlissq_io_deq_bits_seq; // @[Backend.scala:54:22]
wire _vlissq_io_hazards_0_valid; // @[Backend.scala:54:22]
wire [4:0] _vlissq_io_hazards_0_bits_vat; // @[Backend.scala:54:22]
wire [31:0] _vlissq_io_hazards_0_bits_rintent; // @[Backend.scala:54:22]
wire [31:0] _vlissq_io_hazards_0_bits_wintent; // @[Backend.scala:54:22]
wire _vlissq_io_hazards_1_valid; // @[Backend.scala:54:22]
wire [4:0] _vlissq_io_hazards_1_bits_vat; // @[Backend.scala:54:22]
wire [31:0] _vlissq_io_hazards_1_bits_rintent; // @[Backend.scala:54:22]
wire [31:0] _vlissq_io_hazards_1_bits_wintent; // @[Backend.scala:54:22]
wire _vlissq_io_hazards_2_valid; // @[Backend.scala:54:22]
wire [4:0] _vlissq_io_hazards_2_bits_vat; // @[Backend.scala:54:22]
wire [31:0] _vlissq_io_hazards_2_bits_rintent; // @[Backend.scala:54:22]
wire [31:0] _vlissq_io_hazards_2_bits_wintent; // @[Backend.scala:54:22]
wire _perm_buffer_io_push_ready; // @[Backend.scala:49:27]
wire _perm_buffer_io_pop_ready; // @[Backend.scala:49:27]
wire [7:0] _perm_buffer_io_pop_data_0; // @[Backend.scala:49:27]
wire [7:0] _perm_buffer_io_pop_data_1; // @[Backend.scala:49:27]
wire [7:0] _perm_buffer_io_pop_data_2; // @[Backend.scala:49:27]
wire [7:0] _perm_buffer_io_pop_data_3; // @[Backend.scala:49:27]
wire [7:0] _perm_buffer_io_pop_data_4; // @[Backend.scala:49:27]
wire [7:0] _perm_buffer_io_pop_data_5; // @[Backend.scala:49:27]
wire [7:0] _perm_buffer_io_pop_data_6; // @[Backend.scala:49:27]
wire [7:0] _perm_buffer_io_pop_data_7; // @[Backend.scala:49:27]
wire _vdq_io_enq_ready; // @[Backend.scala:46:19]
wire _vdq_io_deq_valid; // @[Backend.scala:46:19]
wire [31:0] _vdq_io_deq_bits_bits; // @[Backend.scala:46:19]
wire [8:0] _vdq_io_deq_bits_vconfig_vl; // @[Backend.scala:46:19]
wire [2:0] _vdq_io_deq_bits_vconfig_vtype_vsew; // @[Backend.scala:46:19]
wire _vdq_io_deq_bits_vconfig_vtype_vlmul_sign; // @[Backend.scala:46:19]
wire [1:0] _vdq_io_deq_bits_vconfig_vtype_vlmul_mag; // @[Backend.scala:46:19]
wire [7:0] _vdq_io_deq_bits_vstart; // @[Backend.scala:46:19]
wire [2:0] _vdq_io_deq_bits_segstart; // @[Backend.scala:46:19]
wire [2:0] _vdq_io_deq_bits_segend; // @[Backend.scala:46:19]
wire [63:0] _vdq_io_deq_bits_rs1_data; // @[Backend.scala:46:19]
wire [4:0] _vdq_io_deq_bits_vat; // @[Backend.scala:46:19]
wire [2:0] _vdq_io_deq_bits_rm; // @[Backend.scala:46:19]
wire [1:0] _vdq_io_deq_bits_emul; // @[Backend.scala:46:19]
wire [15:0] _vdq_io_deq_bits_debug_id; // @[Backend.scala:46:19]
wire [1:0] _vdq_io_deq_bits_mop; // @[Backend.scala:46:19]
wire _vdq_io_peek_0_valid; // @[Backend.scala:46:19]
wire [31:0] _vdq_io_peek_0_bits_bits; // @[Backend.scala:46:19]
wire _vdq_io_peek_1_valid; // @[Backend.scala:46:19]
wire [31:0] _vdq_io_peek_1_bits_bits; // @[Backend.scala:46:19]
wire _vdq_io_peek_2_valid; // @[Backend.scala:46:19]
wire [31:0] _vdq_io_peek_2_bits_bits; // @[Backend.scala:46:19]
wire _vdq_io_peek_3_valid; // @[Backend.scala:46:19]
wire [31:0] _vdq_io_peek_3_bits_bits; // @[Backend.scala:46:19]
wire [7:0][1:0] _GEN = '{2'h3, 2'h3, 2'h3, 2'h3, 2'h2, 2'h2, 2'h1, 2'h0};
wire [1:0] vsissq_io_enq_bits_nf_log2 = _GEN[_vdq_io_deq_bits_bits[31:29]]; // @[Bundles.scala:63:16]
wire [6:0] _vpissq_io_enq_bits_rs1_is_rs2_WIRE = {1'h0, _vdq_io_deq_bits_bits[31:26]}; // @[Bundles.scala:75:20, :84:35]
wire [18:0] decode_invInputs = ~{_vdq_io_deq_bits_bits[19:15], _vdq_io_deq_bits_bits[24:20], _vdq_io_deq_bits_bits[14:12], _vdq_io_deq_bits_bits[31:26]}; // @[pla.scala:78:21]
wire [1:0] _decode_andMatrixOutputs_T = {decode_invInputs[3], decode_invInputs[4]}; // @[pla.scala:78:21, :91:29, :98:53]
wire [2:0] _decode_andMatrixOutputs_T_2 = {decode_invInputs[1], decode_invInputs[6], decode_invInputs[7]}; // @[pla.scala:78:21, :91:29, :98:53]
wire [1:0] _decode_andMatrixOutputs_T_20 = {decode_invInputs[1], _vdq_io_deq_bits_bits[29]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53]
wire [5:0] _decode_andMatrixOutputs_T_31 = {_vdq_io_deq_bits_bits[26], _vdq_io_deq_bits_bits[27], _vdq_io_deq_bits_bits[28], decode_invInputs[3], _vdq_io_deq_bits_bits[30], decode_invInputs[7]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53]
wire [2:0] _decode_andMatrixOutputs_T_32 = {_vdq_io_deq_bits_bits[29], _vdq_io_deq_bits_bits[30], decode_invInputs[5]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53]
wire [4:0] _decode_andMatrixOutputs_T_41 = {_vdq_io_deq_bits_bits[28], _vdq_io_deq_bits_bits[29], _vdq_io_deq_bits_bits[31], decode_invInputs[6], decode_invInputs[7]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53]
wire [1:0] _decode_andMatrixOutputs_T_42 = {_vdq_io_deq_bits_bits[30], _vdq_io_deq_bits_bits[31]}; // @[pla.scala:90:45, :98:53]
wire [6:0] _decode_andMatrixOutputs_T_50 = {decode_invInputs[1], decode_invInputs[3], _vdq_io_deq_bits_bits[30], decode_invInputs[5], _vdq_io_deq_bits_bits[12], decode_invInputs[7], decode_invInputs[8]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53]
wire [4:0] _decode_andMatrixOutputs_T_52 = {_vdq_io_deq_bits_bits[29], decode_invInputs[4], _vdq_io_deq_bits_bits[31], _vdq_io_deq_bits_bits[12], decode_invInputs[7]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53]
wire [4:0] _decode_andMatrixOutputs_T_56 = {decode_invInputs[3], decode_invInputs[4], decode_invInputs[5], decode_invInputs[6], _vdq_io_deq_bits_bits[13]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53]
wire [7:0] _decode_andMatrixOutputs_T_61 = {decode_invInputs[1], decode_invInputs[2], decode_invInputs[3], _vdq_io_deq_bits_bits[30], decode_invInputs[5], decode_invInputs[6], _vdq_io_deq_bits_bits[13], decode_invInputs[8]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53]
wire [5:0] _decode_andMatrixOutputs_T_63 = {decode_invInputs[0], _vdq_io_deq_bits_bits[28], _vdq_io_deq_bits_bits[30], decode_invInputs[5], decode_invInputs[6], _vdq_io_deq_bits_bits[13]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53]
wire [6:0] _decode_andMatrixOutputs_T_64 = {decode_invInputs[0], _vdq_io_deq_bits_bits[28], _vdq_io_deq_bits_bits[30], decode_invInputs[5], decode_invInputs[6], _vdq_io_deq_bits_bits[13], decode_invInputs[18]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53]
wire [4:0] _decode_andMatrixOutputs_T_66 = {_vdq_io_deq_bits_bits[29], _vdq_io_deq_bits_bits[30], decode_invInputs[5], decode_invInputs[6], _vdq_io_deq_bits_bits[13]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53]
wire [4:0] _decode_andMatrixOutputs_T_73 = {_vdq_io_deq_bits_bits[29], decode_invInputs[4], _vdq_io_deq_bits_bits[31], decode_invInputs[6], _vdq_io_deq_bits_bits[13]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53]
wire [2:0] _decode_andMatrixOutputs_T_76 = {decode_invInputs[1], _vdq_io_deq_bits_bits[12], _vdq_io_deq_bits_bits[13]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53]
wire [4:0] _decode_andMatrixOutputs_T_80 = {_vdq_io_deq_bits_bits[27], decode_invInputs[3], _vdq_io_deq_bits_bits[30], _vdq_io_deq_bits_bits[12], _vdq_io_deq_bits_bits[13]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53]
wire [4:0] _decode_andMatrixOutputs_T_82 = {_vdq_io_deq_bits_bits[28], _vdq_io_deq_bits_bits[29], _vdq_io_deq_bits_bits[31], _vdq_io_deq_bits_bits[12], _vdq_io_deq_bits_bits[13]}; // @[pla.scala:90:45, :98:53]
wire [6:0] _decode_andMatrixOutputs_T_101 = {decode_invInputs[2], _vdq_io_deq_bits_bits[30], decode_invInputs[5], decode_invInputs[6], _vdq_io_deq_bits_bits[13], decode_invInputs[8], _vdq_io_deq_bits_bits[19]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53]
wire _vxissq_fp_int_io_enq_valid_T = issq_stall_0 | issq_stall_1; // @[Backend.scala:223:47]
wire _older_issq_rintents_T = _vsissq_io_hazards_0_bits_vat < _vls_io_vat; // @[Parameters.scala:356:53]
wire _older_issq_rintents_T_382 = _vsissq_io_hazards_0_bits_vat < io_vat_tail; // @[Parameters.scala:356:65]
wire _older_seq_rintents_T_17 = _vls_io_vat < io_vat_tail; // @[Parameters.scala:356:79]
wire _older_issq_rintents_T_7 = _vsissq_io_hazards_1_bits_vat < _vls_io_vat; // @[Parameters.scala:356:53]
wire _older_issq_rintents_T_389 = _vsissq_io_hazards_1_bits_vat < io_vat_tail; // @[Parameters.scala:356:65]
wire _older_issq_rintents_T_14 = _vsissq_io_hazards_2_bits_vat < _vls_io_vat; // @[Parameters.scala:356:53]
wire _older_issq_rintents_T_396 = _vsissq_io_hazards_2_bits_vat < io_vat_tail; // @[Parameters.scala:356:65]
wire _older_issq_rintents_T_21 = _vxissq_fp_int_io_hazards_0_bits_vat < _vls_io_vat; // @[Parameters.scala:356:53]
wire _older_issq_rintents_T_267 = _vxissq_fp_int_io_hazards_0_bits_vat < io_vat_tail; // @[Parameters.scala:356:65]
wire _older_issq_rintents_T_28 = _vxissq_fp_int_io_hazards_1_bits_vat < _vls_io_vat; // @[Parameters.scala:356:53]
wire _older_issq_rintents_T_274 = _vxissq_fp_int_io_hazards_1_bits_vat < io_vat_tail; // @[Parameters.scala:356:65]
wire _older_issq_rintents_T_35 = _vxissq_fp_int_io_hazards_2_bits_vat < _vls_io_vat; // @[Parameters.scala:356:53]
wire _older_issq_rintents_T_281 = _vxissq_fp_int_io_hazards_2_bits_vat < io_vat_tail; // @[Parameters.scala:356:65]
wire [31:0] _older_issq_wintents_T_47 = ((_older_issq_rintents_T ^ _older_issq_rintents_T_382 ^ _older_seq_rintents_T_17) & _vsissq_io_hazards_0_valid ? _vsissq_io_hazards_0_bits_wintent : 32'h0) | ((_older_issq_rintents_T_7 ^ _older_issq_rintents_T_389 ^ _older_seq_rintents_T_17) & _vsissq_io_hazards_1_valid ? _vsissq_io_hazards_1_bits_wintent : 32'h0) | ((_older_issq_rintents_T_14 ^ _older_issq_rintents_T_396 ^ _older_seq_rintents_T_17) & _vsissq_io_hazards_2_valid ? _vsissq_io_hazards_2_bits_wintent : 32'h0) | ((_older_issq_rintents_T_21 ^ _older_issq_rintents_T_267 ^ _older_seq_rintents_T_17) & _vxissq_fp_int_io_hazards_0_valid ? _vxissq_fp_int_io_hazards_0_bits_wintent : 32'h0) | ((_older_issq_rintents_T_28 ^ _older_issq_rintents_T_274 ^ _older_seq_rintents_T_17) & _vxissq_fp_int_io_hazards_1_valid ? _vxissq_fp_int_io_hazards_1_bits_wintent : 32'h0) | ((_older_issq_rintents_T_35 ^ _older_issq_rintents_T_281 ^ _older_seq_rintents_T_17) & _vxissq_fp_int_io_hazards_2_valid ? _vxissq_fp_int_io_hazards_2_bits_wintent : 32'h0); // @[Parameters.scala:356:{53,59,65,73,79}]
wire _older_seq_rintents_T_74 = _vss_io_seq_hazard_bits_vat < io_vat_tail; // @[Parameters.scala:356:65]
wire _older_seq_rintents_T_81 = _vps_io_seq_hazard_bits_vat < io_vat_tail; // @[Parameters.scala:356:65]
wire _older_seq_rintents_T_14 = _vxsfp_int_io_seq_hazard_bits_vat < _vls_io_vat; // @[Parameters.scala:356:53]
wire _older_seq_rintents_T_59 = _vxsfp_int_io_seq_hazard_bits_vat < io_vat_tail; // @[Parameters.scala:356:65]
wire [31:0] _older_issq_rintents_T_47 = ((_older_issq_rintents_T ^ _older_issq_rintents_T_382 ^ _older_seq_rintents_T_17) & _vsissq_io_hazards_0_valid ? _vsissq_io_hazards_0_bits_rintent : 32'h0) | ((_older_issq_rintents_T_7 ^ _older_issq_rintents_T_389 ^ _older_seq_rintents_T_17) & _vsissq_io_hazards_1_valid ? _vsissq_io_hazards_1_bits_rintent : 32'h0) | ((_older_issq_rintents_T_14 ^ _older_issq_rintents_T_396 ^ _older_seq_rintents_T_17) & _vsissq_io_hazards_2_valid ? _vsissq_io_hazards_2_bits_rintent : 32'h0) | ((_older_issq_rintents_T_21 ^ _older_issq_rintents_T_267 ^ _older_seq_rintents_T_17) & _vxissq_fp_int_io_hazards_0_valid ? _vxissq_fp_int_io_hazards_0_bits_rintent : 32'h0) | ((_older_issq_rintents_T_28 ^ _older_issq_rintents_T_274 ^ _older_seq_rintents_T_17) & _vxissq_fp_int_io_hazards_1_valid ? _vxissq_fp_int_io_hazards_1_bits_rintent : 32'h0) | ((_older_issq_rintents_T_35 ^ _older_issq_rintents_T_281 ^ _older_seq_rintents_T_17) & _vxissq_fp_int_io_hazards_2_valid ? _vxissq_fp_int_io_hazards_2_bits_rintent : 32'h0); // @[Parameters.scala:356:{53,59,65,73,79}]
wire [127:0] _older_pipe_writes_T_30 = 128'h1 << _vxufp_int_io_pipe_hazards_0_bits_eg; // @[OneHot.scala:58:35]
wire [127:0] _older_pipe_writes_T_32 = 128'h1 << _vxufp_int_io_pipe_hazards_1_bits_eg; // @[OneHot.scala:58:35]
wire [127:0] _older_pipe_writes_T_34 = 128'h1 << _vxufp_int_io_pipe_hazards_2_bits_eg; // @[OneHot.scala:58:35]
wire [127:0] _older_pipe_writes_T_36 = 128'h1 << _vxufp_int_io_pipe_hazards_3_bits_eg; // @[OneHot.scala:58:35]
wire [127:0] _older_iter_writes_T_12 = 128'h1 << _vxufp_int_io_iter_hazards_0_bits_eg; // @[OneHot.scala:58:35]
wire [127:0] _older_iter_writes_T_14 = 128'h1 << _vxufp_int_io_iter_hazards_1_bits_eg; // @[OneHot.scala:58:35]
wire accepts_0 = (_vdq_io_deq_bits_bits[6:0] == 7'h7 | _vdq_io_deq_bits_bits[6:0] == 7'h27) & ~(_vdq_io_deq_bits_bits[5]); // @[Bundles.scala:56:20]
assign issq_stall_0 = ~_vlissq_io_enq_ready & accepts_0; // @[LoadSequencer.scala:9:49]
wire _older_issq_rintents_T_361 = _vlissq_io_hazards_0_bits_vat < io_vat_tail; // @[Parameters.scala:356:65]
wire _older_seq_rintents_T_39 = _vss_io_vat < io_vat_tail; // @[Parameters.scala:356:79]
wire _older_issq_rintents_T_368 = _vlissq_io_hazards_1_bits_vat < io_vat_tail; // @[Parameters.scala:356:65]
wire _older_issq_rintents_T_375 = _vlissq_io_hazards_2_bits_vat < io_vat_tail; // @[Parameters.scala:356:65]
wire [31:0] _older_issq_wintents_T_159 = ((_vlissq_io_hazards_0_bits_vat < _vss_io_vat ^ _older_issq_rintents_T_361 ^ _older_seq_rintents_T_39) & _vlissq_io_hazards_0_valid ? _vlissq_io_hazards_0_bits_wintent : 32'h0) | ((_vlissq_io_hazards_1_bits_vat < _vss_io_vat ^ _older_issq_rintents_T_368 ^ _older_seq_rintents_T_39) & _vlissq_io_hazards_1_valid ? _vlissq_io_hazards_1_bits_wintent : 32'h0) | ((_vlissq_io_hazards_2_bits_vat < _vss_io_vat ^ _older_issq_rintents_T_375 ^ _older_seq_rintents_T_39) & _vlissq_io_hazards_2_valid ? _vlissq_io_hazards_2_bits_wintent : 32'h0) | ((_vxissq_fp_int_io_hazards_0_bits_vat < _vss_io_vat ^ _older_issq_rintents_T_267 ^ _older_seq_rintents_T_39) & _vxissq_fp_int_io_hazards_0_valid ? _vxissq_fp_int_io_hazards_0_bits_wintent : 32'h0) | ((_vxissq_fp_int_io_hazards_1_bits_vat < _vss_io_vat ^ _older_issq_rintents_T_274 ^ _older_seq_rintents_T_39) & _vxissq_fp_int_io_hazards_1_valid ? _vxissq_fp_int_io_hazards_1_bits_wintent : 32'h0) | ((_vxissq_fp_int_io_hazards_2_bits_vat < _vss_io_vat ^ _older_issq_rintents_T_281 ^ _older_seq_rintents_T_39) & _vxissq_fp_int_io_hazards_2_valid ? _vxissq_fp_int_io_hazards_2_bits_wintent : 32'h0); // @[Parameters.scala:356:{53,59,65,73,79}]
wire _older_seq_rintents_T_67 = _vls_io_seq_hazard_bits_vat < io_vat_tail; // @[Parameters.scala:356:65]
wire accepts_0_1 = (_vdq_io_deq_bits_bits[6:0] == 7'h7 | _vdq_io_deq_bits_bits[6:0] == 7'h27) & _vdq_io_deq_bits_bits[5]; // @[Bundles.scala:56:20]
assign issq_stall_1 = ~_vsissq_io_enq_ready & accepts_0_1; // @[StoreSequencer.scala:9:49]
wire _older_seq_rintents_T_61 = _vps_io_vat < io_vat_tail; // @[Parameters.scala:356:79]
wire [31:0] _older_issq_wintents_T_295 = ((_vlissq_io_hazards_0_bits_vat < _vps_io_vat ^ _older_issq_rintents_T_361 ^ _older_seq_rintents_T_61) & _vlissq_io_hazards_0_valid ? _vlissq_io_hazards_0_bits_wintent : 32'h0) | ((_vlissq_io_hazards_1_bits_vat < _vps_io_vat ^ _older_issq_rintents_T_368 ^ _older_seq_rintents_T_61) & _vlissq_io_hazards_1_valid ? _vlissq_io_hazards_1_bits_wintent : 32'h0) | ((_vlissq_io_hazards_2_bits_vat < _vps_io_vat ^ _older_issq_rintents_T_375 ^ _older_seq_rintents_T_61) & _vlissq_io_hazards_2_valid ? _vlissq_io_hazards_2_bits_wintent : 32'h0) | ((_vsissq_io_hazards_0_bits_vat < _vps_io_vat ^ _older_issq_rintents_T_382 ^ _older_seq_rintents_T_61) & _vsissq_io_hazards_0_valid ? _vsissq_io_hazards_0_bits_wintent : 32'h0) | ((_vsissq_io_hazards_1_bits_vat < _vps_io_vat ^ _older_issq_rintents_T_389 ^ _older_seq_rintents_T_61) & _vsissq_io_hazards_1_valid ? _vsissq_io_hazards_1_bits_wintent : 32'h0) | ((_vsissq_io_hazards_2_bits_vat < _vps_io_vat ^ _older_issq_rintents_T_396 ^ _older_seq_rintents_T_61) & _vsissq_io_hazards_2_valid ? _vsissq_io_hazards_2_bits_wintent : 32'h0) | ((_vxissq_fp_int_io_hazards_0_bits_vat < _vps_io_vat ^ _older_issq_rintents_T_267 ^ _older_seq_rintents_T_61) & _vxissq_fp_int_io_hazards_0_valid ? _vxissq_fp_int_io_hazards_0_bits_wintent : 32'h0) | ((_vxissq_fp_int_io_hazards_1_bits_vat < _vps_io_vat ^ _older_issq_rintents_T_274 ^ _older_seq_rintents_T_61) & _vxissq_fp_int_io_hazards_1_valid ? _vxissq_fp_int_io_hazards_1_bits_wintent : 32'h0) | ((_vxissq_fp_int_io_hazards_2_bits_vat < _vps_io_vat ^ _older_issq_rintents_T_281 ^ _older_seq_rintents_T_61) & _vxissq_fp_int_io_hazards_2_valid ? _vxissq_fp_int_io_hazards_2_bits_wintent : 32'h0); // @[Parameters.scala:356:{53,59,65,73,79}]
wire [8:0] accepts_arith_decode_invInputs = ~{_vdq_io_deq_bits_bits[14:12], _vdq_io_deq_bits_bits[31:26]}; // @[pla.scala:78:21]
wire accepts_0_2 = (_vdq_io_deq_bits_bits[6:0] == 7'h7 | _vdq_io_deq_bits_bits[6:0] == 7'h27) & ~(_vdq_io_deq_bits_bits[25]) & (|_vdq_io_deq_bits_mop) | (_vdq_io_deq_bits_bits[6:0] == 7'h7 | _vdq_io_deq_bits_bits[6:0] == 7'h27) & _vdq_io_deq_bits_mop[0] | ~(_vdq_io_deq_bits_bits[6:0] == 7'h7 | _vdq_io_deq_bits_bits[6:0] == 7'h27) & (|{&{accepts_arith_decode_invInputs[0], _vdq_io_deq_bits_bits[28], _vdq_io_deq_bits_bits[29], accepts_arith_decode_invInputs[4], accepts_arith_decode_invInputs[5], accepts_arith_decode_invInputs[6], accepts_arith_decode_invInputs[7], accepts_arith_decode_invInputs[8]}, &{_vdq_io_deq_bits_bits[27], _vdq_io_deq_bits_bits[28], _vdq_io_deq_bits_bits[29], accepts_arith_decode_invInputs[4], accepts_arith_decode_invInputs[5], _vdq_io_deq_bits_bits[12], _vdq_io_deq_bits_bits[13], accepts_arith_decode_invInputs[8]}, &{_vdq_io_deq_bits_bits[27], _vdq_io_deq_bits_bits[28], _vdq_io_deq_bits_bits[29], accepts_arith_decode_invInputs[4], accepts_arith_decode_invInputs[5], accepts_arith_decode_invInputs[6], _vdq_io_deq_bits_bits[14]}, &{_vdq_io_deq_bits_bits[27], _vdq_io_deq_bits_bits[28], _vdq_io_deq_bits_bits[29], accepts_arith_decode_invInputs[4], accepts_arith_decode_invInputs[5], accepts_arith_decode_invInputs[7], _vdq_io_deq_bits_bits[14]}}); // @[pla.scala:78:21, :90:45, :91:29, :98:{53,70}, :114:{19,36}]
assign issq_stall_2 = ~_vpissq_io_enq_ready & accepts_0_2; // @[PermuteSequencer.scala:14:{16,31}]
wire _older_issq_rintents_T_360 = _vlissq_io_hazards_0_bits_vat < _vxsfp_int_io_vat; // @[Parameters.scala:356:53]
wire _older_seq_rintents_T_83 = _vxsfp_int_io_vat < io_vat_tail; // @[Parameters.scala:356:79]
wire _older_issq_rintents_T_367 = _vlissq_io_hazards_1_bits_vat < _vxsfp_int_io_vat; // @[Parameters.scala:356:53]
wire _older_issq_rintents_T_374 = _vlissq_io_hazards_2_bits_vat < _vxsfp_int_io_vat; // @[Parameters.scala:356:53]
wire _older_issq_rintents_T_381 = _vsissq_io_hazards_0_bits_vat < _vxsfp_int_io_vat; // @[Parameters.scala:356:53]
wire _older_issq_rintents_T_388 = _vsissq_io_hazards_1_bits_vat < _vxsfp_int_io_vat; // @[Parameters.scala:356:53]
wire _older_issq_rintents_T_395 = _vsissq_io_hazards_2_bits_vat < _vxsfp_int_io_vat; // @[Parameters.scala:356:53]
wire [31:0] _older_issq_wintents_T_407 = ((_older_issq_rintents_T_360 ^ _older_issq_rintents_T_361 ^ _older_seq_rintents_T_83) & _vlissq_io_hazards_0_valid ? _vlissq_io_hazards_0_bits_wintent : 32'h0) | ((_older_issq_rintents_T_367 ^ _older_issq_rintents_T_368 ^ _older_seq_rintents_T_83) & _vlissq_io_hazards_1_valid ? _vlissq_io_hazards_1_bits_wintent : 32'h0) | ((_older_issq_rintents_T_374 ^ _older_issq_rintents_T_375 ^ _older_seq_rintents_T_83) & _vlissq_io_hazards_2_valid ? _vlissq_io_hazards_2_bits_wintent : 32'h0) | ((_older_issq_rintents_T_381 ^ _older_issq_rintents_T_382 ^ _older_seq_rintents_T_83) & _vsissq_io_hazards_0_valid ? _vsissq_io_hazards_0_bits_wintent : 32'h0) | ((_older_issq_rintents_T_388 ^ _older_issq_rintents_T_389 ^ _older_seq_rintents_T_83) & _vsissq_io_hazards_1_valid ? _vsissq_io_hazards_1_bits_wintent : 32'h0) | ((_older_issq_rintents_T_395 ^ _older_issq_rintents_T_396 ^ _older_seq_rintents_T_83) & _vsissq_io_hazards_2_valid ? _vsissq_io_hazards_2_bits_wintent : 32'h0); // @[Parameters.scala:356:{53,59,65,73,79}]
wire _older_seq_rintents_T_66 = _vls_io_seq_hazard_bits_vat < _vxsfp_int_io_vat; // @[Parameters.scala:356:53]
wire [31:0] _older_issq_rintents_T_407 = ((_older_issq_rintents_T_360 ^ _older_issq_rintents_T_361 ^ _older_seq_rintents_T_83) & _vlissq_io_hazards_0_valid ? _vlissq_io_hazards_0_bits_rintent : 32'h0) | ((_older_issq_rintents_T_367 ^ _older_issq_rintents_T_368 ^ _older_seq_rintents_T_83) & _vlissq_io_hazards_1_valid ? _vlissq_io_hazards_1_bits_rintent : 32'h0) | ((_older_issq_rintents_T_374 ^ _older_issq_rintents_T_375 ^ _older_seq_rintents_T_83) & _vlissq_io_hazards_2_valid ? _vlissq_io_hazards_2_bits_rintent : 32'h0) | ((_older_issq_rintents_T_381 ^ _older_issq_rintents_T_382 ^ _older_seq_rintents_T_83) & _vsissq_io_hazards_0_valid ? _vsissq_io_hazards_0_bits_rintent : 32'h0) | ((_older_issq_rintents_T_388 ^ _older_issq_rintents_T_389 ^ _older_seq_rintents_T_83) & _vsissq_io_hazards_1_valid ? _vsissq_io_hazards_1_bits_rintent : 32'h0) | ((_older_issq_rintents_T_395 ^ _older_issq_rintents_T_396 ^ _older_seq_rintents_T_83) & _vsissq_io_hazards_2_valid ? _vsissq_io_hazards_2_bits_rintent : 32'h0); // @[Parameters.scala:356:{53,59,65,73,79}]
wire [18:0] accepts_decode_invInputs = ~{_vdq_io_deq_bits_bits[19:15], _vdq_io_deq_bits_bits[24:20], _vdq_io_deq_bits_bits[14:12], _vdq_io_deq_bits_bits[31:26]}; // @[pla.scala:78:21]
wire [58:0] _accepts_decode_orMatrixOutputs_T =
{&{accepts_decode_invInputs[0], accepts_decode_invInputs[3], accepts_decode_invInputs[4], accepts_decode_invInputs[5], accepts_decode_invInputs[7]},
&{accepts_decode_invInputs[0], accepts_decode_invInputs[1], accepts_decode_invInputs[2], accepts_decode_invInputs[3], accepts_decode_invInputs[4], accepts_decode_invInputs[8]},
&{accepts_decode_invInputs[1], accepts_decode_invInputs[2], accepts_decode_invInputs[3], accepts_decode_invInputs[6], accepts_decode_invInputs[7], accepts_decode_invInputs[8]},
&{accepts_decode_invInputs[0], accepts_decode_invInputs[1], accepts_decode_invInputs[2], accepts_decode_invInputs[3], accepts_decode_invInputs[5], accepts_decode_invInputs[8], accepts_decode_invInputs[14], accepts_decode_invInputs[15], accepts_decode_invInputs[16], accepts_decode_invInputs[17], accepts_decode_invInputs[18]},
&{accepts_decode_invInputs[0], _vdq_io_deq_bits_bits[27], accepts_decode_invInputs[2], accepts_decode_invInputs[4], accepts_decode_invInputs[6], accepts_decode_invInputs[7]},
&{accepts_decode_invInputs[0], _vdq_io_deq_bits_bits[27], accepts_decode_invInputs[2], accepts_decode_invInputs[3], accepts_decode_invInputs[5], accepts_decode_invInputs[7], accepts_decode_invInputs[8], accepts_decode_invInputs[17]},
&{accepts_decode_invInputs[0], _vdq_io_deq_bits_bits[27], accepts_decode_invInputs[2], accepts_decode_invInputs[3], accepts_decode_invInputs[5], accepts_decode_invInputs[7], accepts_decode_invInputs[8], accepts_decode_invInputs[18]},
&{accepts_decode_invInputs[0], _vdq_io_deq_bits_bits[27], accepts_decode_invInputs[2], accepts_decode_invInputs[5], accepts_decode_invInputs[6], accepts_decode_invInputs[8], accepts_decode_invInputs[14], accepts_decode_invInputs[18]},
&{accepts_decode_invInputs[0], _vdq_io_deq_bits_bits[27], accepts_decode_invInputs[2], accepts_decode_invInputs[5], accepts_decode_invInputs[6], accepts_decode_invInputs[8], accepts_decode_invInputs[15], accepts_decode_invInputs[18]},
&{accepts_decode_invInputs[0], _vdq_io_deq_bits_bits[27], accepts_decode_invInputs[2], accepts_decode_invInputs[5], accepts_decode_invInputs[6], accepts_decode_invInputs[8], accepts_decode_invInputs[17], accepts_decode_invInputs[18]},
&{_vdq_io_deq_bits_bits[26], accepts_decode_invInputs[1], _vdq_io_deq_bits_bits[28], accepts_decode_invInputs[3], accepts_decode_invInputs[6], accepts_decode_invInputs[7]},
&{_vdq_io_deq_bits_bits[26], _vdq_io_deq_bits_bits[27], _vdq_io_deq_bits_bits[28], accepts_decode_invInputs[3], accepts_decode_invInputs[5], accepts_decode_invInputs[6], accepts_decode_invInputs[8]},
&{_vdq_io_deq_bits_bits[26], accepts_decode_invInputs[1], accepts_decode_invInputs[2], _vdq_io_deq_bits_bits[29], accepts_decode_invInputs[4], accepts_decode_invInputs[7]},
&{_vdq_io_deq_bits_bits[26], accepts_decode_invInputs[1], accepts_decode_invInputs[2], _vdq_io_deq_bits_bits[29], accepts_decode_invInputs[4], accepts_decode_invInputs[8]},
&{_vdq_io_deq_bits_bits[26], accepts_decode_invInputs[2], _vdq_io_deq_bits_bits[29], accepts_decode_invInputs[5], accepts_decode_invInputs[6], accepts_decode_invInputs[8]},
&{accepts_decode_invInputs[0], _vdq_io_deq_bits_bits[28], _vdq_io_deq_bits_bits[29], accepts_decode_invInputs[4], accepts_decode_invInputs[6], accepts_decode_invInputs[7]},
&{accepts_decode_invInputs[1], _vdq_io_deq_bits_bits[30], accepts_decode_invInputs[5], accepts_decode_invInputs[6], accepts_decode_invInputs[7]},
&{accepts_decode_invInputs[2], _vdq_io_deq_bits_bits[30], accepts_decode_invInputs[5], accepts_decode_invInputs[6], accepts_decode_invInputs[7]},
&{_vdq_io_deq_bits_bits[27], accepts_decode_invInputs[2], accepts_decode_invInputs[3], _vdq_io_deq_bits_bits[30], accepts_decode_invInputs[5], accepts_decode_invInputs[7], accepts_decode_invInputs[8], accepts_decode_invInputs[14], accepts_decode_invInputs[15], accepts_decode_invInputs[16], accepts_decode_invInputs[17]},
&{accepts_decode_invInputs[0], accepts_decode_invInputs[1], _vdq_io_deq_bits_bits[29], _vdq_io_deq_bits_bits[30], accepts_decode_invInputs[5], accepts_decode_invInputs[7]},
&{accepts_decode_invInputs[2], _vdq_io_deq_bits_bits[29], _vdq_io_deq_bits_bits[30], accepts_decode_invInputs[5], accepts_decode_invInputs[6], accepts_decode_invInputs[8]},
&{_vdq_io_deq_bits_bits[26], accepts_decode_invInputs[2], _vdq_io_deq_bits_bits[29], _vdq_io_deq_bits_bits[30], accepts_decode_invInputs[5], accepts_decode_invInputs[7]},
&{_vdq_io_deq_bits_bits[26], accepts_decode_invInputs[4], _vdq_io_deq_bits_bits[31], accepts_decode_invInputs[6]},
&{_vdq_io_deq_bits_bits[29], accepts_decode_invInputs[4], _vdq_io_deq_bits_bits[31], accepts_decode_invInputs[7]},
&{accepts_decode_invInputs[3], accepts_decode_invInputs[4], accepts_decode_invInputs[5], _vdq_io_deq_bits_bits[12], accepts_decode_invInputs[7], accepts_decode_invInputs[8]},
&{accepts_decode_invInputs[0], accepts_decode_invInputs[2], _vdq_io_deq_bits_bits[29], accepts_decode_invInputs[4], _vdq_io_deq_bits_bits[12], accepts_decode_invInputs[7]},
&{accepts_decode_invInputs[0], accepts_decode_invInputs[1], _vdq_io_deq_bits_bits[31], _vdq_io_deq_bits_bits[12], accepts_decode_invInputs[7]},
&{_vdq_io_deq_bits_bits[28], _vdq_io_deq_bits_bits[29], _vdq_io_deq_bits_bits[31], _vdq_io_deq_bits_bits[12], accepts_decode_invInputs[7]},
&{accepts_decode_invInputs[0], accepts_decode_invInputs[3], _vdq_io_deq_bits_bits[30], _vdq_io_deq_bits_bits[31], _vdq_io_deq_bits_bits[12], accepts_decode_invInputs[7]},
&{accepts_decode_invInputs[2], accepts_decode_invInputs[3], _vdq_io_deq_bits_bits[30], _vdq_io_deq_bits_bits[31], _vdq_io_deq_bits_bits[12], accepts_decode_invInputs[7], accepts_decode_invInputs[8]},
&{accepts_decode_invInputs[3], accepts_decode_invInputs[4], accepts_decode_invInputs[6], _vdq_io_deq_bits_bits[13], accepts_decode_invInputs[8]},
&{_vdq_io_deq_bits_bits[26], _vdq_io_deq_bits_bits[27], accepts_decode_invInputs[2], accepts_decode_invInputs[4], accepts_decode_invInputs[5], _vdq_io_deq_bits_bits[13], accepts_decode_invInputs[8]},
&{accepts_decode_invInputs[2], _vdq_io_deq_bits_bits[29], accepts_decode_invInputs[4], accepts_decode_invInputs[5], accepts_decode_invInputs[6], _vdq_io_deq_bits_bits[13]},
&{_vdq_io_deq_bits_bits[26], _vdq_io_deq_bits_bits[27], _vdq_io_deq_bits_bits[28], _vdq_io_deq_bits_bits[30], accepts_decode_invInputs[5], _vdq_io_deq_bits_bits[13], accepts_decode_invInputs[8]},
&{_vdq_io_deq_bits_bits[28], _vdq_io_deq_bits_bits[29], _vdq_io_deq_bits_bits[30], accepts_decode_invInputs[5], _vdq_io_deq_bits_bits[13], accepts_decode_invInputs[8]},
&{accepts_decode_invInputs[3], _vdq_io_deq_bits_bits[31], accepts_decode_invInputs[6], _vdq_io_deq_bits_bits[13]},
&{_vdq_io_deq_bits_bits[26], _vdq_io_deq_bits_bits[27], _vdq_io_deq_bits_bits[31], accepts_decode_invInputs[6], _vdq_io_deq_bits_bits[13]},
&{_vdq_io_deq_bits_bits[26], _vdq_io_deq_bits_bits[28], _vdq_io_deq_bits_bits[31], accepts_decode_invInputs[6], _vdq_io_deq_bits_bits[13]},
&{_vdq_io_deq_bits_bits[26], accepts_decode_invInputs[1], _vdq_io_deq_bits_bits[28], accepts_decode_invInputs[3], _vdq_io_deq_bits_bits[31], _vdq_io_deq_bits_bits[13], accepts_decode_invInputs[8]},
&{_vdq_io_deq_bits_bits[26], _vdq_io_deq_bits_bits[28], accepts_decode_invInputs[4], _vdq_io_deq_bits_bits[31], _vdq_io_deq_bits_bits[13], accepts_decode_invInputs[8]},
&{accepts_decode_invInputs[0], _vdq_io_deq_bits_bits[30], _vdq_io_deq_bits_bits[31], accepts_decode_invInputs[6], _vdq_io_deq_bits_bits[13]},
&{_vdq_io_deq_bits_bits[27], _vdq_io_deq_bits_bits[29], accepts_decode_invInputs[4], _vdq_io_deq_bits_bits[12], _vdq_io_deq_bits_bits[13], accepts_decode_invInputs[8]},
&{accepts_decode_invInputs[0], _vdq_io_deq_bits_bits[28], _vdq_io_deq_bits_bits[29], accepts_decode_invInputs[4], _vdq_io_deq_bits_bits[12], _vdq_io_deq_bits_bits[13], accepts_decode_invInputs[8]},
&{accepts_decode_invInputs[1], _vdq_io_deq_bits_bits[30], accepts_decode_invInputs[5], _vdq_io_deq_bits_bits[12], _vdq_io_deq_bits_bits[13], accepts_decode_invInputs[8]},
&{accepts_decode_invInputs[1], accepts_decode_invInputs[2], accepts_decode_invInputs[4], _vdq_io_deq_bits_bits[31], _vdq_io_deq_bits_bits[12], _vdq_io_deq_bits_bits[13], accepts_decode_invInputs[8]},
&{_vdq_io_deq_bits_bits[26], accepts_decode_invInputs[3], accepts_decode_invInputs[4], accepts_decode_invInputs[6], accepts_decode_invInputs[7], _vdq_io_deq_bits_bits[14]},
&{_vdq_io_deq_bits_bits[27], _vdq_io_deq_bits_bits[29], accepts_decode_invInputs[4], accepts_decode_invInputs[5], accepts_decode_invInputs[6], _vdq_io_deq_bits_bits[14]},
&{_vdq_io_deq_bits_bits[27], _vdq_io_deq_bits_bits[29], accepts_decode_invInputs[5], accepts_decode_invInputs[6], accepts_decode_invInputs[7], _vdq_io_deq_bits_bits[14]},
&{_vdq_io_deq_bits_bits[27], _vdq_io_deq_bits_bits[28], _vdq_io_deq_bits_bits[29], accepts_decode_invInputs[4], accepts_decode_invInputs[7], _vdq_io_deq_bits_bits[14]},
&{accepts_decode_invInputs[0], accepts_decode_invInputs[1], accepts_decode_invInputs[2], accepts_decode_invInputs[3], _vdq_io_deq_bits_bits[30], accepts_decode_invInputs[5], accepts_decode_invInputs[6], _vdq_io_deq_bits_bits[14], accepts_decode_invInputs[9], accepts_decode_invInputs[10], accepts_decode_invInputs[11], accepts_decode_invInputs[12], accepts_decode_invInputs[13]},
&{_vdq_io_deq_bits_bits[26], _vdq_io_deq_bits_bits[27], _vdq_io_deq_bits_bits[28], _vdq_io_deq_bits_bits[30], accepts_decode_invInputs[5], accepts_decode_invInputs[7], _vdq_io_deq_bits_bits[14]},
&{_vdq_io_deq_bits_bits[26], _vdq_io_deq_bits_bits[29], _vdq_io_deq_bits_bits[30], accepts_decode_invInputs[5], accepts_decode_invInputs[7], _vdq_io_deq_bits_bits[14]},
&{accepts_decode_invInputs[1], accepts_decode_invInputs[2], accepts_decode_invInputs[4], _vdq_io_deq_bits_bits[31], accepts_decode_invInputs[7], _vdq_io_deq_bits_bits[14]},
&{_vdq_io_deq_bits_bits[26], _vdq_io_deq_bits_bits[27], _vdq_io_deq_bits_bits[28], accepts_decode_invInputs[4], _vdq_io_deq_bits_bits[31], accepts_decode_invInputs[7], _vdq_io_deq_bits_bits[14]},
&{accepts_decode_invInputs[0], accepts_decode_invInputs[1], accepts_decode_invInputs[2], _vdq_io_deq_bits_bits[12], accepts_decode_invInputs[7], _vdq_io_deq_bits_bits[14], accepts_decode_invInputs[9], accepts_decode_invInputs[10], accepts_decode_invInputs[11], accepts_decode_invInputs[12], accepts_decode_invInputs[13]},
&{accepts_decode_invInputs[0], accepts_decode_invInputs[1], _vdq_io_deq_bits_bits[28], _vdq_io_deq_bits_bits[30], accepts_decode_invInputs[5], accepts_decode_invInputs[6], accepts_decode_invInputs[8], _vdq_io_deq_bits_bits[15], accepts_decode_invInputs[15], accepts_decode_invInputs[16], accepts_decode_invInputs[17]},
&{accepts_decode_invInputs[0], accepts_decode_invInputs[1], _vdq_io_deq_bits_bits[28], _vdq_io_deq_bits_bits[30], accepts_decode_invInputs[5], accepts_decode_invInputs[6], accepts_decode_invInputs[8], _vdq_io_deq_bits_bits[16], accepts_decode_invInputs[16], accepts_decode_invInputs[17], accepts_decode_invInputs[18]},
&{_vdq_io_deq_bits_bits[27], accepts_decode_invInputs[2], accepts_decode_invInputs[3], _vdq_io_deq_bits_bits[30], accepts_decode_invInputs[5], accepts_decode_invInputs[7], accepts_decode_invInputs[8], accepts_decode_invInputs[15], _vdq_io_deq_bits_bits[17], accepts_decode_invInputs[17], accepts_decode_invInputs[18]},
&{accepts_decode_invInputs[0], accepts_decode_invInputs[1], _vdq_io_deq_bits_bits[30], accepts_decode_invInputs[5], accepts_decode_invInputs[6], accepts_decode_invInputs[8], accepts_decode_invInputs[15], accepts_decode_invInputs[16], accepts_decode_invInputs[17], _vdq_io_deq_bits_bits[19]}}; // @[pla.scala:78:21, :90:45, :91:29, :98:{53,70}, :114:19]
wire accepts_0_3 = ~(_vdq_io_deq_bits_bits[6:0] == 7'h7 | _vdq_io_deq_bits_bits[6:0] == 7'h27) & (|_accepts_decode_orMatrixOutputs_T); // @[pla.scala:114:{19,36}]
assign issq_stall_3 = ~_vxissq_fp_int_io_enq_ready & accepts_0_3; // @[ExecuteSequencer.scala:14:50]
wire io_vmu_lresp_ready_0 = _vls_io_iss_valid & _vrf_io_ll_writes_0_ready; // @[Backend.scala:59:19, :283:19, :291:42]
wire vls_io_iss_ready = io_vmu_lresp_valid & _vrf_io_ll_writes_0_ready; // @[Backend.scala:283:19, :292:42] |
Generate the Verilog code corresponding to the following Chisel files.
File Monitor.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceLine
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import freechips.rocketchip.diplomacy.EnableMonitors
import freechips.rocketchip.formal.{MonitorDirection, IfThen, Property, PropertyClass, TestplanTestType, TLMonitorStrictMode}
import freechips.rocketchip.util.PlusArg
case class TLMonitorArgs(edge: TLEdge)
abstract class TLMonitorBase(args: TLMonitorArgs) extends Module
{
val io = IO(new Bundle {
val in = Input(new TLBundle(args.edge.bundle))
})
def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit
legalize(io.in, args.edge, reset)
}
object TLMonitor {
def apply(enable: Boolean, node: TLNode)(implicit p: Parameters): TLNode = {
if (enable) {
EnableMonitors { implicit p => node := TLEphemeralNode()(ValName("monitor")) }
} else { node }
}
}
class TLMonitor(args: TLMonitorArgs, monitorDir: MonitorDirection = MonitorDirection.Monitor) extends TLMonitorBase(args)
{
require (args.edge.params(TLMonitorStrictMode) || (! args.edge.params(TestplanTestType).formal))
val cover_prop_class = PropertyClass.Default
//Like assert but can flip to being an assumption for formal verification
def monAssert(cond: Bool, message: String): Unit =
if (monitorDir == MonitorDirection.Monitor) {
assert(cond, message)
} else {
Property(monitorDir, cond, message, PropertyClass.Default)
}
def assume(cond: Bool, message: String): Unit =
if (monitorDir == MonitorDirection.Monitor) {
assert(cond, message)
} else {
Property(monitorDir.flip, cond, message, PropertyClass.Default)
}
def extra = {
args.edge.sourceInfo match {
case SourceLine(filename, line, col) => s" (connected at $filename:$line:$col)"
case _ => ""
}
}
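  // visible(): a source may legally address a location only if every client that could own
  // that source ID covers the address with at least one of its visibility regions; clients
  // that do not own the source contribute a vacuously-true term.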
def visible(address: UInt, source: UInt, edge: TLEdge) =
edge.client.clients.map { c =>
!c.sourceId.contains(source) ||
c.visibility.map(_.contains(address)).reduce(_ || _)
}.reduce(_ && _)
def legalizeFormatA(bundle: TLBundleA, edge: TLEdge): Unit = {
//switch this flag to turn on diplomacy in error messages
def diplomacyInfo = if (true) "" else "\nThe diplomacy information for the edge is as follows:\n" + edge.formatEdge + "\n"
monAssert (TLMessages.isA(bundle.opcode), "'A' channel has invalid opcode" + extra)
// Reuse these subexpressions to save some firrtl lines
val source_ok = edge.client.contains(bundle.source)
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val mask = edge.full_mask(bundle)
monAssert (visible(edge.address(bundle), bundle.source, edge), "'A' channel carries an address illegal for the specified bank visibility")
    //The monitor doesn't check for acquire T vs acquire B; it assumes that acquire B implies acquire T and only checks for acquire B
//TODO: check for acquireT?
when (bundle.opcode === TLMessages.AcquireBlock) {
monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock from a client which does not support Probe" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel AcquireBlock carries invalid source ID" + diplomacyInfo + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquireBlock smaller than a beat" + extra)
monAssert (is_aligned, "'A' channel AcquireBlock address not aligned to size" + extra)
monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquireBlock carries invalid grow param" + extra)
monAssert (~bundle.mask === 0.U, "'A' channel AcquireBlock contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel AcquireBlock is corrupt" + extra)
}
when (bundle.opcode === TLMessages.AcquirePerm) {
monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm from a client which does not support Probe" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel AcquirePerm carries invalid source ID" + diplomacyInfo + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquirePerm smaller than a beat" + extra)
monAssert (is_aligned, "'A' channel AcquirePerm address not aligned to size" + extra)
monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquirePerm carries invalid grow param" + extra)
monAssert (bundle.param =/= TLPermissions.NtoB, "'A' channel AcquirePerm requests NtoB" + extra)
monAssert (~bundle.mask === 0.U, "'A' channel AcquirePerm contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel AcquirePerm is corrupt" + extra)
}
when (bundle.opcode === TLMessages.Get) {
monAssert (edge.master.emitsGet(bundle.source, bundle.size), "'A' channel carries Get type which master claims it can't emit" + diplomacyInfo + extra)
monAssert (edge.slave.supportsGetSafe(edge.address(bundle), bundle.size, None), "'A' channel carries Get type which slave claims it can't support" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel Get carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Get address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel Get carries invalid param" + extra)
monAssert (bundle.mask === mask, "'A' channel Get contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel Get is corrupt" + extra)
}
when (bundle.opcode === TLMessages.PutFullData) {
monAssert (edge.master.emitsPutFull(bundle.source, bundle.size) && edge.slave.supportsPutFullSafe(edge.address(bundle), bundle.size), "'A' channel carries PutFull type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel PutFull carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel PutFull address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel PutFull carries invalid param" + extra)
monAssert (bundle.mask === mask, "'A' channel PutFull contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.PutPartialData) {
monAssert (edge.master.emitsPutPartial(bundle.source, bundle.size) && edge.slave.supportsPutPartialSafe(edge.address(bundle), bundle.size), "'A' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel PutPartial carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel PutPartial address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel PutPartial carries invalid param" + extra)
monAssert ((bundle.mask & ~mask) === 0.U, "'A' channel PutPartial contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.ArithmeticData) {
monAssert (edge.master.emitsArithmetic(bundle.source, bundle.size) && edge.slave.supportsArithmeticSafe(edge.address(bundle), bundle.size), "'A' channel carries Arithmetic type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Arithmetic carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Arithmetic address not aligned to size" + extra)
monAssert (TLAtomics.isArithmetic(bundle.param), "'A' channel Arithmetic carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Arithmetic contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.LogicalData) {
monAssert (edge.master.emitsLogical(bundle.source, bundle.size) && edge.slave.supportsLogicalSafe(edge.address(bundle), bundle.size), "'A' channel carries Logical type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Logical carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Logical address not aligned to size" + extra)
monAssert (TLAtomics.isLogical(bundle.param), "'A' channel Logical carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Logical contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.Hint) {
monAssert (edge.master.emitsHint(bundle.source, bundle.size) && edge.slave.supportsHintSafe(edge.address(bundle), bundle.size), "'A' channel carries Hint type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Hint carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Hint address not aligned to size" + extra)
monAssert (TLHints.isHints(bundle.param), "'A' channel Hint carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Hint contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel Hint is corrupt" + extra)
}
}
def legalizeFormatB(bundle: TLBundleB, edge: TLEdge): Unit = {
monAssert (TLMessages.isB(bundle.opcode), "'B' channel has invalid opcode" + extra)
monAssert (visible(edge.address(bundle), bundle.source, edge), "'B' channel carries an address illegal for the specified bank visibility")
// Reuse these subexpressions to save some firrtl lines
val address_ok = edge.manager.containsSafe(edge.address(bundle))
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val mask = edge.full_mask(bundle)
val legal_source = Mux1H(edge.client.find(bundle.source), edge.client.clients.map(c => c.sourceId.start.U)) === bundle.source
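    // A 'B' message must be routed to the first source ID of the owning client: pick out the
    // sourceId.start of whichever client contains bundle.source and require equality.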
when (bundle.opcode === TLMessages.Probe) {
assume (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'B' channel carries Probe type which is unexpected using diplomatic parameters" + extra)
assume (address_ok, "'B' channel Probe carries unmanaged address" + extra)
assume (legal_source, "'B' channel Probe carries source that is not first source" + extra)
assume (is_aligned, "'B' channel Probe address not aligned to size" + extra)
assume (TLPermissions.isCap(bundle.param), "'B' channel Probe carries invalid cap param" + extra)
assume (bundle.mask === mask, "'B' channel Probe contains invalid mask" + extra)
assume (!bundle.corrupt, "'B' channel Probe is corrupt" + extra)
}
when (bundle.opcode === TLMessages.Get) {
monAssert (edge.master.supportsGet(edge.source(bundle), bundle.size) && edge.slave.emitsGetSafe(edge.address(bundle), bundle.size), "'B' channel carries Get type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel Get carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Get carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Get address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel Get carries invalid param" + extra)
monAssert (bundle.mask === mask, "'B' channel Get contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'B' channel Get is corrupt" + extra)
}
when (bundle.opcode === TLMessages.PutFullData) {
monAssert (edge.master.supportsPutFull(edge.source(bundle), bundle.size) && edge.slave.emitsPutFullSafe(edge.address(bundle), bundle.size), "'B' channel carries PutFull type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel PutFull carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel PutFull carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel PutFull address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel PutFull carries invalid param" + extra)
monAssert (bundle.mask === mask, "'B' channel PutFull contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.PutPartialData) {
monAssert (edge.master.supportsPutPartial(edge.source(bundle), bundle.size) && edge.slave.emitsPutPartialSafe(edge.address(bundle), bundle.size), "'B' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel PutPartial carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel PutPartial carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel PutPartial address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel PutPartial carries invalid param" + extra)
monAssert ((bundle.mask & ~mask) === 0.U, "'B' channel PutPartial contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.ArithmeticData) {
monAssert (edge.master.supportsArithmetic(edge.source(bundle), bundle.size) && edge.slave.emitsArithmeticSafe(edge.address(bundle), bundle.size), "'B' channel carries Arithmetic type unsupported by master" + extra)
monAssert (address_ok, "'B' channel Arithmetic carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Arithmetic carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Arithmetic address not aligned to size" + extra)
monAssert (TLAtomics.isArithmetic(bundle.param), "'B' channel Arithmetic carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'B' channel Arithmetic contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.LogicalData) {
monAssert (edge.master.supportsLogical(edge.source(bundle), bundle.size) && edge.slave.emitsLogicalSafe(edge.address(bundle), bundle.size), "'B' channel carries Logical type unsupported by client" + extra)
monAssert (address_ok, "'B' channel Logical carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Logical carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Logical address not aligned to size" + extra)
monAssert (TLAtomics.isLogical(bundle.param), "'B' channel Logical carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'B' channel Logical contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.Hint) {
monAssert (edge.master.supportsHint(edge.source(bundle), bundle.size) && edge.slave.emitsHintSafe(edge.address(bundle), bundle.size), "'B' channel carries Hint type unsupported by client" + extra)
monAssert (address_ok, "'B' channel Hint carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Hint carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Hint address not aligned to size" + extra)
monAssert (bundle.mask === mask, "'B' channel Hint contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'B' channel Hint is corrupt" + extra)
}
}
def legalizeFormatC(bundle: TLBundleC, edge: TLEdge): Unit = {
monAssert (TLMessages.isC(bundle.opcode), "'C' channel has invalid opcode" + extra)
val source_ok = edge.client.contains(bundle.source)
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val address_ok = edge.manager.containsSafe(edge.address(bundle))
monAssert (visible(edge.address(bundle), bundle.source, edge), "'C' channel carries an address illegal for the specified bank visibility")
when (bundle.opcode === TLMessages.ProbeAck) {
monAssert (address_ok, "'C' channel ProbeAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel ProbeAck carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAck smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ProbeAck address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAck carries invalid report param" + extra)
monAssert (!bundle.corrupt, "'C' channel ProbeAck is corrupt" + extra)
}
when (bundle.opcode === TLMessages.ProbeAckData) {
monAssert (address_ok, "'C' channel ProbeAckData carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel ProbeAckData carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAckData smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ProbeAckData address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAckData carries invalid report param" + extra)
}
when (bundle.opcode === TLMessages.Release) {
monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries Release type unsupported by manager" + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra)
monAssert (source_ok, "'C' channel Release carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel Release smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel Release address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel Release carries invalid report param" + extra)
monAssert (!bundle.corrupt, "'C' channel Release is corrupt" + extra)
}
when (bundle.opcode === TLMessages.ReleaseData) {
monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData type unsupported by manager" + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra)
monAssert (source_ok, "'C' channel ReleaseData carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ReleaseData smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ReleaseData address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ReleaseData carries invalid report param" + extra)
}
when (bundle.opcode === TLMessages.AccessAck) {
monAssert (address_ok, "'C' channel AccessAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel AccessAck carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel AccessAck address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel AccessAck carries invalid param" + extra)
monAssert (!bundle.corrupt, "'C' channel AccessAck is corrupt" + extra)
}
when (bundle.opcode === TLMessages.AccessAckData) {
monAssert (address_ok, "'C' channel AccessAckData carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel AccessAckData carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel AccessAckData address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel AccessAckData carries invalid param" + extra)
}
when (bundle.opcode === TLMessages.HintAck) {
monAssert (address_ok, "'C' channel HintAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel HintAck carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel HintAck address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel HintAck carries invalid param" + extra)
monAssert (!bundle.corrupt, "'C' channel HintAck is corrupt" + extra)
}
}
def legalizeFormatD(bundle: TLBundleD, edge: TLEdge): Unit = {
assume (TLMessages.isD(bundle.opcode), "'D' channel has invalid opcode" + extra)
val source_ok = edge.client.contains(bundle.source)
val sink_ok = bundle.sink < edge.manager.endSinkId.U
val deny_put_ok = edge.manager.mayDenyPut.B
val deny_get_ok = edge.manager.mayDenyGet.B
when (bundle.opcode === TLMessages.ReleaseAck) {
assume (source_ok, "'D' channel ReleaseAck carries invalid source ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel ReleaseAck smaller than a beat" + extra)
assume (bundle.param === 0.U, "'D' channel ReleaseeAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel ReleaseAck is corrupt" + extra)
assume (!bundle.denied, "'D' channel ReleaseAck is denied" + extra)
}
when (bundle.opcode === TLMessages.Grant) {
assume (source_ok, "'D' channel Grant carries invalid source ID" + extra)
assume (sink_ok, "'D' channel Grant carries invalid sink ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel Grant smaller than a beat" + extra)
assume (TLPermissions.isCap(bundle.param), "'D' channel Grant carries invalid cap param" + extra)
assume (bundle.param =/= TLPermissions.toN, "'D' channel Grant carries toN param" + extra)
assume (!bundle.corrupt, "'D' channel Grant is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel Grant is denied" + extra)
}
when (bundle.opcode === TLMessages.GrantData) {
assume (source_ok, "'D' channel GrantData carries invalid source ID" + extra)
assume (sink_ok, "'D' channel GrantData carries invalid sink ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel GrantData smaller than a beat" + extra)
assume (TLPermissions.isCap(bundle.param), "'D' channel GrantData carries invalid cap param" + extra)
assume (bundle.param =/= TLPermissions.toN, "'D' channel GrantData carries toN param" + extra)
assume (!bundle.denied || bundle.corrupt, "'D' channel GrantData is denied but not corrupt" + extra)
assume (deny_get_ok || !bundle.denied, "'D' channel GrantData is denied" + extra)
}
when (bundle.opcode === TLMessages.AccessAck) {
assume (source_ok, "'D' channel AccessAck carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel AccessAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel AccessAck is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel AccessAck is denied" + extra)
}
when (bundle.opcode === TLMessages.AccessAckData) {
assume (source_ok, "'D' channel AccessAckData carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel AccessAckData carries invalid param" + extra)
assume (!bundle.denied || bundle.corrupt, "'D' channel AccessAckData is denied but not corrupt" + extra)
assume (deny_get_ok || !bundle.denied, "'D' channel AccessAckData is denied" + extra)
}
when (bundle.opcode === TLMessages.HintAck) {
assume (source_ok, "'D' channel HintAck carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel HintAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel HintAck is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel HintAck is denied" + extra)
}
}
def legalizeFormatE(bundle: TLBundleE, edge: TLEdge): Unit = {
val sink_ok = bundle.sink < edge.manager.endSinkId.U
monAssert (sink_ok, "'E' channels carries invalid sink ID" + extra)
}
def legalizeFormat(bundle: TLBundle, edge: TLEdge) = {
when (bundle.a.valid) { legalizeFormatA(bundle.a.bits, edge) }
when (bundle.d.valid) { legalizeFormatD(bundle.d.bits, edge) }
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
when (bundle.b.valid) { legalizeFormatB(bundle.b.bits, edge) }
when (bundle.c.valid) { legalizeFormatC(bundle.c.bits, edge) }
when (bundle.e.valid) { legalizeFormatE(bundle.e.bits, edge) }
} else {
monAssert (!bundle.b.valid, "'B' channel valid and not TL-C" + extra)
monAssert (!bundle.c.valid, "'C' channel valid and not TL-C" + extra)
monAssert (!bundle.e.valid, "'E' channel valid and not TL-C" + extra)
}
}
def legalizeMultibeatA(a: DecoupledIO[TLBundleA], edge: TLEdge): Unit = {
val a_first = edge.first(a.bits, a.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (a.valid && !a_first) {
monAssert (a.bits.opcode === opcode, "'A' channel opcode changed within multibeat operation" + extra)
monAssert (a.bits.param === param, "'A' channel param changed within multibeat operation" + extra)
monAssert (a.bits.size === size, "'A' channel size changed within multibeat operation" + extra)
monAssert (a.bits.source === source, "'A' channel source changed within multibeat operation" + extra)
monAssert (a.bits.address=== address,"'A' channel address changed with multibeat operation" + extra)
}
when (a.fire && a_first) {
opcode := a.bits.opcode
param := a.bits.param
size := a.bits.size
source := a.bits.source
address := a.bits.address
}
}
def legalizeMultibeatB(b: DecoupledIO[TLBundleB], edge: TLEdge): Unit = {
val b_first = edge.first(b.bits, b.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (b.valid && !b_first) {
monAssert (b.bits.opcode === opcode, "'B' channel opcode changed within multibeat operation" + extra)
monAssert (b.bits.param === param, "'B' channel param changed within multibeat operation" + extra)
monAssert (b.bits.size === size, "'B' channel size changed within multibeat operation" + extra)
monAssert (b.bits.source === source, "'B' channel source changed within multibeat operation" + extra)
monAssert (b.bits.address=== address,"'B' channel addresss changed with multibeat operation" + extra)
}
when (b.fire && b_first) {
opcode := b.bits.opcode
param := b.bits.param
size := b.bits.size
source := b.bits.source
address := b.bits.address
}
}
def legalizeADSourceFormal(bundle: TLBundle, edge: TLEdge): Unit = {
// Symbolic variable
val sym_source = Wire(UInt(edge.client.endSourceId.W))
// TODO: Connect sym_source to a fixed value for simulation and to a
// free wire in formal
sym_source := 0.U
// Type casting Int to UInt
val maxSourceId = Wire(UInt(edge.client.endSourceId.W))
maxSourceId := edge.client.endSourceId.U
    // Delayed version of sym_source
val sym_source_d = Reg(UInt(edge.client.endSourceId.W))
sym_source_d := sym_source
// These will be constraints for FV setup
Property(
MonitorDirection.Monitor,
(sym_source === sym_source_d),
"sym_source should remain stable",
PropertyClass.Default)
Property(
MonitorDirection.Monitor,
(sym_source <= maxSourceId),
"sym_source should take legal value",
PropertyClass.Default)
val my_resp_pend = RegInit(false.B)
val my_opcode = Reg(UInt())
val my_size = Reg(UInt())
val a_first = bundle.a.valid && edge.first(bundle.a.bits, bundle.a.fire)
val d_first = bundle.d.valid && edge.first(bundle.d.bits, bundle.d.fire)
val my_a_first_beat = a_first && (bundle.a.bits.source === sym_source)
val my_d_first_beat = d_first && (bundle.d.bits.source === sym_source)
val my_clr_resp_pend = (bundle.d.fire && my_d_first_beat)
val my_set_resp_pend = (bundle.a.fire && my_a_first_beat && !my_clr_resp_pend)
when (my_set_resp_pend) {
my_resp_pend := true.B
} .elsewhen (my_clr_resp_pend) {
my_resp_pend := false.B
}
when (my_a_first_beat) {
my_opcode := bundle.a.bits.opcode
my_size := bundle.a.bits.size
}
val my_resp_size = Mux(my_a_first_beat, bundle.a.bits.size, my_size)
val my_resp_opcode = Mux(my_a_first_beat, bundle.a.bits.opcode, my_opcode)
val my_resp_opcode_legal = Wire(Bool())
when ((my_resp_opcode === TLMessages.Get) || (my_resp_opcode === TLMessages.ArithmeticData) ||
(my_resp_opcode === TLMessages.LogicalData)) {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAckData)
} .elsewhen ((my_resp_opcode === TLMessages.PutFullData) || (my_resp_opcode === TLMessages.PutPartialData)) {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAck)
} .otherwise {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.HintAck)
}
monAssert (IfThen(my_resp_pend, !my_a_first_beat),
"Request message should not be sent with a source ID, for which a response message" +
"is already pending (not received until current cycle) for a prior request message" +
"with the same source ID" + extra)
assume (IfThen(my_clr_resp_pend, (my_set_resp_pend || my_resp_pend)),
"Response message should be accepted with a source ID only if a request message with the" +
"same source ID has been accepted or is being accepted in the current cycle" + extra)
assume (IfThen(my_d_first_beat, (my_a_first_beat || my_resp_pend)),
"Response message should be sent with a source ID only if a request message with the" +
"same source ID has been accepted or is being sent in the current cycle" + extra)
assume (IfThen(my_d_first_beat, (bundle.d.bits.size === my_resp_size)),
"If d_valid is 1, then d_size should be same as a_size of the corresponding request" +
"message" + extra)
assume (IfThen(my_d_first_beat, my_resp_opcode_legal),
"If d_valid is 1, then d_opcode should correspond with a_opcode of the corresponding" +
"request message" + extra)
}
def legalizeMultibeatC(c: DecoupledIO[TLBundleC], edge: TLEdge): Unit = {
val c_first = edge.first(c.bits, c.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (c.valid && !c_first) {
monAssert (c.bits.opcode === opcode, "'C' channel opcode changed within multibeat operation" + extra)
monAssert (c.bits.param === param, "'C' channel param changed within multibeat operation" + extra)
monAssert (c.bits.size === size, "'C' channel size changed within multibeat operation" + extra)
monAssert (c.bits.source === source, "'C' channel source changed within multibeat operation" + extra)
monAssert (c.bits.address=== address,"'C' channel address changed with multibeat operation" + extra)
}
when (c.fire && c_first) {
opcode := c.bits.opcode
param := c.bits.param
size := c.bits.size
source := c.bits.source
address := c.bits.address
}
}
def legalizeMultibeatD(d: DecoupledIO[TLBundleD], edge: TLEdge): Unit = {
val d_first = edge.first(d.bits, d.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val sink = Reg(UInt())
val denied = Reg(Bool())
when (d.valid && !d_first) {
assume (d.bits.opcode === opcode, "'D' channel opcode changed within multibeat operation" + extra)
assume (d.bits.param === param, "'D' channel param changed within multibeat operation" + extra)
assume (d.bits.size === size, "'D' channel size changed within multibeat operation" + extra)
assume (d.bits.source === source, "'D' channel source changed within multibeat operation" + extra)
assume (d.bits.sink === sink, "'D' channel sink changed with multibeat operation" + extra)
assume (d.bits.denied === denied, "'D' channel denied changed with multibeat operation" + extra)
}
when (d.fire && d_first) {
opcode := d.bits.opcode
param := d.bits.param
size := d.bits.size
source := d.bits.source
sink := d.bits.sink
denied := d.bits.denied
}
}
def legalizeMultibeat(bundle: TLBundle, edge: TLEdge): Unit = {
legalizeMultibeatA(bundle.a, edge)
legalizeMultibeatD(bundle.d, edge)
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
legalizeMultibeatB(bundle.b, edge)
legalizeMultibeatC(bundle.c, edge)
}
}
//This is left in for almond which doesn't adhere to the tilelink protocol
@deprecated("Use legalizeADSource instead if possible","")
def legalizeADSourceOld(bundle: TLBundle, edge: TLEdge): Unit = {
val inflight = RegInit(0.U(edge.client.endSourceId.W))
val a_first = edge.first(bundle.a.bits, bundle.a.fire)
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
val a_set = WireInit(0.U(edge.client.endSourceId.W))
when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) {
a_set := UIntToOH(bundle.a.bits.source)
assert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra)
}
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
assume((a_set | inflight)(bundle.d.bits.source), "'D' channel acknowledged for nothing inflight" + extra)
}
if (edge.manager.minLatency > 0) {
assume(a_set =/= d_clr || !a_set.orR, "'A' and 'D' concurrent, despite minLatency > 0" + extra)
}
inflight := (inflight | a_set) & ~d_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
assert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeADSource(bundle: TLBundle, edge: TLEdge): Unit = {
val a_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset)
val a_opcode_bus_size = 3 + 1 //opcode size is 3, but add one so that 0 is not mapped to anything
val log_a_opcode_bus_size = log2Ceil(a_opcode_bus_size)
val log_a_size_bus_size = log2Ceil(a_size_bus_size)
def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits
val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) // size up to avoid width error
inflight.suggestName("inflight")
val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
inflight_opcodes.suggestName("inflight_opcodes")
val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
inflight_sizes.suggestName("inflight_sizes")
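// Explanatory note (not in the original source): each source ID owns a fixed-width slot in
// inflight_opcodes/inflight_sizes. When an A request fires, its slot is written with
// (value << 1) | 1, so bit 0 flags the slot as occupied and the lookups below shift that flag
// back out; when the matching D response fires, the whole slot is cleared with a
// size_to_numfullbits mask aligned to the responding source ID.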
val a_first = edge.first(bundle.a.bits, bundle.a.fire)
a_first.suggestName("a_first")
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
d_first.suggestName("d_first")
val a_set = WireInit(0.U(edge.client.endSourceId.W))
val a_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
a_set.suggestName("a_set")
a_set_wo_ready.suggestName("a_set_wo_ready")
val a_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
a_opcodes_set.suggestName("a_opcodes_set")
val a_sizes_set = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
a_sizes_set.suggestName("a_sizes_set")
val a_opcode_lookup = WireInit(0.U((a_opcode_bus_size - 1).W))
a_opcode_lookup.suggestName("a_opcode_lookup")
a_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_a_opcode_bus_size.U) & size_to_numfullbits(1.U << log_a_opcode_bus_size.U)) >> 1.U
val a_size_lookup = WireInit(0.U((1 << log_a_size_bus_size).W))
a_size_lookup.suggestName("a_size_lookup")
a_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_a_size_bus_size.U) & size_to_numfullbits(1.U << log_a_size_bus_size.U)) >> 1.U
val responseMap = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.Grant, TLMessages.Grant))
val responseMapSecondOption = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.GrantData, TLMessages.Grant))
val a_opcodes_set_interm = WireInit(0.U(a_opcode_bus_size.W))
a_opcodes_set_interm.suggestName("a_opcodes_set_interm")
val a_sizes_set_interm = WireInit(0.U(a_size_bus_size.W))
a_sizes_set_interm.suggestName("a_sizes_set_interm")
when (bundle.a.valid && a_first && edge.isRequest(bundle.a.bits)) {
a_set_wo_ready := UIntToOH(bundle.a.bits.source)
}
when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) {
a_set := UIntToOH(bundle.a.bits.source)
a_opcodes_set_interm := (bundle.a.bits.opcode << 1.U) | 1.U
a_sizes_set_interm := (bundle.a.bits.size << 1.U) | 1.U
a_opcodes_set := (a_opcodes_set_interm) << (bundle.a.bits.source << log_a_opcode_bus_size.U)
a_sizes_set := (a_sizes_set_interm) << (bundle.a.bits.source << log_a_size_bus_size.U)
monAssert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra)
}
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
d_clr.suggestName("d_clr")
d_clr_wo_ready.suggestName("d_clr_wo_ready")
val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
d_opcodes_clr.suggestName("d_opcodes_clr")
val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
d_sizes_clr.suggestName("d_sizes_clr")
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr_wo_ready := UIntToOH(bundle.d.bits.source)
}
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
d_opcodes_clr := size_to_numfullbits(1.U << log_a_opcode_bus_size.U) << (bundle.d.bits.source << log_a_opcode_bus_size.U)
d_sizes_clr := size_to_numfullbits(1.U << log_a_size_bus_size.U) << (bundle.d.bits.source << log_a_size_bus_size.U)
}
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
val same_cycle_resp = bundle.a.valid && a_first && edge.isRequest(bundle.a.bits) && (bundle.a.bits.source === bundle.d.bits.source)
assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra)
when (same_cycle_resp) {
assume((bundle.d.bits.opcode === responseMap(bundle.a.bits.opcode)) ||
(bundle.d.bits.opcode === responseMapSecondOption(bundle.a.bits.opcode)), "'D' channel contains improper opcode response" + extra)
assume((bundle.a.bits.size === bundle.d.bits.size), "'D' channel contains improper response size" + extra)
} .otherwise {
assume((bundle.d.bits.opcode === responseMap(a_opcode_lookup)) ||
(bundle.d.bits.opcode === responseMapSecondOption(a_opcode_lookup)), "'D' channel contains improper opcode response" + extra)
assume((bundle.d.bits.size === a_size_lookup), "'D' channel contains improper response size" + extra)
}
}
when(bundle.d.valid && d_first && a_first && bundle.a.valid && (bundle.a.bits.source === bundle.d.bits.source) && !d_release_ack) {
assume((!bundle.d.ready) || bundle.a.ready, "ready check")
}
if (edge.manager.minLatency > 0) {
assume(a_set_wo_ready =/= d_clr_wo_ready || !a_set_wo_ready.orR, "'A' and 'D' concurrent, despite minLatency > 0" + extra)
}
inflight := (inflight | a_set) & ~d_clr
inflight_opcodes := (inflight_opcodes | a_opcodes_set) & ~d_opcodes_clr
inflight_sizes := (inflight_sizes | a_sizes_set) & ~d_sizes_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeCDSource(bundle: TLBundle, edge: TLEdge): Unit = {
val c_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset)
val c_opcode_bus_size = 3 + 1 //opcode size is 3, but add one so that 0 is not mapped to anything
val log_c_opcode_bus_size = log2Ceil(c_opcode_bus_size)
val log_c_size_bus_size = log2Ceil(c_size_bus_size)
def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits
val inflight = RegInit(0.U((2 max edge.client.endSourceId).W))
val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
inflight.suggestName("inflight")
inflight_opcodes.suggestName("inflight_opcodes")
inflight_sizes.suggestName("inflight_sizes")
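// Explanatory note (not in the original source): this mirrors legalizeADSource, but the
// requests tracked here are C-channel Release/ReleaseData messages, and they are retired only
// by a D-channel ReleaseAck with the same source ID, using the same packed per-source slots.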
val c_first = edge.first(bundle.c.bits, bundle.c.fire)
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
c_first.suggestName("c_first")
d_first.suggestName("d_first")
val c_set = WireInit(0.U(edge.client.endSourceId.W))
val c_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
val c_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val c_sizes_set = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
c_set.suggestName("c_set")
c_set_wo_ready.suggestName("c_set_wo_ready")
c_opcodes_set.suggestName("c_opcodes_set")
c_sizes_set.suggestName("c_sizes_set")
val c_opcode_lookup = WireInit(0.U((1 << log_c_opcode_bus_size).W))
val c_size_lookup = WireInit(0.U((1 << log_c_size_bus_size).W))
c_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_c_opcode_bus_size.U) & size_to_numfullbits(1.U << log_c_opcode_bus_size.U)) >> 1.U
c_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_c_size_bus_size.U) & size_to_numfullbits(1.U << log_c_size_bus_size.U)) >> 1.U
c_opcode_lookup.suggestName("c_opcode_lookup")
c_size_lookup.suggestName("c_size_lookup")
val c_opcodes_set_interm = WireInit(0.U(c_opcode_bus_size.W))
val c_sizes_set_interm = WireInit(0.U(c_size_bus_size.W))
c_opcodes_set_interm.suggestName("c_opcodes_set_interm")
c_sizes_set_interm.suggestName("c_sizes_set_interm")
when (bundle.c.valid && c_first && edge.isRequest(bundle.c.bits)) {
c_set_wo_ready := UIntToOH(bundle.c.bits.source)
}
when (bundle.c.fire && c_first && edge.isRequest(bundle.c.bits)) {
c_set := UIntToOH(bundle.c.bits.source)
c_opcodes_set_interm := (bundle.c.bits.opcode << 1.U) | 1.U
c_sizes_set_interm := (bundle.c.bits.size << 1.U) | 1.U
c_opcodes_set := (c_opcodes_set_interm) << (bundle.c.bits.source << log_c_opcode_bus_size.U)
c_sizes_set := (c_sizes_set_interm) << (bundle.c.bits.source << log_c_size_bus_size.U)
monAssert(!inflight(bundle.c.bits.source), "'C' channel re-used a source ID" + extra)
}
val c_probe_ack = bundle.c.bits.opcode === TLMessages.ProbeAck || bundle.c.bits.opcode === TLMessages.ProbeAckData
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
d_clr.suggestName("d_clr")
d_clr_wo_ready.suggestName("d_clr_wo_ready")
d_opcodes_clr.suggestName("d_opcodes_clr")
d_sizes_clr.suggestName("d_sizes_clr")
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
d_clr_wo_ready := UIntToOH(bundle.d.bits.source)
}
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
d_opcodes_clr := size_to_numfullbits(1.U << log_c_opcode_bus_size.U) << (bundle.d.bits.source << log_c_opcode_bus_size.U)
d_sizes_clr := size_to_numfullbits(1.U << log_c_size_bus_size.U) << (bundle.d.bits.source << log_c_size_bus_size.U)
}
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
val same_cycle_resp = bundle.c.valid && c_first && edge.isRequest(bundle.c.bits) && (bundle.c.bits.source === bundle.d.bits.source)
assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra)
when (same_cycle_resp) {
assume((bundle.d.bits.size === bundle.c.bits.size), "'D' channel contains improper response size" + extra)
} .otherwise {
assume((bundle.d.bits.size === c_size_lookup), "'D' channel contains improper response size" + extra)
}
}
when(bundle.d.valid && d_first && c_first && bundle.c.valid && (bundle.c.bits.source === bundle.d.bits.source) && d_release_ack && !c_probe_ack) {
assume((!bundle.d.ready) || bundle.c.ready, "ready check")
}
if (edge.manager.minLatency > 0) {
when (c_set_wo_ready.orR) {
assume(c_set_wo_ready =/= d_clr_wo_ready, "'C' and 'D' concurrent, despite minLatency > 0" + extra)
}
}
inflight := (inflight | c_set) & ~d_clr
inflight_opcodes := (inflight_opcodes | c_opcodes_set) & ~d_opcodes_clr
inflight_sizes := (inflight_sizes | c_sizes_set) & ~d_sizes_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.c.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeDESink(bundle: TLBundle, edge: TLEdge): Unit = {
val inflight = RegInit(0.U(edge.manager.endSinkId.W))
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
val e_first = true.B
val d_set = WireInit(0.U(edge.manager.endSinkId.W))
when (bundle.d.fire && d_first && edge.isRequest(bundle.d.bits)) {
d_set := UIntToOH(bundle.d.bits.sink)
assume(!inflight(bundle.d.bits.sink), "'D' channel re-used a sink ID" + extra)
}
val e_clr = WireInit(0.U(edge.manager.endSinkId.W))
when (bundle.e.fire && e_first && edge.isResponse(bundle.e.bits)) {
e_clr := UIntToOH(bundle.e.bits.sink)
monAssert((d_set | inflight)(bundle.e.bits.sink), "'E' channel acknowledged for nothing inflight" + extra)
}
// edge.client.minLatency applies to BC, not DE
inflight := (inflight | d_set) & ~e_clr
}
def legalizeUnique(bundle: TLBundle, edge: TLEdge): Unit = {
val sourceBits = log2Ceil(edge.client.endSourceId)
val tooBig = 14 // >16kB worth of flight information gets to be too much
if (sourceBits > tooBig) {
println(s"WARNING: TLMonitor instantiated on a bus with source bits (${sourceBits}) > ${tooBig}; A=>D transaction flight will not be checked")
} else {
if (args.edge.params(TestplanTestType).simulation) {
if (args.edge.params(TLMonitorStrictMode)) {
legalizeADSource(bundle, edge)
legalizeCDSource(bundle, edge)
} else {
legalizeADSourceOld(bundle, edge)
}
}
if (args.edge.params(TestplanTestType).formal) {
legalizeADSourceFormal(bundle, edge)
}
}
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
// legalizeBCSourceAddress(bundle, edge) // too much state needed to synthesize...
val sinkBits = log2Ceil(edge.manager.endSinkId)
if (sinkBits > tooBig) {
println(s"WARNING: TLMonitor instantiated on a bus with sink bits (${sinkBits}) > ${tooBig}; D=>E transaction flight will not be checked")
} else {
legalizeDESink(bundle, edge)
}
}
}
def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit = {
legalizeFormat (bundle, edge)
legalizeMultibeat (bundle, edge)
legalizeUnique (bundle, edge)
}
}
File Misc.scala:
// See LICENSE.Berkeley for license details.
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util._
import chisel3.util.random.LFSR
import org.chipsalliance.cde.config.Parameters
import scala.math._
class ParameterizedBundle(implicit p: Parameters) extends Bundle
trait Clocked extends Bundle {
val clock = Clock()
val reset = Bool()
}
object DecoupledHelper {
def apply(rvs: Bool*) = new DecoupledHelper(rvs)
}
class DecoupledHelper(val rvs: Seq[Bool]) {
def fire(exclude: Bool, includes: Bool*) = {
require(rvs.contains(exclude), "Excluded Bool not present in DecoupledHelper! Note that DecoupledHelper uses referential equality for exclusion! If you don't want to exclude anything, use fire()!")
(rvs.filter(_ ne exclude) ++ includes).reduce(_ && _)
}
def fire() = {
rvs.reduce(_ && _)
}
}
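// Illustrative sketch (not part of the original file): a typical use of DecoupledHelper to
// coordinate a multi-way handshake. Each side asks whether everyone *else* would fire by
// excluding its own term, which must be the exact Bool reference passed to apply. The port
// and signal names below are assumptions for the example only.
object DecoupledHelperExample {
  def connect(in: DecoupledIO[UInt], out: DecoupledIO[UInt], notBusy: Bool): Unit = {
    val helper = DecoupledHelper(in.valid, out.ready, notBusy)
    in.ready := helper.fire(in.valid)   // everyone except the producer: out.ready && notBusy
    out.valid := helper.fire(out.ready) // everyone except the consumer: in.valid && notBusy
    out.bits := in.bits
  }
}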
object MuxT {
def apply[T <: Data, U <: Data](cond: Bool, con: (T, U), alt: (T, U)): (T, U) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2))
def apply[T <: Data, U <: Data, W <: Data](cond: Bool, con: (T, U, W), alt: (T, U, W)): (T, U, W) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3))
def apply[T <: Data, U <: Data, W <: Data, X <: Data](cond: Bool, con: (T, U, W, X), alt: (T, U, W, X)): (T, U, W, X) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3), Mux(cond, con._4, alt._4))
}
/** Creates a cascade of n MuxTs to search for a key value. */
object MuxTLookup {
def apply[S <: UInt, T <: Data, U <: Data](key: S, default: (T, U), mapping: Seq[(S, (T, U))]): (T, U) = {
var res = default
for ((k, v) <- mapping.reverse)
res = MuxT(k === key, v, res)
res
}
def apply[S <: UInt, T <: Data, U <: Data, W <: Data](key: S, default: (T, U, W), mapping: Seq[(S, (T, U, W))]): (T, U, W) = {
var res = default
for ((k, v) <- mapping.reverse)
res = MuxT(k === key, v, res)
res
}
}
object ValidMux {
def apply[T <: Data](v1: ValidIO[T], v2: ValidIO[T]*): ValidIO[T] = {
apply(v1 +: v2.toSeq)
}
def apply[T <: Data](valids: Seq[ValidIO[T]]): ValidIO[T] = {
val out = Wire(Valid(valids.head.bits.cloneType))
out.valid := valids.map(_.valid).reduce(_ || _)
out.bits := MuxCase(valids.head.bits,
valids.map(v => (v.valid -> v.bits)))
out
}
}
object Str
{
def apply(s: String): UInt = {
var i = BigInt(0)
require(s.forall(validChar _))
for (c <- s)
i = (i << 8) | c
i.U((s.length*8).W)
}
def apply(x: Char): UInt = {
require(validChar(x))
x.U(8.W)
}
def apply(x: UInt): UInt = apply(x, 10)
def apply(x: UInt, radix: Int): UInt = {
val rad = radix.U
val w = x.getWidth
require(w > 0)
var q = x
var s = digit(q % rad)
for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
q = q / rad
s = Cat(Mux((radix == 10).B && q === 0.U, Str(' '), digit(q % rad)), s)
}
s
}
def apply(x: SInt): UInt = apply(x, 10)
def apply(x: SInt, radix: Int): UInt = {
val neg = x < 0.S
val abs = x.abs.asUInt
if (radix != 10) {
Cat(Mux(neg, Str('-'), Str(' ')), Str(abs, radix))
} else {
val rad = radix.U
val w = abs.getWidth
require(w > 0)
var q = abs
var s = digit(q % rad)
var needSign = neg
for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
q = q / rad
val placeSpace = q === 0.U
val space = Mux(needSign, Str('-'), Str(' '))
needSign = needSign && !placeSpace
s = Cat(Mux(placeSpace, space, digit(q % rad)), s)
}
Cat(Mux(needSign, Str('-'), Str(' ')), s)
}
}
private def digit(d: UInt): UInt = Mux(d < 10.U, Str('0')+d, Str(('a'-10).toChar)+d)(7,0)
private def validChar(x: Char) = x == (x & 0xFF)
}
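// Illustrative examples (not part of the original file): Str("ok") elaborates to
// 0x6f6b.U(16.W) (one byte per character, first character in the high byte), while
// Str(x: UInt) renders x as space-padded decimal ASCII digits, e.g. for use in printf.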
object Split
{
def apply(x: UInt, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n0), x.extract(n0-1,0))
}
def apply(x: UInt, n1: Int, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0))
}
def apply(x: UInt, n2: Int, n1: Int, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n2), x.extract(n2-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0))
}
}
object Random
{
def apply(mod: Int, random: UInt): UInt = {
if (isPow2(mod)) random.extract(log2Ceil(mod)-1,0)
else PriorityEncoder(partition(apply(1 << log2Up(mod*8), random), mod))
}
def apply(mod: Int): UInt = apply(mod, randomizer)
def oneHot(mod: Int, random: UInt): UInt = {
if (isPow2(mod)) UIntToOH(random(log2Up(mod)-1,0))
else PriorityEncoderOH(partition(apply(1 << log2Up(mod*8), random), mod)).asUInt
}
def oneHot(mod: Int): UInt = oneHot(mod, randomizer)
private def randomizer = LFSR(16)
private def partition(value: UInt, slices: Int) =
Seq.tabulate(slices)(i => value < (((i + 1) << value.getWidth) / slices).U)
}
object Majority {
def apply(in: Set[Bool]): Bool = {
val n = (in.size >> 1) + 1
val clauses = in.subsets(n).map(_.reduce(_ && _))
clauses.reduce(_ || _)
}
def apply(in: Seq[Bool]): Bool = apply(in.toSet)
def apply(in: UInt): Bool = apply(in.asBools.toSet)
}
object PopCountAtLeast {
private def two(x: UInt): (Bool, Bool) = x.getWidth match {
case 1 => (x.asBool, false.B)
case n =>
val half = x.getWidth / 2
val (leftOne, leftTwo) = two(x(half - 1, 0))
val (rightOne, rightTwo) = two(x(x.getWidth - 1, half))
(leftOne || rightOne, leftTwo || rightTwo || (leftOne && rightOne))
}
def apply(x: UInt, n: Int): Bool = n match {
case 0 => true.B
case 1 => x.orR
case 2 => two(x)._2
case 3 => PopCount(x) >= n.U
}
}
// This gets used everywhere, so make the smallest circuit possible ...
// Given an address and size, create a mask of beatBytes size
// eg: (0x3, 0, 4) => 0001, (0x3, 1, 4) => 0011, (0x3, 2, 4) => 1111
// groupBy applies an interleaved OR reduction; groupBy=2 takes 0010 => 01
object MaskGen {
def apply(addr_lo: UInt, lgSize: UInt, beatBytes: Int, groupBy: Int = 1): UInt = {
require (groupBy >= 1 && beatBytes >= groupBy)
require (isPow2(beatBytes) && isPow2(groupBy))
val lgBytes = log2Ceil(beatBytes)
val sizeOH = UIntToOH(lgSize | 0.U(log2Up(beatBytes).W), log2Up(beatBytes)) | (groupBy*2 - 1).U
def helper(i: Int): Seq[(Bool, Bool)] = {
if (i == 0) {
Seq((lgSize >= lgBytes.asUInt, true.B))
} else {
val sub = helper(i-1)
val size = sizeOH(lgBytes - i)
val bit = addr_lo(lgBytes - i)
val nbit = !bit
Seq.tabulate (1 << i) { j =>
val (sub_acc, sub_eq) = sub(j/2)
val eq = sub_eq && (if (j % 2 == 1) bit else nbit)
val acc = sub_acc || (size && eq)
(acc, eq)
}
}
}
if (groupBy == beatBytes) 1.U else
Cat(helper(lgBytes-log2Ceil(groupBy)).map(_._1).reverse)
}
}
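// Illustrative sketch (not part of the original file): MaskGen is typically used to turn the
// low address bits and log2 size of an access into a per-byte-lane strobe for one beat. The
// 8-byte beat width below is an assumption for the example only.
object MaskGenExample {
  def strobe(addrLo: UInt, lgSize: UInt): UInt =
    MaskGen(addrLo, lgSize, beatBytes = 8) // 8 bits, one per byte lane of the beat
}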
File PlusArg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.experimental._
import chisel3.util.HasBlackBoxResource
@deprecated("This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05")
case class PlusArgInfo(default: BigInt, docstring: String)
/** Case class for PlusArg information
*
* @tparam A scala type of the PlusArg value
* @param default optional default value
* @param docstring text to include in the help
* @param doctype description of the Verilog type of the PlusArg value (e.g. STRING, INT)
*/
private case class PlusArgContainer[A](default: Option[A], docstring: String, doctype: String)
/** Typeclass for converting a type to a doctype string
* @tparam A some type
*/
trait Doctypeable[A] {
/** Return the doctype string for some option */
def toDoctype(a: Option[A]): String
}
/** Object containing implementations of the Doctypeable typeclass */
object Doctypes {
/** Converts an Int => "INT" */
implicit val intToDoctype = new Doctypeable[Int] { def toDoctype(a: Option[Int]) = "INT" }
/** Converts a BigInt => "INT" */
implicit val bigIntToDoctype = new Doctypeable[BigInt] { def toDoctype(a: Option[BigInt]) = "INT" }
/** Converts a String => "STRING" */
implicit val stringToDoctype = new Doctypeable[String] { def toDoctype(a: Option[String]) = "STRING" }
}
class plusarg_reader(val format: String, val default: BigInt, val docstring: String, val width: Int) extends BlackBox(Map(
"FORMAT" -> StringParam(format),
"DEFAULT" -> IntParam(default),
"WIDTH" -> IntParam(width)
)) with HasBlackBoxResource {
val io = IO(new Bundle {
val out = Output(UInt(width.W))
})
addResource("/vsrc/plusarg_reader.v")
}
/* This wrapper class has no outputs, making it clear it is a simulation-only construct */
class PlusArgTimeout(val format: String, val default: BigInt, val docstring: String, val width: Int) extends Module {
val io = IO(new Bundle {
val count = Input(UInt(width.W))
})
val max = Module(new plusarg_reader(format, default, docstring, width)).io.out
when (max > 0.U) {
assert (io.count < max, s"Timeout exceeded: $docstring")
}
}
import Doctypes._
object PlusArg
{
/** PlusArg("foo") will return 42.U if the simulation is run with +foo=42
* Do not use this as an initial register value. The value is set in an
* initial block and thus accessing it from another initial is racy.
* Add a docstring to document the arg, which can be dumped in an elaboration
* pass.
*/
def apply(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32): UInt = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new plusarg_reader(name + "=%d", default, docstring, width)).io.out
}
/** PlusArg.timeout(name, default, docstring)(count) will use chisel.assert
* to kill the simulation when count exceeds the specified integer argument.
* Default 0 will never assert.
*/
def timeout(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32)(count: UInt): Unit = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new PlusArgTimeout(name + "=%d", default, docstring, width)).io.count := count
}
}
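// Illustrative sketch (not part of the original file): how PlusArg is typically consumed from
// inside a module. The plusarg names and docstrings below are assumptions for the example only.
object PlusArgExample {
  // reads +verbose_cycles=N from the simulator command line (default 0)
  def verboseCycles(): UInt = PlusArg("verbose_cycles", docstring = "Cycles to print verbosely")
  // kills simulation when count exceeds +example_timeout=N (0 disables the check)
  def armTimeout(count: UInt): Unit = PlusArg.timeout("example_timeout", docstring = "Example watchdog")(count)
}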
object PlusArgArtefacts {
private var artefacts: Map[String, PlusArgContainer[_]] = Map.empty
/* Add a new PlusArg */
@deprecated(
"Use `Some(BigInt)` to specify a `default` value. This will be removed in Rocket Chip 2020.08",
"Rocket Chip 2020.05"
)
def append(name: String, default: BigInt, docstring: String): Unit = append(name, Some(default), docstring)
/** Add a new PlusArg
*
* @tparam A scala type of the PlusArg value
* @param name name for the PlusArg
* @param default optional default value
* @param docstring text to include in the help
*/
def append[A : Doctypeable](name: String, default: Option[A], docstring: String): Unit =
artefacts = artefacts ++
Map(name -> PlusArgContainer(default, docstring, implicitly[Doctypeable[A]].toDoctype(default)))
/* From plus args, generate help text */
private def serializeHelp_cHeader(tab: String = ""): String = artefacts
.map{ case(arg, info) =>
s"""|$tab+$arg=${info.doctype}\\n\\
|$tab${" "*20}${info.docstring}\\n\\
|""".stripMargin ++ info.default.map{ case default =>
s"$tab${" "*22}(default=${default})\\n\\\n"}.getOrElse("")
}.toSeq.mkString("\\n\\\n") ++ "\""
/* From plus args, generate a char array of their names */
private def serializeArray_cHeader(tab: String = ""): String = {
val prettyTab = tab + " " * 44 // Length of 'static const ...'
s"${tab}static const char * verilog_plusargs [] = {\\\n" ++
artefacts
.map{ case(arg, _) => s"""$prettyTab"$arg",\\\n""" }
.mkString("")++
s"${prettyTab}0};"
}
/* Generate C code to be included in emulator.cc that helps with
* argument parsing based on available Verilog PlusArgs */
def serialize_cHeader(): String =
s"""|#define PLUSARG_USAGE_OPTIONS \"EMULATOR VERILOG PLUSARGS\\n\\
|${serializeHelp_cHeader(" "*7)}
|${serializeArray_cHeader()}
|""".stripMargin
}
File package.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip
import chisel3._
import chisel3.util._
import scala.math.min
import scala.collection.{immutable, mutable}
package object util {
implicit class UnzippableOption[S, T](val x: Option[(S, T)]) {
def unzip = (x.map(_._1), x.map(_._2))
}
implicit class UIntIsOneOf(private val x: UInt) extends AnyVal {
def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR
def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq)
}
implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal {
/** Like Vec.apply(idx), but tolerates indices of mismatched width */
def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0))
}
implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal {
def apply(idx: UInt): T = {
if (x.size <= 1) {
x.head
} else if (!isPow2(x.size)) {
// For non-power-of-2 seqs, reflect elements to simplify decoder
(x ++ x.takeRight(x.size & -x.size)).toSeq(idx)
} else {
// Ignore MSBs of idx
val truncIdx =
if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx
else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0)
x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) }
}
}
def extract(idx: UInt): T = VecInit(x).extract(idx)
def asUInt: UInt = Cat(x.map(_.asUInt).reverse)
def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n)
def rotate(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n)
def rotateRight(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
}
// allow bitwise ops on Seq[Bool] just like UInt
implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal {
def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b }
def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b }
def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b }
def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x
def >> (n: Int): Seq[Bool] = x drop n
def unary_~ : Seq[Bool] = x.map(!_)
def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_)
def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_)
def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_)
private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B)
}
implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal {
def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable))
def getElements: Seq[Element] = x match {
case e: Element => Seq(e)
case a: Aggregate => a.getElements.flatMap(_.getElements)
}
}
/** Any Data subtype that has a Bool member named valid. */
type DataCanBeValid = Data { val valid: Bool }
implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal {
def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable)
}
implicit class StringToAugmentedString(private val x: String) extends AnyVal {
/** converts from camel case to underscores, also removing all spaces */
def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") {
case (acc, c) if c.isUpper => acc + "_" + c.toLower
case (acc, c) if c == ' ' => acc
case (acc, c) => acc + c
}
/** converts spaces or underscores to hyphens, also lowering case */
def kebab: String = x.toLowerCase map {
case ' ' => '-'
case '_' => '-'
case c => c
}
def named(name: Option[String]): String = {
x + name.map("_named_" + _ ).getOrElse("_with_no_name")
}
def named(name: String): String = named(Some(name))
}
implicit def uintToBitPat(x: UInt): BitPat = BitPat(x)
implicit def wcToUInt(c: WideCounter): UInt = c.value
implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal {
def sextTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x)
}
def padTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(0.U((n - x.getWidth).W), x)
}
// shifts left by n if n >= 0, or right by -n if n < 0
def << (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << n(w-1, 0)
Mux(n(w), shifted >> (1 << w), shifted)
}
// shifts right by n if n >= 0, or left by -n if n < 0
def >> (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << (1 << w) >> n(w-1, 0)
Mux(n(w), shifted, shifted >> (1 << w))
}
// Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts
def extract(hi: Int, lo: Int): UInt = {
require(hi >= lo-1)
if (hi == lo-1) 0.U
else x(hi, lo)
}
// Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts
def extractOption(hi: Int, lo: Int): Option[UInt] = {
require(hi >= lo-1)
if (hi == lo-1) None
else Some(x(hi, lo))
}
// like x & ~y, but first truncate or zero-extend y to x's width
def andNot(y: UInt): UInt = x & ~(y | (x & 0.U))
def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n)
def rotateRight(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r))
}
}
def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n))
def rotateLeft(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r))
}
}
// compute (this + y) % n, given (this < n) and (y < n)
def addWrap(y: UInt, n: Int): UInt = {
val z = x +& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0)
}
// compute (this - y) % n, given (this < n) and (y < n)
def subWrap(y: UInt, n: Int): UInt = {
val z = x -& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0)
}
def grouped(width: Int): Seq[UInt] =
(0 until x.getWidth by width).map(base => x(base + width - 1, base))
def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds
def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x)
// Like >=, but prevents x-prop for ('x >= 0)
def >== (y: UInt): Bool = x >= y || y === 0.U
}
implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal {
def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y)
def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y)
}
implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal {
def toInt: Int = if (x) 1 else 0
// this one's snagged from scalaz
def option[T](z: => T): Option[T] = if (x) Some(z) else None
}
implicit class IntToAugmentedInt(private val x: Int) extends AnyVal {
// exact log2
def log2: Int = {
require(isPow2(x))
log2Ceil(x)
}
}
def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x)
def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x))
def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0)
def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1)
def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None
// Fill 1s from low bits to high bits
def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth)
def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0))
helper(1, x)(width-1, 0)
}
// Fill 1s from high bits to low bits
def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth)
def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x >> s))
helper(1, x)(width-1, 0)
}
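// Illustrative examples (not part of the original file): with a 5-bit input,
// leftOR("b00100".U(5.W)) gives "b11100" and rightOR("b00100".U(5.W)) gives "b00111".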
def OptimizationBarrier[T <: Data](in: T): T = {
val barrier = Module(new Module {
val io = IO(new Bundle {
val x = Input(chiselTypeOf(in))
val y = Output(chiselTypeOf(in))
})
io.y := io.x
override def desiredName = s"OptimizationBarrier_${in.typeName}"
})
barrier.io.x := in
barrier.io.y
}
/** Similar to Seq.groupBy except this returns a Seq instead of a Map
* Useful for deterministic code generation
*/
def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = {
val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]]
for (x <- xs) {
val key = f(x)
val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A])
l += x
}
map.view.map({ case (k, vs) => k -> vs.toList }).toList
}
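// Illustrative example (not part of the original file): keys appear in first-seen order, so
// groupByIntoSeq(Seq(3, 1, 4, 1, 5))(_ % 2) == Seq(1 -> Seq(3, 1, 1, 5), 0 -> Seq(4)).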
def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match {
case 1 => List.fill(n)(in.head)
case x if x == n => in
case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in")
}
// HeterogeneousBag moved to standalone diplomacy
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts)
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag
}
File Parameters.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.diplomacy
import chisel3._
import chisel3.util.{DecoupledIO, Queue, ReadyValidIO, isPow2, log2Ceil, log2Floor}
import freechips.rocketchip.util.ShiftQueue
/** Options for describing the attributes of memory regions */
object RegionType {
// Define the 'more relaxed than' ordering
val cases = Seq(CACHED, TRACKED, UNCACHED, IDEMPOTENT, VOLATILE, PUT_EFFECTS, GET_EFFECTS)
sealed trait T extends Ordered[T] {
def compare(that: T): Int = cases.indexOf(that) compare cases.indexOf(this)
}
case object CACHED extends T // an intermediate agent may have cached a copy of the region for you
case object TRACKED extends T // the region may have been cached by another master, but coherence is being provided
case object UNCACHED extends T // the region has not been cached yet, but should be cached when possible
case object IDEMPOTENT extends T // gets return most recently put content, but content should not be cached
case object VOLATILE extends T // content may change without a put, but puts and gets have no side effects
case object PUT_EFFECTS extends T // puts produce side effects and so must not be combined/delayed
case object GET_EFFECTS extends T // gets produce side effects and so must not be issued speculatively
}
// A potentially empty half-open range; [start, end)
case class IdRange(start: Int, end: Int) extends Ordered[IdRange]
{
require (start >= 0, s"Ids cannot be negative, but got: $start.")
require (start <= end, "Id ranges cannot be negative.")
def compare(x: IdRange) = {
val primary = (this.start - x.start).signum
val secondary = (x.end - this.end).signum
if (primary != 0) primary else secondary
}
def overlaps(x: IdRange) = start < x.end && x.start < end
def contains(x: IdRange) = start <= x.start && x.end <= end
def contains(x: Int) = start <= x && x < end
def contains(x: UInt) =
if (size == 0) {
false.B
} else if (size == 1) { // simple comparison
x === start.U
} else {
// find index of largest different bit
val largestDeltaBit = log2Floor(start ^ (end-1))
val smallestCommonBit = largestDeltaBit + 1 // may not exist in x
val uncommonMask = (1 << smallestCommonBit) - 1
val uncommonBits = (x | 0.U(smallestCommonBit.W))(largestDeltaBit, 0)
// the prefix must match exactly (note: may shift ALL bits away)
(x >> smallestCommonBit) === (start >> smallestCommonBit).U &&
// firrtl constant prop range analysis can eliminate these two:
(start & uncommonMask).U <= uncommonBits &&
uncommonBits <= ((end-1) & uncommonMask).U
}
def shift(x: Int) = IdRange(start+x, end+x)
def size = end - start
def isEmpty = end == start
def range = start until end
}
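// Illustrative example (not part of the original file): IdRange(4, 8) covers source IDs 4
// through 7, so contains(6) is true and contains(8) is false; the UInt overload synthesizes
// the same check as a shared-prefix compare plus a small bounds check on the low bits.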
object IdRange
{
def overlaps(s: Seq[IdRange]) = if (s.isEmpty) None else {
val ranges = s.sorted
(ranges.tail zip ranges.init) find { case (a, b) => a overlaps b }
}
}
// A potentially empty inclusive range of 2-powers [min, max] (in bytes)
case class TransferSizes(min: Int, max: Int)
{
def this(x: Int) = this(x, x)
require (min <= max, s"Min transfer $min > max transfer $max")
require (min >= 0 && max >= 0, s"TransferSizes must be positive, got: ($min, $max)")
require (max == 0 || isPow2(max), s"TransferSizes must be a power of 2, got: $max")
require (min == 0 || isPow2(min), s"TransferSizes must be a power of 2, got: $min")
require (max == 0 || min != 0, s"TransferSize 0 is forbidden unless (0,0), got: ($min, $max)")
def none = min == 0
def contains(x: Int) = isPow2(x) && min <= x && x <= max
def containsLg(x: Int) = contains(1 << x)
def containsLg(x: UInt) =
if (none) false.B
else if (min == max) { log2Ceil(min).U === x }
else { log2Ceil(min).U <= x && x <= log2Ceil(max).U }
def contains(x: TransferSizes) = x.none || (min <= x.min && x.max <= max)
def intersect(x: TransferSizes) =
if (x.max < min || max < x.min) TransferSizes.none
else TransferSizes(scala.math.max(min, x.min), scala.math.min(max, x.max))
// Not a union, because the result may contain sizes contained by neither term
// NOT TO BE CONFUSED WITH COVERPOINTS
def mincover(x: TransferSizes) = {
if (none) {
x
} else if (x.none) {
this
} else {
TransferSizes(scala.math.min(min, x.min), scala.math.max(max, x.max))
}
}
override def toString() = "TransferSizes[%d, %d]".format(min, max)
}
object TransferSizes {
def apply(x: Int) = new TransferSizes(x)
val none = new TransferSizes(0)
def mincover(seq: Seq[TransferSizes]) = seq.foldLeft(none)(_ mincover _)
def intersect(seq: Seq[TransferSizes]) = seq.reduce(_ intersect _)
implicit def asBool(x: TransferSizes) = !x.none
}
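// Illustrative example (not part of the original file): TransferSizes(4, 64) accepts any
// power-of-two size from 4 to 64 bytes, so contains(16) is true and contains(128) is false;
// TransferSizes(4, 64).intersect(TransferSizes(32, 256)) == TransferSizes(32, 64).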
// AddressSets specify the address space managed by the manager
// Base is the base address, and mask are the bits consumed by the manager
// e.g: base=0x200, mask=0xff describes a device managing 0x200-0x2ff
// e.g: base=0x1000, mask=0xf0f describes a device managing 0x1000-0x100f, 0x1100-0x110f, ...
case class AddressSet(base: BigInt, mask: BigInt) extends Ordered[AddressSet]
{
// Forbid misaligned base address (and empty sets)
require ((base & mask) == 0, s"Mis-aligned AddressSets are forbidden, got: ${this.toString}")
require (base >= 0, s"AddressSet negative base is ambiguous: $base") // TL2 address widths are not fixed => negative is ambiguous
// We do allow negative mask (=> ignore all high bits)
def contains(x: BigInt) = ((x ^ base) & ~mask) == 0
def contains(x: UInt) = ((x ^ base.U).zext & (~mask).S) === 0.S
// turn x into an address contained in this set
def legalize(x: UInt): UInt = base.U | (mask.U & x)
// overlap iff bitwise: both care (~mask0 & ~mask1) => both equal (base0=base1)
def overlaps(x: AddressSet) = (~(mask | x.mask) & (base ^ x.base)) == 0
// contains iff bitwise: x.mask => mask && contains(x.base)
def contains(x: AddressSet) = ((x.mask | (base ^ x.base)) & ~mask) == 0
// The number of bytes to which the manager must be aligned
def alignment = ((mask + 1) & ~mask)
// Is this a contiguous memory range
def contiguous = alignment == mask+1
def finite = mask >= 0
def max = { require (finite, "Max cannot be calculated on infinite mask"); base | mask }
// Widen the match function to ignore all bits in imask
def widen(imask: BigInt) = AddressSet(base & ~imask, mask | imask)
// Return an AddressSet that only contains the addresses both sets contain
def intersect(x: AddressSet): Option[AddressSet] = {
if (!overlaps(x)) {
None
} else {
val r_mask = mask & x.mask
val r_base = base | x.base
Some(AddressSet(r_base, r_mask))
}
}
def subtract(x: AddressSet): Seq[AddressSet] = {
intersect(x) match {
case None => Seq(this)
case Some(remove) => AddressSet.enumerateBits(mask & ~remove.mask).map { bit =>
val nmask = (mask & (bit-1)) | remove.mask
val nbase = (remove.base ^ bit) & ~nmask
AddressSet(nbase, nmask)
}
}
}
// AddressSets have one natural Ordering (the containment order, if contiguous)
def compare(x: AddressSet) = {
val primary = (this.base - x.base).signum // smallest address first
val secondary = (x.mask - this.mask).signum // largest mask first
if (primary != 0) primary else secondary
}
// We always want to see things in hex
override def toString() = {
if (mask >= 0) {
"AddressSet(0x%x, 0x%x)".format(base, mask)
} else {
"AddressSet(0x%x, ~0x%x)".format(base, ~mask)
}
}
def toRanges = {
require (finite, "Ranges cannot be calculated on infinite mask")
val size = alignment
val fragments = mask & ~(size-1)
val bits = bitIndexes(fragments)
(BigInt(0) until (BigInt(1) << bits.size)).map { i =>
val off = bitIndexes(i).foldLeft(base) { case (a, b) => a.setBit(bits(b)) }
AddressRange(off, size)
}
}
}
object AddressSet
{
val everything = AddressSet(0, -1)
def misaligned(base: BigInt, size: BigInt, tail: Seq[AddressSet] = Seq()): Seq[AddressSet] = {
if (size == 0) tail.reverse else {
val maxBaseAlignment = base & (-base) // 0 for infinite (LSB)
val maxSizeAlignment = BigInt(1) << log2Floor(size) // MSB of size
val step =
if (maxBaseAlignment == 0 || maxBaseAlignment > maxSizeAlignment)
maxSizeAlignment else maxBaseAlignment
misaligned(base+step, size-step, AddressSet(base, step-1) +: tail)
}
}
def unify(seq: Seq[AddressSet], bit: BigInt): Seq[AddressSet] = {
// Pair terms up by ignoring 'bit'
seq.distinct.groupBy(x => x.copy(base = x.base & ~bit)).map { case (key, seq) =>
if (seq.size == 1) {
seq.head // singleton -> unaffected
} else {
key.copy(mask = key.mask | bit) // pair - widen mask by bit
}
}.toList
}
def unify(seq: Seq[AddressSet]): Seq[AddressSet] = {
val bits = seq.map(_.base).foldLeft(BigInt(0))(_ | _)
AddressSet.enumerateBits(bits).foldLeft(seq) { case (acc, bit) => unify(acc, bit) }.sorted
}
def enumerateMask(mask: BigInt): Seq[BigInt] = {
def helper(id: BigInt, tail: Seq[BigInt]): Seq[BigInt] =
if (id == mask) (id +: tail).reverse else helper(((~mask | id) + 1) & mask, id +: tail)
helper(0, Nil)
}
def enumerateBits(mask: BigInt): Seq[BigInt] = {
def helper(x: BigInt): Seq[BigInt] = {
if (x == 0) {
Nil
} else {
val bit = x & (-x)
bit +: helper(x & ~bit)
}
}
helper(mask)
}
}
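// Illustrative sketch (not part of the original file): the base/mask encoding in action.
// The addresses below are assumptions for the example only.
object AddressSetExample {
  val device = AddressSet(0x1000, 0xfff)  // contiguous: 0x1000-0x1fff
  val strided = AddressSet(0x1000, 0xf0f) // strided: 0x1000-0x100f, 0x1100-0x110f, ...
  require(device.contains(BigInt(0x1abc)) && !strided.contains(BigInt(0x1abc)))
  // splitting a misaligned region into power-of-two sets:
  // AddressSet.misaligned(0x1000, 0x3000) == Seq(AddressSet(0x1000, 0xfff), AddressSet(0x2000, 0x1fff))
}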
case class BufferParams(depth: Int, flow: Boolean, pipe: Boolean)
{
require (depth >= 0, "Buffer depth must be >= 0")
def isDefined = depth > 0
def latency = if (isDefined && !flow) 1 else 0
def apply[T <: Data](x: DecoupledIO[T]) =
if (isDefined) Queue(x, depth, flow=flow, pipe=pipe)
else x
def irrevocable[T <: Data](x: ReadyValidIO[T]) =
if (isDefined) Queue.irrevocable(x, depth, flow=flow, pipe=pipe)
else x
def sq[T <: Data](x: DecoupledIO[T]) =
if (!isDefined) x else {
val sq = Module(new ShiftQueue(x.bits, depth, flow=flow, pipe=pipe))
sq.io.enq <> x
sq.io.deq
}
override def toString() = "BufferParams:%d%s%s".format(depth, if (flow) "F" else "", if (pipe) "P" else "")
}
object BufferParams
{
implicit def apply(depth: Int): BufferParams = BufferParams(depth, false, false)
val default = BufferParams(2)
val none = BufferParams(0)
val flow = BufferParams(1, true, false)
val pipe = BufferParams(1, false, true)
}
case class TriStateValue(value: Boolean, set: Boolean)
{
def update(orig: Boolean) = if (set) value else orig
}
object TriStateValue
{
implicit def apply(value: Boolean): TriStateValue = TriStateValue(value, true)
def unset = TriStateValue(false, false)
}
trait DirectedBuffers[T] {
def copyIn(x: BufferParams): T
def copyOut(x: BufferParams): T
def copyInOut(x: BufferParams): T
}
trait IdMapEntry {
def name: String
def from: IdRange
def to: IdRange
def isCache: Boolean
def requestFifo: Boolean
def maxTransactionsInFlight: Option[Int]
def pretty(fmt: String) =
if (from ne to) { // if the subclass uses the same reference for both from and to, assume its format string has an arity of 5
fmt.format(to.start, to.end, from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "")
} else {
fmt.format(from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "")
}
}
abstract class IdMap[T <: IdMapEntry] {
protected val fmt: String
val mapping: Seq[T]
def pretty: String = mapping.map(_.pretty(fmt)).mkString(",\n")
}
File Edges.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.util._
class TLEdge(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdgeParameters(client, manager, params, sourceInfo)
{
def isAligned(address: UInt, lgSize: UInt): Bool = {
if (maxLgSize == 0) true.B else {
val mask = UIntToOH1(lgSize, maxLgSize)
(address & mask) === 0.U
}
}
def mask(address: UInt, lgSize: UInt): UInt =
MaskGen(address, lgSize, manager.beatBytes)
def staticHasData(bundle: TLChannel): Option[Boolean] = {
bundle match {
case _:TLBundleA => {
// Do there exist A messages with Data?
val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial
// Do there exist A messages without Data?
val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint
// Statically optimize the case where hasData is a constant
if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None
}
case _:TLBundleB => {
// Do there exist B messages with Data?
val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial
// Do there exist B messages without Data?
val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint
// Statically optimize the case where hasData is a constant
if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None
}
case _:TLBundleC => {
// Do there exist C messages with Data?
val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe
// Do there exist C messages without Data?
val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe
if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None
}
case _:TLBundleD => {
// Do there exist D messages with Data?
val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB
// Do there exist D messages without Data?
val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT
if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None
}
case _:TLBundleE => Some(false)
}
}
def isRequest(x: TLChannel): Bool = {
x match {
case a: TLBundleA => true.B
case b: TLBundleB => true.B
case c: TLBundleC => c.opcode(2) && c.opcode(1)
// opcode === TLMessages.Release ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(2) && !d.opcode(1)
// opcode === TLMessages.Grant ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
}
def isResponse(x: TLChannel): Bool = {
x match {
case a: TLBundleA => false.B
case b: TLBundleB => false.B
case c: TLBundleC => !c.opcode(2) || !c.opcode(1)
// opcode =/= TLMessages.Release &&
// opcode =/= TLMessages.ReleaseData
case d: TLBundleD => true.B // Grant isResponse + isRequest
case e: TLBundleE => true.B
}
}
def hasData(x: TLChannel): Bool = {
val opdata = x match {
case a: TLBundleA => !a.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case b: TLBundleB => !b.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case c: TLBundleC => c.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.ProbeAckData ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
staticHasData(x).map(_.B).getOrElse(opdata)
}
def opcode(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.opcode
case b: TLBundleB => b.opcode
case c: TLBundleC => c.opcode
case d: TLBundleD => d.opcode
}
}
def param(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.param
case b: TLBundleB => b.param
case c: TLBundleC => c.param
case d: TLBundleD => d.param
}
}
def size(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.size
case b: TLBundleB => b.size
case c: TLBundleC => c.size
case d: TLBundleD => d.size
}
}
def data(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.data
case b: TLBundleB => b.data
case c: TLBundleC => c.data
case d: TLBundleD => d.data
}
}
def corrupt(x: TLDataChannel): Bool = {
x match {
case a: TLBundleA => a.corrupt
case b: TLBundleB => b.corrupt
case c: TLBundleC => c.corrupt
case d: TLBundleD => d.corrupt
}
}
def mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.mask
case b: TLBundleB => b.mask
case c: TLBundleC => mask(c.address, c.size)
}
}
def full_mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => mask(a.address, a.size)
case b: TLBundleB => mask(b.address, b.size)
case c: TLBundleC => mask(c.address, c.size)
}
}
def address(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.address
case b: TLBundleB => b.address
case c: TLBundleC => c.address
}
}
def source(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.source
case b: TLBundleB => b.source
case c: TLBundleC => c.source
case d: TLBundleD => d.source
}
}
def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes)
def addr_lo(x: UInt): UInt =
if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0)
def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x))
def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x))
def numBeats(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 1.U
case bundle: TLDataChannel => {
val hasData = this.hasData(bundle)
val size = this.size(bundle)
val cutoff = log2Ceil(manager.beatBytes)
val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U
val decode = UIntToOH(size, maxLgSize+1) >> cutoff
Mux(hasData, decode | small.asUInt, 1.U)
}
}
}
def numBeats1(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 0.U
case bundle: TLDataChannel => {
if (maxLgSize == 0) {
0.U
} else {
val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes)
Mux(hasData(bundle), decode, 0.U)
}
}
}
}
def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val beats1 = numBeats1(bits)
val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W))
val counter1 = counter - 1.U
val first = counter === 0.U
val last = counter === 1.U || beats1 === 0.U
val done = last && fire
val count = (beats1 & ~counter1)
when (fire) {
counter := Mux(first, beats1, counter1)
}
(first, last, done, count)
}
def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1
def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire)
def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid)
def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2
def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire)
def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid)
def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3
def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire)
def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid)
def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3)
}
def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire)
def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid)
def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4)
}
def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire)
def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid)
def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes))
}
def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire)
def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid)
// Does the request need T permissions to be executed?
def needT(a: TLBundleA): Bool = {
val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLPermissions.NtoB -> false.B,
TLPermissions.NtoT -> true.B,
TLPermissions.BtoT -> true.B))
MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array(
TLMessages.PutFullData -> true.B,
TLMessages.PutPartialData -> true.B,
TLMessages.ArithmeticData -> true.B,
TLMessages.LogicalData -> true.B,
TLMessages.Get -> false.B,
TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLHints.PREFETCH_READ -> false.B,
TLHints.PREFETCH_WRITE -> true.B)),
TLMessages.AcquireBlock -> acq_needT,
TLMessages.AcquirePerm -> acq_needT))
}
// This is a very expensive circuit; use only if you really mean it!
def inFlight(x: TLBundle): (UInt, UInt) = {
val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W))
val bce = manager.anySupportAcquireB && client.anySupportProbe
val (a_first, a_last, _) = firstlast(x.a)
val (b_first, b_last, _) = firstlast(x.b)
val (c_first, c_last, _) = firstlast(x.c)
val (d_first, d_last, _) = firstlast(x.d)
val (e_first, e_last, _) = firstlast(x.e)
val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits))
val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits))
val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits))
val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits))
val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits))
val a_inc = x.a.fire && a_first && a_request
val b_inc = x.b.fire && b_first && b_request
val c_inc = x.c.fire && c_first && c_request
val d_inc = x.d.fire && d_first && d_request
val e_inc = x.e.fire && e_first && e_request
val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil))
val a_dec = x.a.fire && a_last && a_response
val b_dec = x.b.fire && b_last && b_response
val c_dec = x.c.fire && c_last && c_response
val d_dec = x.d.fire && d_last && d_response
val e_dec = x.e.fire && e_last && e_response
val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil))
val next_flight = flight + PopCount(inc) - PopCount(dec)
flight := next_flight
(flight, next_flight)
}
def prettySourceMapping(context: String): String = {
s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n"
}
}
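// Illustrative sketch (not part of the original file): the first/last/done helpers are the
// usual way to track multibeat messages; this counts fully transferred A-channel requests on
// a monitored link. The object and signal names are assumptions for the example only.
object TLEdgeExample {
  def countADone(a: DecoupledIO[TLBundleA], edge: TLEdge): UInt = {
    val done = edge.done(a.bits, a.fire) // true on the accepted final beat of each A message
    val completed = RegInit(0.U(32.W))
    when (done) { completed := completed + 1.U }
    completed
  }
}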
class TLEdgeOut(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
// Transfers
def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquireBlock
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquirePerm
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.Release
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ReleaseData
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) =
Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B)
def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAck
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions, data)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAckData
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B)
def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink)
def GrantAck(toSink: UInt): TLBundleE = {
val e = Wire(new TLBundleE(bundle))
e.sink := toSink
e
}
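  // Sketch of client-side use (hypothetical signals): issue an AcquireBlock and later acknowledge the
  // Grant; a real client would also track D beats and source IDs.
  //   val (legal, acq) = edge.AcquireBlock(sourceId, blockAddress, lgBlockBytes.U, TLPermissions.NtoT)
  //   tl.a.valid := wantBlock && legal
  //   tl.a.bits  := acq
  //   // ... once the Grant arrives on channel D:
  //   tl.e.valid := grantReceived
  //   tl.e.bits  := edge.GrantAck(tl.d.bits)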
// Accesses
def Get(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsGetFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Get
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutFullFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutFullData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, mask, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutPartialFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutPartialData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = {
require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsArithmeticFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.ArithmeticData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsLogicalFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.LogicalData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = {
require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsHintFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Hint
a.param := param
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data)
def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAckData
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size)
def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.HintAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
}
class TLEdgeIn(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = {
val todo = x.filter(!_.isEmpty)
val heads = todo.map(_.head)
val tails = todo.map(_.tail)
if (todo.isEmpty) Nil else { heads +: myTranspose(tails) }
}
// Transfers
def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = {
require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}")
val legal = client.supportsProbe(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Probe
b.param := capPermissions
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.Grant
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.GrantData
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B)
def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.ReleaseAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
// Accesses
def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = {
require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsGet(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Get
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsPutFull(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutFullData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, mask, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsPutPartial(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutPartialData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsArithmetic(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.ArithmeticData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsLogical(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.LogicalData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = {
require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsHint(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Hint
b.param := param
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size)
def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied)
def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data)
def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAckData
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B)
def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied)
def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B)
def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.HintAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
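  // Sketch of manager-side use (hypothetical signals): answer a Get on channel A with data on channel D.
  //   tl.d.valid := tl.a.valid && tl.a.bits.opcode === TLMessages.Get
  //   tl.d.bits  := edge.AccessAck(tl.a.bits, readData)
  //   tl.a.ready := tl.d.ready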
}
| module TLMonitor_17( // @[Monitor.scala:36:7]
input clock, // @[Monitor.scala:36:7]
input reset, // @[Monitor.scala:36:7]
input io_in_a_ready, // @[Monitor.scala:20:14]
input io_in_a_valid, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_opcode, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_param, // @[Monitor.scala:20:14]
input [3:0] io_in_a_bits_size, // @[Monitor.scala:20:14]
input [3:0] io_in_a_bits_source, // @[Monitor.scala:20:14]
input [31:0] io_in_a_bits_address, // @[Monitor.scala:20:14]
input [7:0] io_in_a_bits_mask, // @[Monitor.scala:20:14]
input io_in_a_bits_corrupt, // @[Monitor.scala:20:14]
input io_in_d_ready, // @[Monitor.scala:20:14]
input io_in_d_valid, // @[Monitor.scala:20:14]
input [2:0] io_in_d_bits_opcode, // @[Monitor.scala:20:14]
input [1:0] io_in_d_bits_param, // @[Monitor.scala:20:14]
input [3:0] io_in_d_bits_size, // @[Monitor.scala:20:14]
input [3:0] io_in_d_bits_source, // @[Monitor.scala:20:14]
input [4:0] io_in_d_bits_sink, // @[Monitor.scala:20:14]
input io_in_d_bits_denied, // @[Monitor.scala:20:14]
input io_in_d_bits_corrupt // @[Monitor.scala:20:14]
);
wire [31:0] _plusarg_reader_1_out; // @[PlusArg.scala:80:11]
wire [31:0] _plusarg_reader_out; // @[PlusArg.scala:80:11]
wire [26:0] _GEN = {23'h0, io_in_a_bits_size}; // @[package.scala:243:71]
wire _a_first_T_1 = io_in_a_ready & io_in_a_valid; // @[Decoupled.scala:51:35]
reg [8:0] a_first_counter; // @[Edges.scala:229:27]
reg [2:0] opcode; // @[Monitor.scala:387:22]
reg [2:0] param; // @[Monitor.scala:388:22]
reg [3:0] size; // @[Monitor.scala:389:22]
reg [3:0] source; // @[Monitor.scala:390:22]
reg [31:0] address; // @[Monitor.scala:391:22]
reg [8:0] d_first_counter; // @[Edges.scala:229:27]
reg [2:0] opcode_1; // @[Monitor.scala:538:22]
reg [1:0] param_1; // @[Monitor.scala:539:22]
reg [3:0] size_1; // @[Monitor.scala:540:22]
reg [3:0] source_1; // @[Monitor.scala:541:22]
reg [4:0] sink; // @[Monitor.scala:542:22]
reg denied; // @[Monitor.scala:543:22]
reg [15:0] inflight; // @[Monitor.scala:614:27]
reg [63:0] inflight_opcodes; // @[Monitor.scala:616:35]
reg [127:0] inflight_sizes; // @[Monitor.scala:618:33]
reg [8:0] a_first_counter_1; // @[Edges.scala:229:27]
wire a_first_1 = a_first_counter_1 == 9'h0; // @[Edges.scala:229:27, :231:25]
reg [8:0] d_first_counter_1; // @[Edges.scala:229:27]
wire d_first_1 = d_first_counter_1 == 9'h0; // @[Edges.scala:229:27, :231:25]
wire [15:0] _GEN_0 = {12'h0, io_in_a_bits_source}; // @[OneHot.scala:58:35]
wire _GEN_1 = _a_first_T_1 & a_first_1; // @[Decoupled.scala:51:35]
wire d_release_ack = io_in_d_bits_opcode == 3'h6; // @[Monitor.scala:673:46]
wire _GEN_2 = io_in_d_bits_opcode != 3'h6; // @[Monitor.scala:673:46, :674:74]
wire [15:0] _GEN_3 = {12'h0, io_in_d_bits_source}; // @[OneHot.scala:58:35]
reg [31:0] watchdog; // @[Monitor.scala:709:27]
reg [15:0] inflight_1; // @[Monitor.scala:726:35]
reg [127:0] inflight_sizes_1; // @[Monitor.scala:728:35]
reg [8:0] d_first_counter_2; // @[Edges.scala:229:27]
wire d_first_2 = d_first_counter_2 == 9'h0; // @[Edges.scala:229:27, :231:25]
reg [31:0] watchdog_1; // @[Monitor.scala:818:27] |
Generate the Verilog code corresponding to the following Chisel files.
File UnsafeAXI4ToTL.scala:
package ara
import chisel3._
import chisel3.util._
import freechips.rocketchip.amba._
import freechips.rocketchip.amba.axi4._
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.diplomacy._
import freechips.rocketchip.tilelink._
import freechips.rocketchip.util._
class ReorderData(val dataWidth: Int, val respWidth: Int, val userFields: Seq[BundleFieldBase]) extends Bundle {
val data = UInt(dataWidth.W)
val resp = UInt(respWidth.W)
val last = Bool()
val user = BundleMap(userFields)
}
/** Parameters for [[BaseReservableListBuffer]] and all child classes.
*
* @param numEntries Total number of elements that can be stored in the 'data' RAM
* @param numLists Maximum number of linked lists
* @param numBeats Maximum number of beats per entry
*/
case class ReservableListBufferParameters(numEntries: Int, numLists: Int, numBeats: Int) {
// Avoid zero-width wires when we call 'log2Ceil'
val entryBits = if (numEntries == 1) 1 else log2Ceil(numEntries)
val listBits = if (numLists == 1) 1 else log2Ceil(numLists)
val beatBits = if (numBeats == 1) 1 else log2Ceil(numBeats)
}
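// Worked example (illustrative): ReservableListBufferParameters(numEntries = 4, numLists = 16, numBeats = 8)
// yields entryBits = 2, listBits = 4, beatBits = 3; the "== 1" special cases above exist only to avoid
// zero-width index wires when a dimension is 1.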
case class UnsafeAXI4ToTLNode(numTlTxns: Int, wcorrupt: Boolean)(implicit valName: ValName)
extends MixedAdapterNode(AXI4Imp, TLImp)(
dFn = { case mp =>
TLMasterPortParameters.v2(
masters = mp.masters.zipWithIndex.map { case (m, i) =>
// Support 'numTlTxns' read requests and 'numTlTxns' write requests at once.
val numSourceIds = numTlTxns * 2
TLMasterParameters.v2(
name = m.name,
sourceId = IdRange(i * numSourceIds, (i + 1) * numSourceIds),
nodePath = m.nodePath
)
},
echoFields = mp.echoFields,
requestFields = AMBAProtField() +: mp.requestFields,
responseKeys = mp.responseKeys
)
},
uFn = { mp =>
AXI4SlavePortParameters(
slaves = mp.managers.map { m =>
val maxXfer = TransferSizes(1, mp.beatBytes * (1 << AXI4Parameters.lenBits))
AXI4SlaveParameters(
address = m.address,
resources = m.resources,
regionType = m.regionType,
executable = m.executable,
nodePath = m.nodePath,
supportsWrite = m.supportsPutPartial.intersect(maxXfer),
supportsRead = m.supportsGet.intersect(maxXfer),
interleavedId = Some(0) // TL2 never interleaves D beats
)
},
beatBytes = mp.beatBytes,
minLatency = mp.minLatency,
responseFields = mp.responseFields,
requestKeys = (if (wcorrupt) Seq(AMBACorrupt) else Seq()) ++ mp.requestKeys.filter(_ != AMBAProt)
)
}
)
class UnsafeAXI4ToTL(numTlTxns: Int, wcorrupt: Boolean)(implicit p: Parameters) extends LazyModule {
require(numTlTxns >= 1)
require(isPow2(numTlTxns), s"Number of TileLink transactions ($numTlTxns) must be a power of 2")
val node = UnsafeAXI4ToTLNode(numTlTxns, wcorrupt)
lazy val module = new LazyModuleImp(this) {
(node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
edgeIn.master.masters.foreach { m =>
require(m.aligned, "AXI4ToTL requires aligned requests")
}
val numIds = edgeIn.master.endId
val beatBytes = edgeOut.slave.beatBytes
val maxTransfer = edgeOut.slave.maxTransfer
val maxBeats = maxTransfer / beatBytes
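      // For example (illustrative): with beatBytes = 8 and a maxTransfer of 64 bytes, maxBeats = 8, so a
      // single TileLink transaction can carry at most an 8-beat AXI burst.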
// Look for an Error device to redirect bad requests
val errorDevs = edgeOut.slave.managers.filter(_.nodePath.last.lazyModule.className == "TLError")
require(!errorDevs.isEmpty, "There is no TLError reachable from AXI4ToTL. One must be instantiated.")
val errorDev = errorDevs.maxBy(_.maxTransfer)
val errorDevAddr = errorDev.address.head.base
require(
errorDev.supportsPutPartial.contains(maxTransfer),
s"Error device supports ${errorDev.supportsPutPartial} PutPartial but must support $maxTransfer"
)
require(
errorDev.supportsGet.contains(maxTransfer),
s"Error device supports ${errorDev.supportsGet} Get but must support $maxTransfer"
)
// All of the read-response reordering logic.
val listBufData = new ReorderData(beatBytes * 8, edgeIn.bundle.respBits, out.d.bits.user.fields)
val listBufParams = ReservableListBufferParameters(numTlTxns, numIds, maxBeats)
val listBuffer = if (numTlTxns > 1) {
Module(new ReservableListBuffer(listBufData, listBufParams))
} else {
Module(new PassthroughListBuffer(listBufData, listBufParams))
}
// To differentiate between read and write transaction IDs, we will set the MSB of the TileLink 'source' field to
// 0 for read requests and 1 for write requests.
val isReadSourceBit = 0.U(1.W)
val isWriteSourceBit = 1.U(1.W)
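      // For example (illustrative): with numTlTxns = 4, reads use TileLink source IDs 0-3 (MSB = 0) and
      // writes use source IDs 4-7 (MSB = 1), matching the 'numTlTxns * 2' sources claimed per master above.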
/* Read request logic */
val rOut = Wire(Decoupled(new TLBundleA(edgeOut.bundle)))
val rBytes1 = in.ar.bits.bytes1()
val rSize = OH1ToUInt(rBytes1)
val rOk = edgeOut.slave.supportsGetSafe(in.ar.bits.addr, rSize)
val rId = if (numTlTxns > 1) {
Cat(isReadSourceBit, listBuffer.ioReservedIndex)
} else {
isReadSourceBit
}
val rAddr = Mux(rOk, in.ar.bits.addr, errorDevAddr.U | in.ar.bits.addr(log2Ceil(beatBytes) - 1, 0))
// Indicates if there are still valid TileLink source IDs left to use.
val canIssueR = listBuffer.ioReserve.ready
listBuffer.ioReserve.bits := in.ar.bits.id
listBuffer.ioReserve.valid := in.ar.valid && rOut.ready
in.ar.ready := rOut.ready && canIssueR
rOut.valid := in.ar.valid && canIssueR
rOut.bits :<= edgeOut.Get(rId, rAddr, rSize)._2
rOut.bits.user :<= in.ar.bits.user
rOut.bits.user.lift(AMBAProt).foreach { rProt =>
rProt.privileged := in.ar.bits.prot(0)
rProt.secure := !in.ar.bits.prot(1)
rProt.fetch := in.ar.bits.prot(2)
rProt.bufferable := in.ar.bits.cache(0)
rProt.modifiable := in.ar.bits.cache(1)
rProt.readalloc := in.ar.bits.cache(2)
rProt.writealloc := in.ar.bits.cache(3)
}
/* Write request logic */
// Strip off the MSB, which identifies the transaction as read vs write.
val strippedResponseSourceId = if (numTlTxns > 1) {
out.d.bits.source((out.d.bits.source).getWidth - 2, 0)
} else {
// When there's only 1 TileLink transaction allowed for read/write, then this field is always 0.
0.U(1.W)
}
// Track when a write request burst is in progress.
val writeBurstBusy = RegInit(false.B)
when(in.w.fire) {
writeBurstBusy := !in.w.bits.last
}
val usedWriteIds = RegInit(0.U(numTlTxns.W))
val canIssueW = !usedWriteIds.andR
val usedWriteIdsSet = WireDefault(0.U(numTlTxns.W))
val usedWriteIdsClr = WireDefault(0.U(numTlTxns.W))
usedWriteIds := (usedWriteIds & ~usedWriteIdsClr) | usedWriteIdsSet
// Since write responses can show up in the middle of a write burst, we need to ensure the write burst ID doesn't
// change mid-burst.
val freeWriteIdOHRaw = Wire(UInt(numTlTxns.W))
val freeWriteIdOH = freeWriteIdOHRaw holdUnless !writeBurstBusy
val freeWriteIdIndex = OHToUInt(freeWriteIdOH)
freeWriteIdOHRaw := ~(leftOR(~usedWriteIds) << 1) & ~usedWriteIds
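      // For example (illustrative): if write ID 2 is chosen when an AW/W burst starts and a B response
      // frees ID 0 mid-burst, freeWriteIdOHRaw would switch to ID 0, but 'holdUnless !writeBurstBusy'
      // keeps freeWriteIdOH pointing at ID 2 until the burst's final W beat has fired.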
val wOut = Wire(Decoupled(new TLBundleA(edgeOut.bundle)))
val wBytes1 = in.aw.bits.bytes1()
val wSize = OH1ToUInt(wBytes1)
val wOk = edgeOut.slave.supportsPutPartialSafe(in.aw.bits.addr, wSize)
val wId = if (numTlTxns > 1) {
Cat(isWriteSourceBit, freeWriteIdIndex)
} else {
isWriteSourceBit
}
val wAddr = Mux(wOk, in.aw.bits.addr, errorDevAddr.U | in.aw.bits.addr(log2Ceil(beatBytes) - 1, 0))
// Here, we're taking advantage of the Irrevocable behavior of AXI4 (once 'valid' is asserted it must remain
// asserted until the handshake occurs). We will only accept W-channel beats when we have a valid AW beat, but
// the AW-channel beat won't fire until the final W-channel beat fires. So, we have stable address/size/strb
// bits during a W-channel burst.
in.aw.ready := wOut.ready && in.w.valid && in.w.bits.last && canIssueW
in.w.ready := wOut.ready && in.aw.valid && canIssueW
wOut.valid := in.aw.valid && in.w.valid && canIssueW
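      // For example (illustrative): in a 4-beat write burst, each W beat is forwarded as one TileLink A
      // beat of the Put while AW stays valid but un-consumed; only the final W beat fires together with
      // AW, so address/size/ID are stable for every beat of the TileLink burst.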
wOut.bits :<= edgeOut.Put(wId, wAddr, wSize, in.w.bits.data, in.w.bits.strb)._2
in.w.bits.user.lift(AMBACorrupt).foreach { wOut.bits.corrupt := _ }
wOut.bits.user :<= in.aw.bits.user
wOut.bits.user.lift(AMBAProt).foreach { wProt =>
wProt.privileged := in.aw.bits.prot(0)
wProt.secure := !in.aw.bits.prot(1)
wProt.fetch := in.aw.bits.prot(2)
wProt.bufferable := in.aw.bits.cache(0)
wProt.modifiable := in.aw.bits.cache(1)
wProt.readalloc := in.aw.bits.cache(2)
wProt.writealloc := in.aw.bits.cache(3)
}
// Merge the AXI4 read/write requests into the TL-A channel.
TLArbiter(TLArbiter.roundRobin)(out.a, (0.U, rOut), (in.aw.bits.len, wOut))
/* Read/write response logic */
val okB = Wire(Irrevocable(new AXI4BundleB(edgeIn.bundle)))
val okR = Wire(Irrevocable(new AXI4BundleR(edgeIn.bundle)))
val dResp = Mux(out.d.bits.denied || out.d.bits.corrupt, AXI4Parameters.RESP_SLVERR, AXI4Parameters.RESP_OKAY)
val dHasData = edgeOut.hasData(out.d.bits)
val (_dFirst, dLast, _dDone, dCount) = edgeOut.count(out.d)
val dNumBeats1 = edgeOut.numBeats1(out.d.bits)
// Handle cases where writeack arrives before write is done
val writeEarlyAck = (UIntToOH(strippedResponseSourceId) & usedWriteIds) === 0.U
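      // For example (illustrative): a slave may acknowledge a multi-beat Put before its last A beat has
      // been sent; the write ID is only marked used once edgeOut.done(wOut) fires, so writeEarlyAck holds
      // the B response (and the D channel) back until the request burst has fully issued.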
out.d.ready := Mux(dHasData, listBuffer.ioResponse.ready, okB.ready && !writeEarlyAck)
listBuffer.ioDataOut.ready := okR.ready
okR.valid := listBuffer.ioDataOut.valid
okB.valid := out.d.valid && !dHasData && !writeEarlyAck
listBuffer.ioResponse.valid := out.d.valid && dHasData
listBuffer.ioResponse.bits.index := strippedResponseSourceId
listBuffer.ioResponse.bits.data.data := out.d.bits.data
listBuffer.ioResponse.bits.data.resp := dResp
listBuffer.ioResponse.bits.data.last := dLast
listBuffer.ioResponse.bits.data.user :<= out.d.bits.user
listBuffer.ioResponse.bits.count := dCount
listBuffer.ioResponse.bits.numBeats1 := dNumBeats1
okR.bits.id := listBuffer.ioDataOut.bits.listIndex
okR.bits.data := listBuffer.ioDataOut.bits.payload.data
okR.bits.resp := listBuffer.ioDataOut.bits.payload.resp
okR.bits.last := listBuffer.ioDataOut.bits.payload.last
okR.bits.user :<= listBuffer.ioDataOut.bits.payload.user
// Upon the final beat in a write request, record a mapping from TileLink source ID to AXI write ID. Upon a write
// response, mark the write transaction as complete.
val writeIdMap = Mem(numTlTxns, UInt(log2Ceil(numIds).W))
val writeResponseId = writeIdMap.read(strippedResponseSourceId)
when(wOut.fire) {
writeIdMap.write(freeWriteIdIndex, in.aw.bits.id)
}
when(edgeOut.done(wOut)) {
usedWriteIdsSet := freeWriteIdOH
}
when(okB.fire) {
usedWriteIdsClr := UIntToOH(strippedResponseSourceId, numTlTxns)
}
okB.bits.id := writeResponseId
okB.bits.resp := dResp
okB.bits.user :<= out.d.bits.user
// AXI4 needs irrevocable behaviour
in.r <> Queue.irrevocable(okR, 1, flow = true)
in.b <> Queue.irrevocable(okB, 1, flow = true)
// Unused channels
out.b.ready := true.B
out.c.valid := false.B
out.e.valid := false.B
/* Alignment constraints. The AXI4Fragmenter should guarantee all of these constraints. */
def checkRequest[T <: AXI4BundleA](a: IrrevocableIO[T], reqType: String): Unit = {
val lReqType = reqType.toLowerCase
when(a.valid) {
assert(a.bits.len < maxBeats.U, s"$reqType burst length (%d) must be less than $maxBeats", a.bits.len + 1.U)
// Narrow transfers and FIXED bursts must be single-beat bursts.
when(a.bits.len =/= 0.U) {
assert(
a.bits.size === log2Ceil(beatBytes).U,
s"Narrow $lReqType transfers (%d < $beatBytes bytes) can't be multi-beat bursts (%d beats)",
1.U << a.bits.size,
a.bits.len + 1.U
)
assert(
a.bits.burst =/= AXI4Parameters.BURST_FIXED,
s"Fixed $lReqType bursts can't be multi-beat bursts (%d beats)",
a.bits.len + 1.U
)
}
// Furthermore, the transfer size (a.bits.bytes1() + 1.U) must be naturally-aligned to the address (in
// particular, during both WRAP and INCR bursts), but this constraint is already checked by TileLink
// Monitors. Note that this alignment requirement means that WRAP bursts are identical to INCR bursts.
}
}
checkRequest(in.ar, "Read")
checkRequest(in.aw, "Write")
}
}
}
object UnsafeAXI4ToTL {
def apply(numTlTxns: Int = 1, wcorrupt: Boolean = true)(implicit p: Parameters) = {
val axi42tl = LazyModule(new UnsafeAXI4ToTL(numTlTxns, wcorrupt))
axi42tl.node
}
}
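/* Illustrative wiring in a diplomacy graph (hypothetical surrounding nodes); the AXI4Fragmenter keeps
 * requests within the alignment constraints checked by checkRequest above:
 *   tlXbar.node := UnsafeAXI4ToTL(numTlTxns = 4) := AXI4Fragmenter() := axiMaster.node
 */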
/* ReservableListBuffer logic, and associated classes. */
class ResponsePayload[T <: Data](val data: T, val params: ReservableListBufferParameters) extends Bundle {
val index = UInt(params.entryBits.W)
val count = UInt(params.beatBits.W)
val numBeats1 = UInt(params.beatBits.W)
}
class DataOutPayload[T <: Data](val payload: T, val params: ReservableListBufferParameters) extends Bundle {
val listIndex = UInt(params.listBits.W)
}
/** Abstract base class to unify [[ReservableListBuffer]] and [[PassthroughListBuffer]]. */
abstract class BaseReservableListBuffer[T <: Data](gen: T, params: ReservableListBufferParameters)
extends Module {
require(params.numEntries > 0)
require(params.numLists > 0)
val ioReserve = IO(Flipped(Decoupled(UInt(params.listBits.W))))
val ioReservedIndex = IO(Output(UInt(params.entryBits.W)))
val ioResponse = IO(Flipped(Decoupled(new ResponsePayload(gen, params))))
val ioDataOut = IO(Decoupled(new DataOutPayload(gen, params)))
}
/** A modified version of 'ListBuffer' from 'sifive/block-inclusivecache-sifive'. This module forces users to reserve
* linked list entries (through the 'ioReserve' port) before writing data into those linked lists (through the
* 'ioResponse' port). Each response is tagged to indicate which linked list it is written into. The responses for a
* given linked list can come back out-of-order, but they will be read out through the 'ioDataOut' port in-order.
*
* ==Constructor==
* @param gen Chisel type of linked list data element
* @param params Other parameters
*
* ==Module IO==
* @param ioReserve Index of list to reserve a new element in
* @param ioReservedIndex Index of the entry that was reserved in the linked list, valid when 'ioReserve.fire'
* @param ioResponse Payload containing response data and linked-list-entry index
* @param ioDataOut Payload containing data read from response linked list and linked list index
*/
class ReservableListBuffer[T <: Data](gen: T, params: ReservableListBufferParameters)
extends BaseReservableListBuffer(gen, params) {
val valid = RegInit(0.U(params.numLists.W))
val head = Mem(params.numLists, UInt(params.entryBits.W))
val tail = Mem(params.numLists, UInt(params.entryBits.W))
val used = RegInit(0.U(params.numEntries.W))
val next = Mem(params.numEntries, UInt(params.entryBits.W))
val map = Mem(params.numEntries, UInt(params.listBits.W))
val dataMems = Seq.fill(params.numBeats) { SyncReadMem(params.numEntries, gen) }
val dataIsPresent = RegInit(0.U(params.numEntries.W))
val beats = Mem(params.numEntries, UInt(params.beatBits.W))
// The 'data' SRAM should be single-ported (read-or-write), since dual-ported SRAMs are significantly slower.
val dataMemReadEnable = WireDefault(false.B)
val dataMemWriteEnable = WireDefault(false.B)
assert(!(dataMemReadEnable && dataMemWriteEnable))
// 'freeOH' has a single bit set, which is the least-significant bit that is cleared in 'used'. So, it's the
// lowest-index entry in the 'data' RAM which is free.
val freeOH = Wire(UInt(params.numEntries.W))
val freeIndex = OHToUInt(freeOH)
freeOH := ~(leftOR(~used) << 1) & ~used
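  // Worked example (illustrative), numEntries = 8: used = b00101011 gives ~used = b11010100 and
  // leftOR(~used) = b11111100; shifting left by one and inverting gives b00000111 in the low 8 bits,
  // which ANDed with ~used leaves freeOH = b00000100 -- entry 2, the lowest-index free entry.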
ioReservedIndex := freeIndex
val validSet = WireDefault(0.U(params.numLists.W))
val validClr = WireDefault(0.U(params.numLists.W))
val usedSet = WireDefault(0.U(params.numEntries.W))
val usedClr = WireDefault(0.U(params.numEntries.W))
val dataIsPresentSet = WireDefault(0.U(params.numEntries.W))
val dataIsPresentClr = WireDefault(0.U(params.numEntries.W))
valid := (valid & ~validClr) | validSet
used := (used & ~usedClr) | usedSet
dataIsPresent := (dataIsPresent & ~dataIsPresentClr) | dataIsPresentSet
/* Reservation logic signals */
val reserveTail = Wire(UInt(params.entryBits.W))
val reserveIsValid = Wire(Bool())
/* Response logic signals */
val responseIndex = Wire(UInt(params.entryBits.W))
val responseListIndex = Wire(UInt(params.listBits.W))
val responseHead = Wire(UInt(params.entryBits.W))
val responseTail = Wire(UInt(params.entryBits.W))
val nextResponseHead = Wire(UInt(params.entryBits.W))
val nextDataIsPresent = Wire(Bool())
val isResponseInOrder = Wire(Bool())
val isEndOfList = Wire(Bool())
val isLastBeat = Wire(Bool())
val isLastResponseBeat = Wire(Bool())
val isLastUnwindBeat = Wire(Bool())
/* Reservation logic */
reserveTail := tail.read(ioReserve.bits)
reserveIsValid := valid(ioReserve.bits)
ioReserve.ready := !used.andR
// When we want to append-to and destroy the same linked list on the same cycle, we need to take special care that we
// actually start a new list, rather than appending to a list that's about to disappear.
val reserveResponseSameList = ioReserve.bits === responseListIndex
val appendToAndDestroyList =
ioReserve.fire && ioDataOut.fire && reserveResponseSameList && isEndOfList && isLastBeat
when(ioReserve.fire) {
validSet := UIntToOH(ioReserve.bits, params.numLists)
usedSet := freeOH
when(reserveIsValid && !appendToAndDestroyList) {
next.write(reserveTail, freeIndex)
}.otherwise {
head.write(ioReserve.bits, freeIndex)
}
tail.write(ioReserve.bits, freeIndex)
map.write(freeIndex, ioReserve.bits)
}
/* Response logic */
// The majority of the response logic (reading from and writing to the various RAMs) is common between the
// response-from-IO case (ioResponse.fire) and the response-from-unwind case (unwindDataIsValid).
// The read from the 'next' RAM should be performed at the address given by 'responseHead'. However, we only use the
// 'nextResponseHead' signal when 'isResponseInOrder' is asserted (both in the response-from-IO and
// response-from-unwind cases), which implies that 'responseHead' equals 'responseIndex'. 'responseHead' comes after
// two back-to-back RAM reads, so indexing into the 'next' RAM with 'responseIndex' is much quicker.
responseHead := head.read(responseListIndex)
responseTail := tail.read(responseListIndex)
nextResponseHead := next.read(responseIndex)
nextDataIsPresent := dataIsPresent(nextResponseHead)
// Note that when 'isEndOfList' is asserted, 'nextResponseHead' (and therefore 'nextDataIsPresent') is invalid, since
// there isn't a next element in the linked list.
isResponseInOrder := responseHead === responseIndex
isEndOfList := responseHead === responseTail
isLastResponseBeat := ioResponse.bits.count === ioResponse.bits.numBeats1
// When a response's last beat is sent to the output channel, mark it as completed. This can happen in two
// situations:
// 1. We receive an in-order response, which travels straight from 'ioResponse' to 'ioDataOut'. The 'data' SRAM
// reservation was never needed.
// 2. An entry is read out of the 'data' SRAM (within the unwind FSM).
when(ioDataOut.fire && isLastBeat) {
// Mark the reservation as no-longer-used.
usedClr := UIntToOH(responseIndex, params.numEntries)
// If the response is in-order, then we're popping an element from this linked list.
when(isEndOfList) {
// Once we pop the last element from a linked list, mark it as no-longer-present.
validClr := UIntToOH(responseListIndex, params.numLists)
}.otherwise {
// Move the linked list's head pointer to the new head pointer.
head.write(responseListIndex, nextResponseHead)
}
}
// If we get an out-of-order response, then stash it in the 'data' SRAM for later unwinding.
when(ioResponse.fire && !isResponseInOrder) {
dataMemWriteEnable := true.B
when(isLastResponseBeat) {
dataIsPresentSet := UIntToOH(ioResponse.bits.index, params.numEntries)
beats.write(ioResponse.bits.index, ioResponse.bits.numBeats1)
}
}
// Use the 'ioResponse.bits.count' index (AKA the beat number) to select which 'data' SRAM to write to.
val responseCountOH = UIntToOH(ioResponse.bits.count, params.numBeats)
(responseCountOH.asBools zip dataMems) foreach { case (select, seqMem) =>
when(select && dataMemWriteEnable) {
seqMem.write(ioResponse.bits.index, ioResponse.bits.data)
}
}
/* Response unwind logic */
// Unwind FSM state definitions
val sIdle :: sUnwinding :: Nil = Enum(2)
val unwindState = RegInit(sIdle)
val busyUnwinding = unwindState === sUnwinding
val startUnwind = Wire(Bool())
val stopUnwind = Wire(Bool())
when(startUnwind) {
unwindState := sUnwinding
}.elsewhen(stopUnwind) {
unwindState := sIdle
}
assert(!(startUnwind && stopUnwind))
// Start the unwind FSM when there is an old out-of-order response stored in the 'data' SRAM that is now about to
// become the next in-order response. As noted previously, when 'isEndOfList' is asserted, 'nextDataIsPresent' is
// invalid.
//
// Note that since an in-order response from 'ioResponse' to 'ioDataOut' starts the unwind FSM, we don't have to
// worry about overwriting the 'data' SRAM's output when we start the unwind FSM.
startUnwind := ioResponse.fire && isResponseInOrder && isLastResponseBeat && !isEndOfList && nextDataIsPresent
// Stop the unwind FSM when the output channel consumes the final beat of an element from the unwind FSM, and one of
// two things happens:
// 1. We're still waiting for the next in-order response for this list (!nextDataIsPresent)
// 2. There are no more outstanding responses in this list (isEndOfList)
//
// Including 'busyUnwinding' ensures this is a single-cycle pulse, and it never fires while in-order transactions are
// passing from 'ioResponse' to 'ioDataOut'.
stopUnwind := busyUnwinding && ioDataOut.fire && isLastUnwindBeat && (!nextDataIsPresent || isEndOfList)
val isUnwindBurstOver = Wire(Bool())
val startNewBurst = startUnwind || (isUnwindBurstOver && dataMemReadEnable)
// Track the number of beats left to unwind for each list entry. At the start of a new burst, we flop the number of
// beats in this burst (minus 1) into 'unwindBeats1', and we reset the 'beatCounter' counter. With each beat, we
// increment 'beatCounter' until it reaches 'unwindBeats1'.
val unwindBeats1 = Reg(UInt(params.beatBits.W))
val nextBeatCounter = Wire(UInt(params.beatBits.W))
val beatCounter = RegNext(nextBeatCounter)
isUnwindBurstOver := beatCounter === unwindBeats1
when(startNewBurst) {
unwindBeats1 := beats.read(nextResponseHead)
nextBeatCounter := 0.U
}.elsewhen(dataMemReadEnable) {
nextBeatCounter := beatCounter + 1.U
}.otherwise {
nextBeatCounter := beatCounter
}
// When unwinding, feed the next linked-list head pointer (read out of the 'next' RAM) back so we can unwind the next
// entry in this linked list. Only update the pointer when we're actually moving to the next 'data' SRAM entry (which
// happens at the start of reading a new stored burst).
val unwindResponseIndex = RegEnable(nextResponseHead, startNewBurst)
responseIndex := Mux(busyUnwinding, unwindResponseIndex, ioResponse.bits.index)
// Hold 'nextResponseHead' static while we're in the middle of unwinding a multi-beat burst entry. We don't want the
// SRAM read address to shift while reading beats from a burst. Note that this is identical to 'nextResponseHead
// holdUnless startNewBurst', but 'unwindResponseIndex' already implements the 'RegEnable' signal in 'holdUnless'.
val unwindReadAddress = Mux(startNewBurst, nextResponseHead, unwindResponseIndex)
// The 'data' SRAM's output is valid if we read from the SRAM on the previous cycle. The SRAM's output stays valid
// until it is consumed by the output channel (and if we don't read from the SRAM again on that same cycle).
val unwindDataIsValid = RegInit(false.B)
when(dataMemReadEnable) {
unwindDataIsValid := true.B
}.elsewhen(ioDataOut.fire) {
unwindDataIsValid := false.B
}
isLastUnwindBeat := isUnwindBurstOver && unwindDataIsValid
// Indicates if this is the last beat for both 'ioResponse'-to-'ioDataOut' and unwind-to-'ioDataOut' beats.
isLastBeat := Mux(busyUnwinding, isLastUnwindBeat, isLastResponseBeat)
// Select which SRAM to read from based on the beat counter.
val dataOutputVec = Wire(Vec(params.numBeats, gen))
val nextBeatCounterOH = UIntToOH(nextBeatCounter, params.numBeats)
(nextBeatCounterOH.asBools zip dataMems).zipWithIndex foreach { case ((select, seqMem), i) =>
dataOutputVec(i) := seqMem.read(unwindReadAddress, select && dataMemReadEnable)
}
// Select the current 'data' SRAM output beat, and save the output in a register in case we're being back-pressured
// by 'ioDataOut'. This implements the functionality of 'readAndHold', but only on the single SRAM we're reading
// from.
val dataOutput = dataOutputVec(beatCounter) holdUnless RegNext(dataMemReadEnable)
// Mark 'data' burst entries as no-longer-present as they get read out of the SRAM.
when(dataMemReadEnable) {
dataIsPresentClr := UIntToOH(unwindReadAddress, params.numEntries)
}
// As noted above, when starting the unwind FSM, we know the 'data' SRAM's output isn't valid, so it's safe to issue
// a read command. Otherwise, only issue an SRAM read when the next 'unwindState' is 'sUnwinding', and if we know
// we're not going to overwrite the SRAM's current output (the SRAM output is already valid, and it's not going to be
// consumed by the output channel).
val dontReadFromDataMem = unwindDataIsValid && !ioDataOut.ready
dataMemReadEnable := startUnwind || (busyUnwinding && !stopUnwind && !dontReadFromDataMem)
// While unwinding, prevent new reservations from overwriting the current 'map' entry that we're using. We need
// 'responseListIndex' to be coherent for the entire unwind process.
val rawResponseListIndex = map.read(responseIndex)
val unwindResponseListIndex = RegEnable(rawResponseListIndex, startNewBurst)
responseListIndex := Mux(busyUnwinding, unwindResponseListIndex, rawResponseListIndex)
// Accept responses either when they can be passed through to the output channel, or if they're out-of-order and are
// just going to be stashed in the 'data' SRAM. Never accept a response payload when we're busy unwinding, since that
// could result in reading from and writing to the 'data' SRAM in the same cycle, and we want that SRAM to be
// single-ported.
ioResponse.ready := (ioDataOut.ready || !isResponseInOrder) && !busyUnwinding
// Either pass an in-order response to the output channel, or data read from the unwind FSM.
ioDataOut.valid := Mux(busyUnwinding, unwindDataIsValid, ioResponse.valid && isResponseInOrder)
ioDataOut.bits.listIndex := responseListIndex
ioDataOut.bits.payload := Mux(busyUnwinding, dataOutput, ioResponse.bits.data)
// It's an error to get a response that isn't associated with a valid linked list.
when(ioResponse.fire || unwindDataIsValid) {
assert(
valid(responseListIndex),
"No linked list exists at index %d, mapped from %d",
responseListIndex,
responseIndex
)
}
when(busyUnwinding && dataMemReadEnable) {
assert(isResponseInOrder, "Unwind FSM must read entries from SRAM in order")
}
}
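/* Illustrative instantiation (hypothetical parameters), mirroring how UnsafeAXI4ToTL uses it above:
 * buffer read responses for up to 4 concurrent TileLink transactions spread over 16 AXI IDs, with up to
 * 8 beats per burst:
 *   val listBuffer = Module(new ReservableListBuffer(
 *     new ReorderData(64, 2, Nil),
 *     ReservableListBufferParameters(numEntries = 4, numLists = 16, numBeats = 8)))
 */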
/** Specialized version of [[ReservableListBuffer]] for the case of numEntries == 1.
*
* Much of the complex logic in [[ReservableListBuffer]] can disappear in this case. For instance, we don't have to
* reorder any responses, or store any linked lists.
*/
class PassthroughListBuffer[T <: Data](gen: T, params: ReservableListBufferParameters)
extends BaseReservableListBuffer(gen, params) {
require(params.numEntries == 1, s"PassthroughListBuffer is only valid when 'numEntries' (${params.numEntries}) is 1")
val used = RegInit(0.U(params.numEntries.W))
val map = Mem(params.numEntries, UInt(params.listBits.W))
val usedSet = WireDefault(0.U(params.numEntries.W))
val usedClr = WireDefault(0.U(params.numEntries.W))
used := (used & ~usedClr) | usedSet
ioReserve.ready := used === 0.U
// Store which list index was reserved, we need to return this value when we get a response.
when(ioReserve.fire) {
usedSet := 1.U
map.write(0.U, ioReserve.bits)
}
// There's only one valid linked list entry, which is at index 0.
ioReservedIndex := 0.U
val isLastResponseBeat = ioResponse.bits.count === ioResponse.bits.numBeats1
// Mark the linked list as empty when we get the last beat in a response.
// Note that 'ioResponse.fire === ioDataOut.fire'.
when(ioResponse.fire && isLastResponseBeat) {
usedClr := 1.U
}
// Always pass the response data straight through, since we never need to reorder the response data.
ioDataOut.bits.listIndex := map.read(0.U)
ioDataOut.bits.payload := ioResponse.bits.data
ioDataOut.valid := ioResponse.valid
ioResponse.ready := ioDataOut.ready
}
| module dataMems_389( // @[UnsafeAXI4ToTL.scala:365:62]
input [4:0] R0_addr,
input R0_en,
input R0_clk,
output [66:0] R0_data,
input [4:0] W0_addr,
input W0_en,
input W0_clk,
input [66:0] W0_data
);
dataMems_0_ext dataMems_0_ext ( // @[UnsafeAXI4ToTL.scala:365:62]
.R0_addr (R0_addr),
.R0_en (R0_en),
.R0_clk (R0_clk),
.R0_data (R0_data),
.W0_addr (W0_addr),
.W0_en (W0_en),
.W0_clk (W0_clk),
.W0_data (W0_data)
); // @[UnsafeAXI4ToTL.scala:365:62]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File ShiftReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
// Similar to the Chisel ShiftRegister but allows the user to suggest a
// name to the registers that get instantiated, and
// to provide a reset value.
object ShiftRegInit {
def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T =
(0 until n).foldRight(in) {
case (i, next) => {
val r = RegNext(next, init)
name.foreach { na => r.suggestName(s"${na}_${i}") }
r
}
}
}
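// Illustrative use (hypothetical signal): a 2-deep register pipeline whose stages are suggested the
// names "sync_1" (first) and "sync_0" (last), each reset to false.B:
//   val stable = ShiftRegInit(rawIn, n = 2, init = false.B, name = Some("sync"))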
/** These wrap behavioral
* shift registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
* The different types vary in their reset behavior:
* AsyncResetShiftReg -- Asynchronously reset register array
* A W(width) x D(depth) sized array is constructed from D instantiations of a
 * W-wide register vector. Functionally identical to AsyncResetSynchronizerShiftReg,
 * but only used for timing applications.
*/
abstract class AbstractPipelineReg(w: Int = 1) extends Module {
val io = IO(new Bundle {
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
}
)
}
object AbstractPipelineReg {
def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = {
val chain = Module(gen)
name.foreach{ chain.suggestName(_) }
chain.io.d := in.asUInt
chain.io.q.asTypeOf(in)
}
}
class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) {
require(depth > 0, "Depth must be greater than 0.")
override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}"
val chain = List.tabulate(depth) { i =>
Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}")
}
chain.last.io.d := io.d
chain.last.io.en := true.B
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink.io.d := source.io.q
sink.io.en := true.B
}
io.q := chain.head.io.q
}
object AsyncResetShiftReg {
def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name)
def apply [T <: Data](in: T, depth: Int, name: Option[String]): T =
apply(in, depth, 0, name)
def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T =
apply(in, depth, init.litValue.toInt, name)
def apply [T <: Data](in: T, depth: Int, init: T): T =
apply (in, depth, init.litValue.toInt, None)
}
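// Illustrative use (hypothetical signal): a 3-deep, asynchronously reset pipeline used purely for timing:
//   val piped = AsyncResetShiftReg(fastPathData, depth = 3, init = 0, name = Some("pipe"))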
File SynchronizerReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util.{RegEnable, Cat}
/** These wrap behavioral
* shift and next registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
*
* These are built up of *ResetSynchronizerPrimitiveShiftReg,
 * intended to be replaced by the integrator's metastability-hardened flop chains, or replaced
 * at this level if they have a multi-bit-wide synchronizer primitive.
* The different types vary in their reset behavior:
* NonSyncResetSynchronizerShiftReg -- Register array which does not have a reset pin
* AsyncResetSynchronizerShiftReg -- Asynchronously reset register array, constructed from W instantiations of D deep
* 1-bit-wide shift registers.
* SyncResetSynchronizerShiftReg -- Synchronously reset register array, constructed similarly to AsyncResetSynchronizerShiftReg
*
* [Inferred]ResetSynchronizerShiftReg -- TBD reset type by chisel3 reset inference.
*
* ClockCrossingReg -- Not made up of SynchronizerPrimitiveShiftReg. This is for single-deep flops which cross
* Clock Domains.
*/
object SynchronizerResetType extends Enumeration {
val NonSync, Inferred, Sync, Async = Value
}
// Note: this should not be used directly.
// Use the companion object to generate this with the correct reset type mixin.
private class SynchronizerPrimitiveShiftReg(
sync: Int,
init: Boolean,
resetType: SynchronizerResetType.Value)
extends AbstractPipelineReg(1) {
val initInt = if (init) 1 else 0
val initPostfix = resetType match {
case SynchronizerResetType.NonSync => ""
case _ => s"_i${initInt}"
}
override def desiredName = s"${resetType.toString}ResetSynchronizerPrimitiveShiftReg_d${sync}${initPostfix}"
val chain = List.tabulate(sync) { i =>
val reg = if (resetType == SynchronizerResetType.NonSync) Reg(Bool()) else RegInit(init.B)
reg.suggestName(s"sync_$i")
}
chain.last := io.d.asBool
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink := source
}
io.q := chain.head.asUInt
}
private object SynchronizerPrimitiveShiftReg {
def apply (in: Bool, sync: Int, init: Boolean, resetType: SynchronizerResetType.Value): Bool = {
val gen: () => SynchronizerPrimitiveShiftReg = resetType match {
case SynchronizerResetType.NonSync =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType)
case SynchronizerResetType.Async =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireAsyncReset
case SynchronizerResetType.Sync =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireSyncReset
case SynchronizerResetType.Inferred =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType)
}
AbstractPipelineReg(gen(), in)
}
}
// Note: This module may end up with a non-AsyncReset type reset.
// But the Primitives within will always have AsyncReset type.
class AsyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int)
extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"AsyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
withReset(reset.asAsyncReset){
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Async)
}
}
io.q := Cat(output.reverse)
}
object AsyncResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
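// Typical CDC use (illustrative, hypothetical signal): synchronize an asynchronous level into the local
// clock domain through a 3-deep chain that resets to 0:
//   val level_sync = AsyncResetSynchronizerShiftReg(level_async, sync = 3, init = 0)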
// Note: This module may end up with a non-Bool type reset.
// But the Primitives within will always have Bool reset type.
@deprecated("SyncResetSynchronizerShiftReg is unecessary with Chisel3 inferred resets. Use ResetSynchronizerShiftReg which will use the inferred reset type.", "rocket-chip 1.2")
class SyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"SyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
withReset(reset.asBool){
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Sync)
}
}
io.q := Cat(output.reverse)
}
object SyncResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
if (sync == 0) in else AbstractPipelineReg(new SyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
class ResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"ResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Inferred)
}
io.q := Cat(output.reverse)
}
object ResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
AbstractPipelineReg(new ResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
class SynchronizerShiftReg(w: Int = 1, sync: Int = 3) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"SynchronizerShiftReg_w${w}_d${sync}"
val output = Seq.tabulate(w) { i =>
SynchronizerPrimitiveShiftReg(io.d(i), sync, false, SynchronizerResetType.NonSync)
}
io.q := Cat(output.reverse)
}
object SynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, name: Option[String] = None): T =
if (sync == 0) in else AbstractPipelineReg(new SynchronizerShiftReg(in.getWidth, sync), in, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, None)
def apply [T <: Data](in: T): T =
apply (in, 3, None)
}
class ClockCrossingReg(w: Int = 1, doInit: Boolean) extends Module {
override def desiredName = s"ClockCrossingReg_w${w}"
val io = IO(new Bundle{
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
val en = Input(Bool())
})
val cdc_reg = if (doInit) RegEnable(io.d, 0.U(w.W), io.en) else RegEnable(io.d, io.en)
io.q := cdc_reg
}
object ClockCrossingReg {
def apply [T <: Data](in: T, en: Bool, doInit: Boolean, name: Option[String] = None): T = {
val cdc_reg = Module(new ClockCrossingReg(in.getWidth, doInit))
name.foreach{ cdc_reg.suggestName(_) }
cdc_reg.io.d := in.asUInt
cdc_reg.io.en := en
cdc_reg.io.q.asTypeOf(in)
}
}
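// A minimal usage sketch (illustrative only; signal names are hypothetical): holding a
// multi-bit value on an enable so it can be sampled from another clock domain.
//
//   val heldData = ClockCrossingReg(io.data, en = io.dataValid, doInit = false, name = Some("cdc_hold"))
//   // heldData updates only while io.dataValid is high; with doInit = true the register
//   // would additionally reset to 0.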
| module AsyncResetSynchronizerPrimitiveShiftReg_d3_i0_235( // @[SynchronizerReg.scala:68:19]
input clock, // @[SynchronizerReg.scala:68:19]
input reset, // @[SynchronizerReg.scala:68:19]
input io_d, // @[ShiftReg.scala:36:14]
output io_q // @[ShiftReg.scala:36:14]
);
wire io_d_0 = io_d; // @[SynchronizerReg.scala:68:19]
wire _sync_2_T = io_d_0; // @[SynchronizerReg.scala:54:22, :68:19]
wire io_q_0; // @[SynchronizerReg.scala:68:19]
reg sync_0; // @[SynchronizerReg.scala:51:87]
assign io_q_0 = sync_0; // @[SynchronizerReg.scala:51:87, :68:19]
reg sync_1; // @[SynchronizerReg.scala:51:87]
reg sync_2; // @[SynchronizerReg.scala:51:87]
always @(posedge clock or posedge reset) begin // @[SynchronizerReg.scala:68:19]
if (reset) begin // @[SynchronizerReg.scala:68:19]
sync_0 <= 1'h0; // @[SynchronizerReg.scala:51:87]
sync_1 <= 1'h0; // @[SynchronizerReg.scala:51:87]
sync_2 <= 1'h0; // @[SynchronizerReg.scala:51:87]
end
else begin // @[SynchronizerReg.scala:68:19]
sync_0 <= sync_1; // @[SynchronizerReg.scala:51:87]
sync_1 <= sync_2; // @[SynchronizerReg.scala:51:87]
sync_2 <= _sync_2_T; // @[SynchronizerReg.scala:51:87, :54:22]
end
  end // always @(posedge, posedge)
  assign io_q = io_q_0; // @[SynchronizerReg.scala:68:19]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File RecFNToRecFN.scala:
/*============================================================================
This Chisel source file is part of a pre-release version of the HardFloat IEEE
Floating-Point Arithmetic Package, by John R. Hauser (with some contributions
from Yunsup Lee and Andrew Waterman, mainly concerning testing).
Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the
University of California. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions, and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions, and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the University nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
=============================================================================*/
package hardfloat
import chisel3._
import consts._
class
RecFNToRecFN(
inExpWidth: Int, inSigWidth: Int, outExpWidth: Int, outSigWidth: Int)
extends chisel3.RawModule
{
val io = IO(new Bundle {
val in = Input(Bits((inExpWidth + inSigWidth + 1).W))
val roundingMode = Input(UInt(3.W))
val detectTininess = Input(UInt(1.W))
val out = Output(Bits((outExpWidth + outSigWidth + 1).W))
val exceptionFlags = Output(Bits(5.W))
})
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val rawIn = rawFloatFromRecFN(inExpWidth, inSigWidth, io.in);
if ((inExpWidth == outExpWidth) && (inSigWidth <= outSigWidth)) {
//--------------------------------------------------------------------
//--------------------------------------------------------------------
io.out := io.in<<(outSigWidth - inSigWidth)
io.exceptionFlags := isSigNaNRawFloat(rawIn) ## 0.U(4.W)
} else {
//--------------------------------------------------------------------
//--------------------------------------------------------------------
val roundAnyRawFNToRecFN =
Module(
new RoundAnyRawFNToRecFN(
inExpWidth,
inSigWidth,
outExpWidth,
outSigWidth,
flRoundOpt_sigMSBitAlwaysZero
))
roundAnyRawFNToRecFN.io.invalidExc := isSigNaNRawFloat(rawIn)
roundAnyRawFNToRecFN.io.infiniteExc := false.B
roundAnyRawFNToRecFN.io.in := rawIn
roundAnyRawFNToRecFN.io.roundingMode := io.roundingMode
roundAnyRawFNToRecFN.io.detectTininess := io.detectTininess
io.out := roundAnyRawFNToRecFN.io.out
io.exceptionFlags := roundAnyRawFNToRecFN.io.exceptionFlags
}
}
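// Behavior sketch (derived from the if/else above; instance and signal names are
// illustrative, not from this file): when the exponent widths match and the significand
// does not shrink, the conversion is just a wire plus a left shift of the significand,
// with only the signaling-NaN flag computed -- the RecFNToRecFN_235 instance emitted
// below, whose 33-bit io_out is wired straight to io_in, is that degenerate case. Any
// other width combination goes through RoundAnyRawFNToRecFN, e.g. narrowing f64 to f32:
//
//   val narrow = Module(new RecFNToRecFN(11, 53, 8, 24))
//   narrow.io.in := recF64
//   narrow.io.roundingMode := consts.round_near_even
//   narrow.io.detectTininess := consts.tininess_afterRounding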
File rawFloatFromRecFN.scala:
/*============================================================================
This Chisel source file is part of a pre-release version of the HardFloat IEEE
Floating-Point Arithmetic Package, by John R. Hauser (with some contributions
from Yunsup Lee and Andrew Waterman, mainly concerning testing).
Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the
University of California. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions, and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions, and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the University nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
=============================================================================*/
package hardfloat
import chisel3._
import chisel3.util._
/*----------------------------------------------------------------------------
| In the result, no more than one of 'isNaN', 'isInf', and 'isZero' will be
| set.
*----------------------------------------------------------------------------*/
object rawFloatFromRecFN
{
def apply(expWidth: Int, sigWidth: Int, in: Bits): RawFloat =
{
val exp = in(expWidth + sigWidth - 1, sigWidth - 1)
val isZero = exp(expWidth, expWidth - 2) === 0.U
val isSpecial = exp(expWidth, expWidth - 1) === 3.U
val out = Wire(new RawFloat(expWidth, sigWidth))
out.isNaN := isSpecial && exp(expWidth - 2)
out.isInf := isSpecial && ! exp(expWidth - 2)
out.isZero := isZero
out.sign := in(expWidth + sigWidth)
out.sExp := exp.zext
out.sig := 0.U(1.W) ## ! isZero ## in(sigWidth - 2, 0)
out
}
}
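// Worked decoding example (read off the code above, for expWidth = 8, sigWidth = 24,
// i.e. a 33-bit recoded float like the io_in of the module below):
//   exp     = in(31, 23)                       -- 9-bit recoded exponent
//   isZero  = exp(8, 6) === 0.U                -- top three exponent bits all zero
//   special = exp(8, 7) === 3.U                -- top two exponent bits both one
//   isNaN   = special &&  exp(6)
//   isInf   = special && !exp(6)
//   sig     = 0.U(1.W) ## !isZero ## in(22, 0) -- 25 bits; the hidden bit is !isZero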
| module RecFNToRecFN_235( // @[RecFNToRecFN.scala:44:5]
input [32:0] io_in, // @[RecFNToRecFN.scala:48:16]
output [32:0] io_out // @[RecFNToRecFN.scala:48:16]
);
wire [32:0] io_in_0 = io_in; // @[RecFNToRecFN.scala:44:5]
wire io_detectTininess = 1'h1; // @[RecFNToRecFN.scala:44:5, :48:16]
wire [2:0] io_roundingMode = 3'h0; // @[RecFNToRecFN.scala:44:5, :48:16]
wire [32:0] _io_out_T = io_in_0; // @[RecFNToRecFN.scala:44:5, :64:35]
wire [4:0] _io_exceptionFlags_T_3; // @[RecFNToRecFN.scala:65:54]
wire [32:0] io_out_0; // @[RecFNToRecFN.scala:44:5]
wire [4:0] io_exceptionFlags; // @[RecFNToRecFN.scala:44:5]
wire [8:0] rawIn_exp = io_in_0[31:23]; // @[rawFloatFromRecFN.scala:51:21]
wire [2:0] _rawIn_isZero_T = rawIn_exp[8:6]; // @[rawFloatFromRecFN.scala:51:21, :52:28]
wire rawIn_isZero = _rawIn_isZero_T == 3'h0; // @[rawFloatFromRecFN.scala:52:{28,53}]
wire rawIn_isZero_0 = rawIn_isZero; // @[rawFloatFromRecFN.scala:52:53, :55:23]
wire [1:0] _rawIn_isSpecial_T = rawIn_exp[8:7]; // @[rawFloatFromRecFN.scala:51:21, :53:28]
wire rawIn_isSpecial = &_rawIn_isSpecial_T; // @[rawFloatFromRecFN.scala:53:{28,53}]
wire _rawIn_out_isNaN_T_1; // @[rawFloatFromRecFN.scala:56:33]
wire _rawIn_out_isInf_T_2; // @[rawFloatFromRecFN.scala:57:33]
wire _rawIn_out_sign_T; // @[rawFloatFromRecFN.scala:59:25]
wire [9:0] _rawIn_out_sExp_T; // @[rawFloatFromRecFN.scala:60:27]
wire [24:0] _rawIn_out_sig_T_3; // @[rawFloatFromRecFN.scala:61:44]
wire rawIn_isNaN; // @[rawFloatFromRecFN.scala:55:23]
wire rawIn_isInf; // @[rawFloatFromRecFN.scala:55:23]
wire rawIn_sign; // @[rawFloatFromRecFN.scala:55:23]
wire [9:0] rawIn_sExp; // @[rawFloatFromRecFN.scala:55:23]
wire [24:0] rawIn_sig; // @[rawFloatFromRecFN.scala:55:23]
wire _rawIn_out_isNaN_T = rawIn_exp[6]; // @[rawFloatFromRecFN.scala:51:21, :56:41]
wire _rawIn_out_isInf_T = rawIn_exp[6]; // @[rawFloatFromRecFN.scala:51:21, :56:41, :57:41]
assign _rawIn_out_isNaN_T_1 = rawIn_isSpecial & _rawIn_out_isNaN_T; // @[rawFloatFromRecFN.scala:53:53, :56:{33,41}]
assign rawIn_isNaN = _rawIn_out_isNaN_T_1; // @[rawFloatFromRecFN.scala:55:23, :56:33]
wire _rawIn_out_isInf_T_1 = ~_rawIn_out_isInf_T; // @[rawFloatFromRecFN.scala:57:{36,41}]
assign _rawIn_out_isInf_T_2 = rawIn_isSpecial & _rawIn_out_isInf_T_1; // @[rawFloatFromRecFN.scala:53:53, :57:{33,36}]
assign rawIn_isInf = _rawIn_out_isInf_T_2; // @[rawFloatFromRecFN.scala:55:23, :57:33]
assign _rawIn_out_sign_T = io_in_0[32]; // @[rawFloatFromRecFN.scala:59:25]
assign rawIn_sign = _rawIn_out_sign_T; // @[rawFloatFromRecFN.scala:55:23, :59:25]
assign _rawIn_out_sExp_T = {1'h0, rawIn_exp}; // @[rawFloatFromRecFN.scala:51:21, :60:27]
assign rawIn_sExp = _rawIn_out_sExp_T; // @[rawFloatFromRecFN.scala:55:23, :60:27]
wire _rawIn_out_sig_T = ~rawIn_isZero; // @[rawFloatFromRecFN.scala:52:53, :61:35]
wire [1:0] _rawIn_out_sig_T_1 = {1'h0, _rawIn_out_sig_T}; // @[rawFloatFromRecFN.scala:61:{32,35}]
wire [22:0] _rawIn_out_sig_T_2 = io_in_0[22:0]; // @[rawFloatFromRecFN.scala:61:49]
assign _rawIn_out_sig_T_3 = {_rawIn_out_sig_T_1, _rawIn_out_sig_T_2}; // @[rawFloatFromRecFN.scala:61:{32,44,49}]
assign rawIn_sig = _rawIn_out_sig_T_3; // @[rawFloatFromRecFN.scala:55:23, :61:44]
assign io_out_0 = _io_out_T; // @[RecFNToRecFN.scala:44:5, :64:35]
wire _io_exceptionFlags_T = rawIn_sig[22]; // @[rawFloatFromRecFN.scala:55:23]
wire _io_exceptionFlags_T_1 = ~_io_exceptionFlags_T; // @[common.scala:82:{49,56}]
wire _io_exceptionFlags_T_2 = rawIn_isNaN & _io_exceptionFlags_T_1; // @[rawFloatFromRecFN.scala:55:23]
assign _io_exceptionFlags_T_3 = {_io_exceptionFlags_T_2, 4'h0}; // @[common.scala:82:46]
assign io_exceptionFlags = _io_exceptionFlags_T_3; // @[RecFNToRecFN.scala:44:5, :65:54]
assign io_out = io_out_0; // @[RecFNToRecFN.scala:44:5]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File SinkX.scala:
/*
* Copyright 2019 SiFive, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You should have received a copy of LICENSE.Apache2 along with
* this software. If not, you may obtain a copy at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package sifive.blocks.inclusivecache
import chisel3._
import chisel3.util._
class SinkXRequest(params: InclusiveCacheParameters) extends InclusiveCacheBundle(params)
{
val address = UInt(params.inner.bundle.addressBits.W)
}
class SinkX(params: InclusiveCacheParameters) extends Module
{
val io = IO(new Bundle {
val req = Decoupled(new FullRequest(params))
val x = Flipped(Decoupled(new SinkXRequest(params)))
})
val x = Queue(io.x, 1)
val (tag, set, offset) = params.parseAddress(x.bits.address)
x.ready := io.req.ready
io.req.valid := x.valid
params.ccover(x.valid && !x.ready, "SINKX_STALL", "Backpressure when accepting a control message")
io.req.bits.prio := VecInit(1.U(3.W).asBools) // same prio as A
io.req.bits.control:= true.B
io.req.bits.opcode := 0.U
io.req.bits.param := 0.U
io.req.bits.size := params.offsetBits.U
// The source does not matter, because a flush command never allocates a way.
// However, it must be a legal source, otherwise assertions might spuriously fire.
io.req.bits.source := params.inner.client.clients.map(_.sourceId.start).min.U
io.req.bits.offset := 0.U
io.req.bits.set := set
io.req.bits.tag := tag
io.req.bits.put := 0.U
}
File Parameters.scala:
/*
* Copyright 2019 SiFive, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You should have received a copy of LICENSE.Apache2 along with
* this software. If not, you may obtain a copy at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package sifive.blocks.inclusivecache
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config._
import freechips.rocketchip.diplomacy._
import freechips.rocketchip.tilelink._
import freechips.rocketchip.util._
import freechips.rocketchip.util.property.cover
import scala.math.{min,max}
case class CacheParameters(
level: Int,
ways: Int,
sets: Int,
blockBytes: Int,
beatBytes: Int, // inner
hintsSkipProbe: Boolean)
{
require (ways > 0)
require (sets > 0)
require (blockBytes > 0 && isPow2(blockBytes))
require (beatBytes > 0 && isPow2(beatBytes))
require (blockBytes >= beatBytes)
val blocks = ways * sets
val sizeBytes = blocks * blockBytes
val blockBeats = blockBytes/beatBytes
}
case class InclusiveCachePortParameters(
a: BufferParams,
b: BufferParams,
c: BufferParams,
d: BufferParams,
e: BufferParams)
{
def apply()(implicit p: Parameters, valName: ValName) = LazyModule(new TLBuffer(a, b, c, d, e))
}
object InclusiveCachePortParameters
{
val none = InclusiveCachePortParameters(
a = BufferParams.none,
b = BufferParams.none,
c = BufferParams.none,
d = BufferParams.none,
e = BufferParams.none)
val full = InclusiveCachePortParameters(
a = BufferParams.default,
b = BufferParams.default,
c = BufferParams.default,
d = BufferParams.default,
e = BufferParams.default)
// This removes feed-through paths from C=>A and A=>C
val fullC = InclusiveCachePortParameters(
a = BufferParams.none,
b = BufferParams.none,
c = BufferParams.default,
d = BufferParams.none,
e = BufferParams.none)
val flowAD = InclusiveCachePortParameters(
a = BufferParams.flow,
b = BufferParams.none,
c = BufferParams.none,
d = BufferParams.flow,
e = BufferParams.none)
val flowAE = InclusiveCachePortParameters(
a = BufferParams.flow,
b = BufferParams.none,
c = BufferParams.none,
d = BufferParams.none,
e = BufferParams.flow)
// For innerBuf:
// SinkA: no restrictions, flows into scheduler+putbuffer
// SourceB: no restrictions, flows out of scheduler
// sinkC: no restrictions, flows into scheduler+putbuffer & buffered to bankedStore
// SourceD: no restrictions, flows out of bankedStore/regout
// SinkE: no restrictions, flows into scheduler
//
// ... so while none is possible, you probably want at least flowAC to cut ready
// from the scheduler delay and flowD to ease SourceD back-pressure
  // For outerBuf:
// SourceA: must not be pipe, flows out of scheduler
// SinkB: no restrictions, flows into scheduler
// SourceC: pipe is useless, flows out of bankedStore/regout, parameter depth ignored
// SinkD: no restrictions, flows into scheduler & bankedStore
// SourceE: must not be pipe, flows out of scheduler
//
// ... AE take the channel ready into the scheduler, so you need at least flowAE
}
case class InclusiveCacheMicroParameters(
writeBytes: Int, // backing store update granularity
memCycles: Int = 40, // # of L2 clock cycles for a memory round-trip (50ns @ 800MHz)
portFactor: Int = 4, // numSubBanks = (widest TL port * portFactor) / writeBytes
dirReg: Boolean = false,
innerBuf: InclusiveCachePortParameters = InclusiveCachePortParameters.fullC, // or none
outerBuf: InclusiveCachePortParameters = InclusiveCachePortParameters.full) // or flowAE
{
require (writeBytes > 0 && isPow2(writeBytes))
require (memCycles > 0)
  require (portFactor >= 2) // for inner RMW and concurrent outer Release + Grant
}
case class InclusiveCacheControlParameters(
address: BigInt,
beatBytes: Int,
bankedControl: Boolean)
case class InclusiveCacheParameters(
cache: CacheParameters,
micro: InclusiveCacheMicroParameters,
control: Boolean,
inner: TLEdgeIn,
outer: TLEdgeOut)(implicit val p: Parameters)
{
require (cache.ways > 1)
require (cache.sets > 1 && isPow2(cache.sets))
require (micro.writeBytes <= inner.manager.beatBytes)
require (micro.writeBytes <= outer.manager.beatBytes)
require (inner.manager.beatBytes <= cache.blockBytes)
require (outer.manager.beatBytes <= cache.blockBytes)
// Require that all cached address ranges have contiguous blocks
outer.manager.managers.flatMap(_.address).foreach { a =>
require (a.alignment >= cache.blockBytes)
}
// If we are the first level cache, we do not need to support inner-BCE
val firstLevel = !inner.client.clients.exists(_.supports.probe)
// If we are the last level cache, we do not need to support outer-B
val lastLevel = !outer.manager.managers.exists(_.regionType > RegionType.UNCACHED)
require (lastLevel)
// Provision enough resources to achieve full throughput with missing single-beat accesses
val mshrs = InclusiveCacheParameters.all_mshrs(cache, micro)
val secondary = max(mshrs, micro.memCycles - mshrs)
val putLists = micro.memCycles // allow every request to be single beat
val putBeats = max(2*cache.blockBeats, micro.memCycles)
val relLists = 2
val relBeats = relLists*cache.blockBeats
val flatAddresses = AddressSet.unify(outer.manager.managers.flatMap(_.address))
val pickMask = AddressDecoder(flatAddresses.map(Seq(_)), flatAddresses.map(_.mask).reduce(_|_))
def bitOffsets(x: BigInt, offset: Int = 0, tail: List[Int] = List.empty[Int]): List[Int] =
if (x == 0) tail.reverse else bitOffsets(x >> 1, offset + 1, if ((x & 1) == 1) offset :: tail else tail)
val addressMapping = bitOffsets(pickMask)
val addressBits = addressMapping.size
// println(s"addresses: ${flatAddresses} => ${pickMask} => ${addressBits}")
val allClients = inner.client.clients.size
val clientBitsRaw = inner.client.clients.filter(_.supports.probe).size
val clientBits = max(1, clientBitsRaw)
val stateBits = 2
val wayBits = log2Ceil(cache.ways)
val setBits = log2Ceil(cache.sets)
val offsetBits = log2Ceil(cache.blockBytes)
val tagBits = addressBits - setBits - offsetBits
val putBits = log2Ceil(max(putLists, relLists))
require (tagBits > 0)
require (offsetBits > 0)
val innerBeatBits = (offsetBits - log2Ceil(inner.manager.beatBytes)) max 1
val outerBeatBits = (offsetBits - log2Ceil(outer.manager.beatBytes)) max 1
val innerMaskBits = inner.manager.beatBytes / micro.writeBytes
val outerMaskBits = outer.manager.beatBytes / micro.writeBytes
def clientBit(source: UInt): UInt = {
if (clientBitsRaw == 0) {
0.U
} else {
Cat(inner.client.clients.filter(_.supports.probe).map(_.sourceId.contains(source)).reverse)
}
}
def clientSource(bit: UInt): UInt = {
if (clientBitsRaw == 0) {
0.U
} else {
Mux1H(bit, inner.client.clients.filter(_.supports.probe).map(c => c.sourceId.start.U))
}
}
def parseAddress(x: UInt): (UInt, UInt, UInt) = {
val offset = Cat(addressMapping.map(o => x(o,o)).reverse)
val set = offset >> offsetBits
val tag = set >> setBits
(tag(tagBits-1, 0), set(setBits-1, 0), offset(offsetBits-1, 0))
}
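  // Worked example (matching the SinkX instance emitted below, where offsetBits = 6,
  // setBits = 10 and tagBits = 13): parseAddress first gathers the address bits selected
  // by addressMapping into a 29-bit value 'offset', then slices it as
  //   offset bits [5:0]   -> block offset (64-byte blocks)
  //   offset bits [15:6]  -> set          (1024 sets)
  //   offset bits [28:16] -> tag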
def widen(x: UInt, width: Int): UInt = {
val y = x | 0.U(width.W)
assert (y >> width === 0.U)
y(width-1, 0)
}
def expandAddress(tag: UInt, set: UInt, offset: UInt): UInt = {
val base = Cat(widen(tag, tagBits), widen(set, setBits), widen(offset, offsetBits))
val bits = Array.fill(outer.bundle.addressBits) { 0.U(1.W) }
addressMapping.zipWithIndex.foreach { case (a, i) => bits(a) = base(i,i) }
Cat(bits.reverse)
}
def restoreAddress(expanded: UInt): UInt = {
val missingBits = flatAddresses
.map { a => (a.widen(pickMask).base, a.widen(~pickMask)) } // key is the bits to restore on match
.groupBy(_._1)
.view
.mapValues(_.map(_._2))
val muxMask = AddressDecoder(missingBits.values.toList)
val mux = missingBits.toList.map { case (bits, addrs) =>
val widen = addrs.map(_.widen(~muxMask))
val matches = AddressSet
.unify(widen.distinct)
.map(_.contains(expanded))
.reduce(_ || _)
(matches, bits.U)
}
expanded | Mux1H(mux)
}
def dirReg[T <: Data](x: T, en: Bool = true.B): T = {
if (micro.dirReg) RegEnable(x, en) else x
}
def ccover(cond: Bool, label: String, desc: String)(implicit sourceInfo: SourceInfo) =
cover(cond, "CCACHE_L" + cache.level + "_" + label, "MemorySystem;;" + desc)
}
object MetaData
{
val stateBits = 2
def INVALID: UInt = 0.U(stateBits.W) // way is empty
def BRANCH: UInt = 1.U(stateBits.W) // outer slave cache is trunk
def TRUNK: UInt = 2.U(stateBits.W) // unique inner master cache is trunk
def TIP: UInt = 3.U(stateBits.W) // we are trunk, inner masters are branch
// Does a request need trunk?
def needT(opcode: UInt, param: UInt): Bool = {
!opcode(2) ||
(opcode === TLMessages.Hint && param === TLHints.PREFETCH_WRITE) ||
((opcode === TLMessages.AcquireBlock || opcode === TLMessages.AcquirePerm) && param =/= TLPermissions.NtoB)
}
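  // Illustrative cases (derived from the expression above, using the standard TileLink
  // A-channel opcodes; not an exhaustive table): Get and AcquireBlock(NtoB) do not need
  // Trunk, while Puts, a PREFETCH_WRITE Hint, or an Acquire asking for Trunk do:
  //   needT(Get, _)               = false   // opcode(2) set, neither Acquire nor Hint
  //   needT(PutFullData, _)       = true    // opcode(2) clear
  //   needT(AcquireBlock, NtoB)   = false
  //   needT(AcquireBlock, NtoT)   = true
  //   needT(Hint, PREFETCH_WRITE) = true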
// Does a request prove the client need not be probed?
def skipProbeN(opcode: UInt, hintsSkipProbe: Boolean): Bool = {
// Acquire(toB) and Get => is N, so no probe
// Acquire(*toT) => is N or B, but need T, so no probe
    // Hint => could be anything, so a probe IS needed; if hintsSkipProbe is enabled, skip probing the same client
// Put* => is N or B, so probe IS needed
opcode === TLMessages.AcquireBlock || opcode === TLMessages.AcquirePerm || opcode === TLMessages.Get || (opcode === TLMessages.Hint && hintsSkipProbe.B)
}
def isToN(param: UInt): Bool = {
param === TLPermissions.TtoN || param === TLPermissions.BtoN || param === TLPermissions.NtoN
}
def isToB(param: UInt): Bool = {
param === TLPermissions.TtoB || param === TLPermissions.BtoB
}
}
object InclusiveCacheParameters
{
val lfsrBits = 10
val L2ControlAddress = 0x2010000
val L2ControlSize = 0x1000
def out_mshrs(cache: CacheParameters, micro: InclusiveCacheMicroParameters): Int = {
// We need 2-3 normal MSHRs to cover the Directory latency
    // To fully exploit memory bandwidth-delay-product, we need memCycles/blockBeats MSHRs
max(if (micro.dirReg) 3 else 2, (micro.memCycles + cache.blockBeats - 1) / cache.blockBeats)
}
def all_mshrs(cache: CacheParameters, micro: InclusiveCacheMicroParameters): Int =
// We need a dedicated MSHR for B+C each
2 + out_mshrs(cache, micro)
}
class InclusiveCacheBundle(params: InclusiveCacheParameters) extends Bundle
| module SinkX( // @[SinkX.scala:28:7]
input clock, // @[SinkX.scala:28:7]
input reset, // @[SinkX.scala:28:7]
input io_req_ready, // @[SinkX.scala:30:14]
output io_req_valid, // @[SinkX.scala:30:14]
output [12:0] io_req_bits_tag, // @[SinkX.scala:30:14]
output [9:0] io_req_bits_set, // @[SinkX.scala:30:14]
output io_x_ready, // @[SinkX.scala:30:14]
input io_x_valid, // @[SinkX.scala:30:14]
input [31:0] io_x_bits_address // @[SinkX.scala:30:14]
);
wire [31:0] _x_q_io_deq_bits_address; // @[Decoupled.scala:362:21]
wire io_req_ready_0 = io_req_ready; // @[SinkX.scala:28:7]
wire io_x_valid_0 = io_x_valid; // @[SinkX.scala:28:7]
wire [31:0] io_x_bits_address_0 = io_x_bits_address; // @[SinkX.scala:28:7]
wire [5:0] io_req_bits_offset = 6'h0; // @[SinkX.scala:28:7]
wire [5:0] io_req_bits_put = 6'h0; // @[SinkX.scala:28:7]
wire [7:0] io_req_bits_source = 8'h0; // @[SinkX.scala:28:7]
wire [2:0] io_req_bits_size = 3'h6; // @[SinkX.scala:28:7]
wire [2:0] io_req_bits_opcode = 3'h0; // @[SinkX.scala:28:7]
wire [2:0] io_req_bits_param = 3'h0; // @[SinkX.scala:28:7]
wire io_req_bits_prio_1 = 1'h0; // @[SinkX.scala:28:7]
wire io_req_bits_prio_2 = 1'h0; // @[SinkX.scala:28:7]
wire io_req_bits_prio_0 = 1'h1; // @[SinkX.scala:28:7]
wire io_req_bits_control = 1'h1; // @[SinkX.scala:28:7]
wire [12:0] tag_1; // @[Parameters.scala:217:9]
wire [9:0] set_1; // @[Parameters.scala:217:28]
wire [12:0] io_req_bits_tag_0; // @[SinkX.scala:28:7]
wire [9:0] io_req_bits_set_0; // @[SinkX.scala:28:7]
wire io_req_valid_0; // @[SinkX.scala:28:7]
wire io_x_ready_0; // @[SinkX.scala:28:7]
wire _offset_T = _x_q_io_deq_bits_address[0]; // @[Decoupled.scala:362:21]
wire _offset_T_1 = _x_q_io_deq_bits_address[1]; // @[Decoupled.scala:362:21]
wire _offset_T_2 = _x_q_io_deq_bits_address[2]; // @[Decoupled.scala:362:21]
wire _offset_T_3 = _x_q_io_deq_bits_address[3]; // @[Decoupled.scala:362:21]
wire _offset_T_4 = _x_q_io_deq_bits_address[4]; // @[Decoupled.scala:362:21]
wire _offset_T_5 = _x_q_io_deq_bits_address[5]; // @[Decoupled.scala:362:21]
wire _offset_T_6 = _x_q_io_deq_bits_address[6]; // @[Decoupled.scala:362:21]
wire _offset_T_7 = _x_q_io_deq_bits_address[7]; // @[Decoupled.scala:362:21]
wire _offset_T_8 = _x_q_io_deq_bits_address[8]; // @[Decoupled.scala:362:21]
wire _offset_T_9 = _x_q_io_deq_bits_address[9]; // @[Decoupled.scala:362:21]
wire _offset_T_10 = _x_q_io_deq_bits_address[10]; // @[Decoupled.scala:362:21]
wire _offset_T_11 = _x_q_io_deq_bits_address[11]; // @[Decoupled.scala:362:21]
wire _offset_T_12 = _x_q_io_deq_bits_address[12]; // @[Decoupled.scala:362:21]
wire _offset_T_13 = _x_q_io_deq_bits_address[13]; // @[Decoupled.scala:362:21]
wire _offset_T_14 = _x_q_io_deq_bits_address[14]; // @[Decoupled.scala:362:21]
wire _offset_T_15 = _x_q_io_deq_bits_address[15]; // @[Decoupled.scala:362:21]
wire _offset_T_16 = _x_q_io_deq_bits_address[16]; // @[Decoupled.scala:362:21]
wire _offset_T_17 = _x_q_io_deq_bits_address[17]; // @[Decoupled.scala:362:21]
wire _offset_T_18 = _x_q_io_deq_bits_address[18]; // @[Decoupled.scala:362:21]
wire _offset_T_19 = _x_q_io_deq_bits_address[19]; // @[Decoupled.scala:362:21]
wire _offset_T_20 = _x_q_io_deq_bits_address[20]; // @[Decoupled.scala:362:21]
wire _offset_T_21 = _x_q_io_deq_bits_address[21]; // @[Decoupled.scala:362:21]
wire _offset_T_22 = _x_q_io_deq_bits_address[22]; // @[Decoupled.scala:362:21]
wire _offset_T_23 = _x_q_io_deq_bits_address[23]; // @[Decoupled.scala:362:21]
wire _offset_T_24 = _x_q_io_deq_bits_address[24]; // @[Decoupled.scala:362:21]
wire _offset_T_25 = _x_q_io_deq_bits_address[25]; // @[Decoupled.scala:362:21]
wire _offset_T_26 = _x_q_io_deq_bits_address[26]; // @[Decoupled.scala:362:21]
wire _offset_T_27 = _x_q_io_deq_bits_address[27]; // @[Decoupled.scala:362:21]
wire _offset_T_28 = _x_q_io_deq_bits_address[31]; // @[Decoupled.scala:362:21]
wire [1:0] offset_lo_lo_lo_hi = {_offset_T_2, _offset_T_1}; // @[Parameters.scala:214:{21,47}]
wire [2:0] offset_lo_lo_lo = {offset_lo_lo_lo_hi, _offset_T}; // @[Parameters.scala:214:{21,47}]
wire [1:0] offset_lo_lo_hi_lo = {_offset_T_4, _offset_T_3}; // @[Parameters.scala:214:{21,47}]
wire [1:0] offset_lo_lo_hi_hi = {_offset_T_6, _offset_T_5}; // @[Parameters.scala:214:{21,47}]
wire [3:0] offset_lo_lo_hi = {offset_lo_lo_hi_hi, offset_lo_lo_hi_lo}; // @[Parameters.scala:214:21]
wire [6:0] offset_lo_lo = {offset_lo_lo_hi, offset_lo_lo_lo}; // @[Parameters.scala:214:21]
wire [1:0] offset_lo_hi_lo_hi = {_offset_T_9, _offset_T_8}; // @[Parameters.scala:214:{21,47}]
wire [2:0] offset_lo_hi_lo = {offset_lo_hi_lo_hi, _offset_T_7}; // @[Parameters.scala:214:{21,47}]
wire [1:0] offset_lo_hi_hi_lo = {_offset_T_11, _offset_T_10}; // @[Parameters.scala:214:{21,47}]
wire [1:0] offset_lo_hi_hi_hi = {_offset_T_13, _offset_T_12}; // @[Parameters.scala:214:{21,47}]
wire [3:0] offset_lo_hi_hi = {offset_lo_hi_hi_hi, offset_lo_hi_hi_lo}; // @[Parameters.scala:214:21]
wire [6:0] offset_lo_hi = {offset_lo_hi_hi, offset_lo_hi_lo}; // @[Parameters.scala:214:21]
wire [13:0] offset_lo = {offset_lo_hi, offset_lo_lo}; // @[Parameters.scala:214:21]
wire [1:0] offset_hi_lo_lo_hi = {_offset_T_16, _offset_T_15}; // @[Parameters.scala:214:{21,47}]
wire [2:0] offset_hi_lo_lo = {offset_hi_lo_lo_hi, _offset_T_14}; // @[Parameters.scala:214:{21,47}]
wire [1:0] offset_hi_lo_hi_lo = {_offset_T_18, _offset_T_17}; // @[Parameters.scala:214:{21,47}]
wire [1:0] offset_hi_lo_hi_hi = {_offset_T_20, _offset_T_19}; // @[Parameters.scala:214:{21,47}]
wire [3:0] offset_hi_lo_hi = {offset_hi_lo_hi_hi, offset_hi_lo_hi_lo}; // @[Parameters.scala:214:21]
wire [6:0] offset_hi_lo = {offset_hi_lo_hi, offset_hi_lo_lo}; // @[Parameters.scala:214:21]
wire [1:0] offset_hi_hi_lo_lo = {_offset_T_22, _offset_T_21}; // @[Parameters.scala:214:{21,47}]
wire [1:0] offset_hi_hi_lo_hi = {_offset_T_24, _offset_T_23}; // @[Parameters.scala:214:{21,47}]
wire [3:0] offset_hi_hi_lo = {offset_hi_hi_lo_hi, offset_hi_hi_lo_lo}; // @[Parameters.scala:214:21]
wire [1:0] offset_hi_hi_hi_lo = {_offset_T_26, _offset_T_25}; // @[Parameters.scala:214:{21,47}]
wire [1:0] offset_hi_hi_hi_hi = {_offset_T_28, _offset_T_27}; // @[Parameters.scala:214:{21,47}]
wire [3:0] offset_hi_hi_hi = {offset_hi_hi_hi_hi, offset_hi_hi_hi_lo}; // @[Parameters.scala:214:21]
wire [7:0] offset_hi_hi = {offset_hi_hi_hi, offset_hi_hi_lo}; // @[Parameters.scala:214:21]
wire [14:0] offset_hi = {offset_hi_hi, offset_hi_lo}; // @[Parameters.scala:214:21]
wire [28:0] offset = {offset_hi, offset_lo}; // @[Parameters.scala:214:21]
wire [22:0] set = offset[28:6]; // @[Parameters.scala:214:21, :215:22]
wire [12:0] tag = set[22:10]; // @[Parameters.scala:215:22, :216:19]
assign tag_1 = tag; // @[Parameters.scala:216:19, :217:9]
assign io_req_bits_tag_0 = tag_1; // @[SinkX.scala:28:7]
assign set_1 = set[9:0]; // @[Parameters.scala:215:22, :217:28]
assign io_req_bits_set_0 = set_1; // @[SinkX.scala:28:7]
wire [5:0] offset_1 = offset[5:0]; // @[Parameters.scala:214:21, :217:50]
Queue1_SinkXRequest x_q ( // @[Decoupled.scala:362:21]
.clock (clock),
.reset (reset),
.io_enq_ready (io_x_ready_0),
.io_enq_valid (io_x_valid_0), // @[SinkX.scala:28:7]
.io_enq_bits_address (io_x_bits_address_0), // @[SinkX.scala:28:7]
.io_deq_ready (io_req_ready_0), // @[SinkX.scala:28:7]
.io_deq_valid (io_req_valid_0),
.io_deq_bits_address (_x_q_io_deq_bits_address)
); // @[Decoupled.scala:362:21]
assign io_req_valid = io_req_valid_0; // @[SinkX.scala:28:7]
assign io_req_bits_tag = io_req_bits_tag_0; // @[SinkX.scala:28:7]
assign io_req_bits_set = io_req_bits_set_0; // @[SinkX.scala:28:7]
assign io_x_ready = io_x_ready_0; // @[SinkX.scala:28:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File ShiftReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
// Similar to the Chisel ShiftRegister but allows the user to suggest a
// name to the registers that get instantiated, and
// to provide a reset value.
object ShiftRegInit {
def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T =
(0 until n).foldRight(in) {
case (i, next) => {
val r = RegNext(next, init)
name.foreach { na => r.suggestName(s"${na}_${i}") }
r
}
}
}
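// A minimal usage sketch (signal names are hypothetical): delaying a Bool by two cycles
// with a defined reset value, giving each stage a suggested name.
//
//   val delayed = ShiftRegInit(io.pulse, n = 2, init = false.B, name = Some("pulse_pipe"))
//   // creates registers pulse_pipe_0 and pulse_pipe_1; 'delayed' is io.pulse two cycles late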
/** These wrap behavioral
* shift registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
* The different types vary in their reset behavior:
* AsyncResetShiftReg -- Asynchronously reset register array
* A W(width) x D(depth) sized array is constructed from D instantiations of a
 * W-wide register vector. Functionally identical to AsyncResetSynchronizerShiftReg,
* but only used for timing applications
*/
abstract class AbstractPipelineReg(w: Int = 1) extends Module {
val io = IO(new Bundle {
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
}
)
}
object AbstractPipelineReg {
def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = {
val chain = Module(gen)
name.foreach{ chain.suggestName(_) }
chain.io.d := in.asUInt
chain.io.q.asTypeOf(in)
}
}
class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) {
require(depth > 0, "Depth must be greater than 0.")
override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}"
val chain = List.tabulate(depth) { i =>
Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}")
}
chain.last.io.d := io.d
chain.last.io.en := true.B
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink.io.d := source.io.q
sink.io.en := true.B
}
io.q := chain.head.io.q
}
object AsyncResetShiftReg {
def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name)
def apply [T <: Data](in: T, depth: Int, name: Option[String]): T =
apply(in, depth, 0, name)
def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T =
apply(in, depth, init.litValue.toInt, name)
def apply [T <: Data](in: T, depth: Int, init: T): T =
apply (in, depth, init.litValue.toInt, None)
}
File SynchronizerReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util.{RegEnable, Cat}
/** These wrap behavioral
* shift and next registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
*
* These are built up of *ResetSynchronizerPrimitiveShiftReg,
* intended to be replaced by the integrator's metastable flops chains or replaced
* at this level if they have a multi-bit wide synchronizer primitive.
* The different types vary in their reset behavior:
* NonSyncResetSynchronizerShiftReg -- Register array which does not have a reset pin
* AsyncResetSynchronizerShiftReg -- Asynchronously reset register array, constructed from W instantiations of D deep
* 1-bit-wide shift registers.
* SyncResetSynchronizerShiftReg -- Synchronously reset register array, constructed similarly to AsyncResetSynchronizerShiftReg
*
* [Inferred]ResetSynchronizerShiftReg -- TBD reset type by chisel3 reset inference.
*
* ClockCrossingReg -- Not made up of SynchronizerPrimitiveShiftReg. This is for single-deep flops which cross
* Clock Domains.
*/
object SynchronizerResetType extends Enumeration {
val NonSync, Inferred, Sync, Async = Value
}
// Note: this should not be used directly.
// Use the companion object to generate this with the correct reset type mixin.
private class SynchronizerPrimitiveShiftReg(
sync: Int,
init: Boolean,
resetType: SynchronizerResetType.Value)
extends AbstractPipelineReg(1) {
val initInt = if (init) 1 else 0
val initPostfix = resetType match {
case SynchronizerResetType.NonSync => ""
case _ => s"_i${initInt}"
}
override def desiredName = s"${resetType.toString}ResetSynchronizerPrimitiveShiftReg_d${sync}${initPostfix}"
val chain = List.tabulate(sync) { i =>
val reg = if (resetType == SynchronizerResetType.NonSync) Reg(Bool()) else RegInit(init.B)
reg.suggestName(s"sync_$i")
}
chain.last := io.d.asBool
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink := source
}
io.q := chain.head.asUInt
}
private object SynchronizerPrimitiveShiftReg {
def apply (in: Bool, sync: Int, init: Boolean, resetType: SynchronizerResetType.Value): Bool = {
val gen: () => SynchronizerPrimitiveShiftReg = resetType match {
case SynchronizerResetType.NonSync =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType)
case SynchronizerResetType.Async =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireAsyncReset
case SynchronizerResetType.Sync =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireSyncReset
case SynchronizerResetType.Inferred =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType)
}
AbstractPipelineReg(gen(), in)
}
}
// Note: This module may end up with a non-AsyncReset type reset.
// But the Primitives within will always have AsyncReset type.
class AsyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int)
extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"AsyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
withReset(reset.asAsyncReset){
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Async)
}
}
io.q := Cat(output.reverse)
}
object AsyncResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
// Note: This module may end up with a non-Bool type reset.
// But the Primitives within will always have Bool reset type.
@deprecated("SyncResetSynchronizerShiftReg is unecessary with Chisel3 inferred resets. Use ResetSynchronizerShiftReg which will use the inferred reset type.", "rocket-chip 1.2")
class SyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"SyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
withReset(reset.asBool){
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Sync)
}
}
io.q := Cat(output.reverse)
}
object SyncResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
if (sync == 0) in else AbstractPipelineReg(new SyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
class ResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"ResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Inferred)
}
io.q := Cat(output.reverse)
}
object ResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
AbstractPipelineReg(new ResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
class SynchronizerShiftReg(w: Int = 1, sync: Int = 3) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"SynchronizerShiftReg_w${w}_d${sync}"
val output = Seq.tabulate(w) { i =>
SynchronizerPrimitiveShiftReg(io.d(i), sync, false, SynchronizerResetType.NonSync)
}
io.q := Cat(output.reverse)
}
object SynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, name: Option[String] = None): T =
if (sync == 0) in else AbstractPipelineReg(new SynchronizerShiftReg(in.getWidth, sync), in, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, None)
def apply [T <: Data](in: T): T =
apply (in, 3, None)
}
class ClockCrossingReg(w: Int = 1, doInit: Boolean) extends Module {
override def desiredName = s"ClockCrossingReg_w${w}"
val io = IO(new Bundle{
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
val en = Input(Bool())
})
val cdc_reg = if (doInit) RegEnable(io.d, 0.U(w.W), io.en) else RegEnable(io.d, io.en)
io.q := cdc_reg
}
object ClockCrossingReg {
def apply [T <: Data](in: T, en: Bool, doInit: Boolean, name: Option[String] = None): T = {
val cdc_reg = Module(new ClockCrossingReg(in.getWidth, doInit))
name.foreach{ cdc_reg.suggestName(_) }
cdc_reg.io.d := in.asUInt
cdc_reg.io.en := en
cdc_reg.io.q.asTypeOf(in)
}
}
| module AsyncResetSynchronizerShiftReg_w1_d3_i0_91( // @[SynchronizerReg.scala:80:7]
input clock, // @[SynchronizerReg.scala:80:7]
input reset, // @[SynchronizerReg.scala:80:7]
input io_d, // @[ShiftReg.scala:36:14]
output io_q // @[ShiftReg.scala:36:14]
);
wire io_d_0 = io_d; // @[SynchronizerReg.scala:80:7]
wire _output_T = reset; // @[SynchronizerReg.scala:86:21]
wire _output_T_1 = io_d_0; // @[SynchronizerReg.scala:80:7, :87:41]
wire output_0; // @[ShiftReg.scala:48:24]
wire io_q_0; // @[SynchronizerReg.scala:80:7]
assign io_q_0 = output_0; // @[SynchronizerReg.scala:80:7]
AsyncResetSynchronizerPrimitiveShiftReg_d3_i0_151 output_chain ( // @[ShiftReg.scala:45:23]
.clock (clock),
.reset (_output_T), // @[SynchronizerReg.scala:86:21]
.io_d (_output_T_1), // @[SynchronizerReg.scala:87:41]
.io_q (output_0)
); // @[ShiftReg.scala:45:23]
assign io_q = io_q_0; // @[SynchronizerReg.scala:80:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File primitives.scala:
/*============================================================================
This Chisel source file is part of a pre-release version of the HardFloat IEEE
Floating-Point Arithmetic Package, by John R. Hauser (with some contributions
from Yunsup Lee and Andrew Waterman, mainly concerning testing).
Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the
University of California. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions, and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions, and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the University nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
=============================================================================*/
package hardfloat
import chisel3._
import chisel3.util._
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
object lowMask
{
def apply(in: UInt, topBound: BigInt, bottomBound: BigInt): UInt =
{
require(topBound != bottomBound)
val numInVals = BigInt(1)<<in.getWidth
if (topBound < bottomBound) {
lowMask(~in, numInVals - 1 - topBound, numInVals - 1 - bottomBound)
} else if (numInVals > 64 /* Empirical */) {
// For simulation performance, we should avoid generating
      // extremely wide shifters, so we divide and conquer.
// Empirically, this does not impact synthesis QoR.
val mid = numInVals / 2
val msb = in(in.getWidth - 1)
val lsbs = in(in.getWidth - 2, 0)
if (mid < topBound) {
if (mid <= bottomBound) {
Mux(msb,
lowMask(lsbs, topBound - mid, bottomBound - mid),
0.U
)
} else {
Mux(msb,
lowMask(lsbs, topBound - mid, 0) ## ((BigInt(1)<<(mid - bottomBound).toInt) - 1).U,
lowMask(lsbs, mid, bottomBound)
)
}
} else {
~Mux(msb, 0.U, ~lowMask(lsbs, topBound, bottomBound))
}
} else {
val shift = (BigInt(-1)<<numInVals.toInt).S>>in
Reverse(
shift(
(numInVals - 1 - bottomBound).toInt,
(numInVals - topBound).toInt
)
)
}
}
}
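// Semantics sketch (my reading of the code above, not an original comment): for
// topBound > bottomBound, lowMask(in, topBound, bottomBound) is a thermometer mask of
// width (topBound - bottomBound) whose bit j is set iff in > bottomBound + j. For
// example, lowMask(count, 8, 0) with count = 5 yields b00011111 (low 5 bits set); the
// recursive case only splits the computation for wide inputs, it does not change the result.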
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
object countLeadingZeros
{
def apply(in: UInt): UInt = PriorityEncoder(in.asBools.reverse)
}
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
object orReduceBy2
{
def apply(in: UInt): UInt =
{
val reducedWidth = (in.getWidth + 1)>>1
val reducedVec = Wire(Vec(reducedWidth, Bool()))
for (ix <- 0 until reducedWidth - 1) {
reducedVec(ix) := in(ix * 2 + 1, ix * 2).orR
}
reducedVec(reducedWidth - 1) :=
in(in.getWidth - 1, (reducedWidth - 1) * 2).orR
reducedVec.asUInt
}
}
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
object orReduceBy4
{
def apply(in: UInt): UInt =
{
val reducedWidth = (in.getWidth + 3)>>2
val reducedVec = Wire(Vec(reducedWidth, Bool()))
for (ix <- 0 until reducedWidth - 1) {
reducedVec(ix) := in(ix * 4 + 3, ix * 4).orR
}
reducedVec(reducedWidth - 1) :=
in(in.getWidth - 1, (reducedWidth - 1) * 4).orR
reducedVec.asUInt
}
}
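// Illustrative example (derived from the code above): orReduceBy4 OR-reduces each group
// of four adjacent input bits into one output bit, LSB group first, with the last group
// also absorbing any leftover high bits. E.g. for a 12-bit input b0001_0000_1100 the
// result is b101 (groups [3:0] -> 1, [7:4] -> 0, [11:8] -> 1). orReduceBy2 does the same
// with groups of two.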
File MulAddRecFN.scala:
/*============================================================================
This Chisel source file is part of a pre-release version of the HardFloat IEEE
Floating-Point Arithmetic Package, by John R. Hauser (with some contributions
from Yunsup Lee and Andrew Waterman, mainly concerning testing).
Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the
University of California. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions, and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions, and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the University nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
=============================================================================*/
package hardfloat
import chisel3._
import chisel3.util._
import consts._
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
class MulAddRecFN_interIo(expWidth: Int, sigWidth: Int) extends Bundle
{
//*** ENCODE SOME OF THESE CASES IN FEWER BITS?:
val isSigNaNAny = Bool()
val isNaNAOrB = Bool()
val isInfA = Bool()
val isZeroA = Bool()
val isInfB = Bool()
val isZeroB = Bool()
val signProd = Bool()
val isNaNC = Bool()
val isInfC = Bool()
val isZeroC = Bool()
val sExpSum = SInt((expWidth + 2).W)
val doSubMags = Bool()
val CIsDominant = Bool()
val CDom_CAlignDist = UInt(log2Ceil(sigWidth + 1).W)
val highAlignedSigC = UInt((sigWidth + 2).W)
val bit0AlignedSigC = UInt(1.W)
}
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
class MulAddRecFNToRaw_preMul(expWidth: Int, sigWidth: Int) extends RawModule
{
override def desiredName = s"MulAddRecFNToRaw_preMul_e${expWidth}_s${sigWidth}"
val io = IO(new Bundle {
val op = Input(Bits(2.W))
val a = Input(Bits((expWidth + sigWidth + 1).W))
val b = Input(Bits((expWidth + sigWidth + 1).W))
val c = Input(Bits((expWidth + sigWidth + 1).W))
val mulAddA = Output(UInt(sigWidth.W))
val mulAddB = Output(UInt(sigWidth.W))
val mulAddC = Output(UInt((sigWidth * 2).W))
val toPostMul = Output(new MulAddRecFN_interIo(expWidth, sigWidth))
})
//------------------------------------------------------------------------
//------------------------------------------------------------------------
//*** POSSIBLE TO REDUCE THIS BY 1 OR 2 BITS? (CURRENTLY 2 BITS BETWEEN
//*** UNSHIFTED C AND PRODUCT):
val sigSumWidth = sigWidth * 3 + 3
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val rawA = rawFloatFromRecFN(expWidth, sigWidth, io.a)
val rawB = rawFloatFromRecFN(expWidth, sigWidth, io.b)
val rawC = rawFloatFromRecFN(expWidth, sigWidth, io.c)
val signProd = rawA.sign ^ rawB.sign ^ io.op(1)
//*** REVIEW THE BIAS FOR 'sExpAlignedProd':
val sExpAlignedProd =
rawA.sExp +& rawB.sExp + (-(BigInt(1)<<expWidth) + sigWidth + 3).S
val doSubMags = signProd ^ rawC.sign ^ io.op(0)
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val sNatCAlignDist = sExpAlignedProd - rawC.sExp
val posNatCAlignDist = sNatCAlignDist(expWidth + 1, 0)
val isMinCAlign = rawA.isZero || rawB.isZero || (sNatCAlignDist < 0.S)
val CIsDominant =
! rawC.isZero && (isMinCAlign || (posNatCAlignDist <= sigWidth.U))
val CAlignDist =
Mux(isMinCAlign,
0.U,
Mux(posNatCAlignDist < (sigSumWidth - 1).U,
posNatCAlignDist(log2Ceil(sigSumWidth) - 1, 0),
(sigSumWidth - 1).U
)
)
val mainAlignedSigC =
(Mux(doSubMags, ~rawC.sig, rawC.sig) ## Fill(sigSumWidth - sigWidth + 2, doSubMags)).asSInt>>CAlignDist
val reduced4CExtra =
(orReduceBy4(rawC.sig<<((sigSumWidth - sigWidth - 1) & 3)) &
lowMask(
CAlignDist>>2,
//*** NOT NEEDED?:
// (sigSumWidth + 2)>>2,
(sigSumWidth - 1)>>2,
(sigSumWidth - sigWidth - 1)>>2
)
).orR
val alignedSigC =
Cat(mainAlignedSigC>>3,
Mux(doSubMags,
mainAlignedSigC(2, 0).andR && ! reduced4CExtra,
mainAlignedSigC(2, 0).orR || reduced4CExtra
)
)
//------------------------------------------------------------------------
//------------------------------------------------------------------------
io.mulAddA := rawA.sig
io.mulAddB := rawB.sig
io.mulAddC := alignedSigC(sigWidth * 2, 1)
io.toPostMul.isSigNaNAny :=
isSigNaNRawFloat(rawA) || isSigNaNRawFloat(rawB) ||
isSigNaNRawFloat(rawC)
io.toPostMul.isNaNAOrB := rawA.isNaN || rawB.isNaN
io.toPostMul.isInfA := rawA.isInf
io.toPostMul.isZeroA := rawA.isZero
io.toPostMul.isInfB := rawB.isInf
io.toPostMul.isZeroB := rawB.isZero
io.toPostMul.signProd := signProd
io.toPostMul.isNaNC := rawC.isNaN
io.toPostMul.isInfC := rawC.isInf
io.toPostMul.isZeroC := rawC.isZero
io.toPostMul.sExpSum :=
Mux(CIsDominant, rawC.sExp, sExpAlignedProd - sigWidth.S)
io.toPostMul.doSubMags := doSubMags
io.toPostMul.CIsDominant := CIsDominant
io.toPostMul.CDom_CAlignDist := CAlignDist(log2Ceil(sigWidth + 1) - 1, 0)
io.toPostMul.highAlignedSigC :=
alignedSigC(sigSumWidth - 1, sigWidth * 2 + 1)
io.toPostMul.bit0AlignedSigC := alignedSigC(0)
}
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
class MulAddRecFNToRaw_postMul(expWidth: Int, sigWidth: Int) extends RawModule
{
override def desiredName = s"MulAddRecFNToRaw_postMul_e${expWidth}_s${sigWidth}"
val io = IO(new Bundle {
val fromPreMul = Input(new MulAddRecFN_interIo(expWidth, sigWidth))
val mulAddResult = Input(UInt((sigWidth * 2 + 1).W))
val roundingMode = Input(UInt(3.W))
val invalidExc = Output(Bool())
val rawOut = Output(new RawFloat(expWidth, sigWidth + 2))
})
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val sigSumWidth = sigWidth * 3 + 3
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val roundingMode_min = (io.roundingMode === round_min)
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val opSignC = io.fromPreMul.signProd ^ io.fromPreMul.doSubMags
val sigSum =
Cat(Mux(io.mulAddResult(sigWidth * 2),
io.fromPreMul.highAlignedSigC + 1.U,
io.fromPreMul.highAlignedSigC
),
io.mulAddResult(sigWidth * 2 - 1, 0),
io.fromPreMul.bit0AlignedSigC
)
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val CDom_sign = opSignC
val CDom_sExp = io.fromPreMul.sExpSum - io.fromPreMul.doSubMags.zext
val CDom_absSigSum =
Mux(io.fromPreMul.doSubMags,
~sigSum(sigSumWidth - 1, sigWidth + 1),
0.U(1.W) ##
//*** IF GAP IS REDUCED TO 1 BIT, MUST REDUCE THIS COMPONENT TO 1 BIT TOO:
io.fromPreMul.highAlignedSigC(sigWidth + 1, sigWidth) ##
sigSum(sigSumWidth - 3, sigWidth + 2)
)
val CDom_absSigSumExtra =
Mux(io.fromPreMul.doSubMags,
(~sigSum(sigWidth, 1)).orR,
sigSum(sigWidth + 1, 1).orR
)
val CDom_mainSig =
(CDom_absSigSum<<io.fromPreMul.CDom_CAlignDist)(
sigWidth * 2 + 1, sigWidth - 3)
val CDom_reduced4SigExtra =
(orReduceBy4(CDom_absSigSum(sigWidth - 1, 0)<<(~sigWidth & 3)) &
lowMask(io.fromPreMul.CDom_CAlignDist>>2, 0, sigWidth>>2)).orR
val CDom_sig =
Cat(CDom_mainSig>>3,
CDom_mainSig(2, 0).orR || CDom_reduced4SigExtra ||
CDom_absSigSumExtra
)
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val notCDom_signSigSum = sigSum(sigWidth * 2 + 3)
val notCDom_absSigSum =
Mux(notCDom_signSigSum,
~sigSum(sigWidth * 2 + 2, 0),
sigSum(sigWidth * 2 + 2, 0) + io.fromPreMul.doSubMags
)
val notCDom_reduced2AbsSigSum = orReduceBy2(notCDom_absSigSum)
val notCDom_normDistReduced2 = countLeadingZeros(notCDom_reduced2AbsSigSum)
val notCDom_nearNormDist = notCDom_normDistReduced2<<1
val notCDom_sExp = io.fromPreMul.sExpSum - notCDom_nearNormDist.asUInt.zext
val notCDom_mainSig =
(notCDom_absSigSum<<notCDom_nearNormDist)(
sigWidth * 2 + 3, sigWidth - 1)
val notCDom_reduced4SigExtra =
(orReduceBy2(
notCDom_reduced2AbsSigSum(sigWidth>>1, 0)<<((sigWidth>>1) & 1)) &
lowMask(notCDom_normDistReduced2>>1, 0, (sigWidth + 2)>>2)
).orR
val notCDom_sig =
Cat(notCDom_mainSig>>3,
notCDom_mainSig(2, 0).orR || notCDom_reduced4SigExtra
)
val notCDom_completeCancellation =
(notCDom_sig(sigWidth + 2, sigWidth + 1) === 0.U)
val notCDom_sign =
Mux(notCDom_completeCancellation,
roundingMode_min,
io.fromPreMul.signProd ^ notCDom_signSigSum
)
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val notNaN_isInfProd = io.fromPreMul.isInfA || io.fromPreMul.isInfB
val notNaN_isInfOut = notNaN_isInfProd || io.fromPreMul.isInfC
val notNaN_addZeros =
(io.fromPreMul.isZeroA || io.fromPreMul.isZeroB) &&
io.fromPreMul.isZeroC
io.invalidExc :=
io.fromPreMul.isSigNaNAny ||
(io.fromPreMul.isInfA && io.fromPreMul.isZeroB) ||
(io.fromPreMul.isZeroA && io.fromPreMul.isInfB) ||
(! io.fromPreMul.isNaNAOrB &&
(io.fromPreMul.isInfA || io.fromPreMul.isInfB) &&
io.fromPreMul.isInfC &&
io.fromPreMul.doSubMags)
io.rawOut.isNaN := io.fromPreMul.isNaNAOrB || io.fromPreMul.isNaNC
io.rawOut.isInf := notNaN_isInfOut
//*** IMPROVE?:
io.rawOut.isZero :=
notNaN_addZeros ||
(! io.fromPreMul.CIsDominant && notCDom_completeCancellation)
io.rawOut.sign :=
(notNaN_isInfProd && io.fromPreMul.signProd) ||
(io.fromPreMul.isInfC && opSignC) ||
(notNaN_addZeros && ! roundingMode_min &&
io.fromPreMul.signProd && opSignC) ||
(notNaN_addZeros && roundingMode_min &&
(io.fromPreMul.signProd || opSignC)) ||
(! notNaN_isInfOut && ! notNaN_addZeros &&
Mux(io.fromPreMul.CIsDominant, CDom_sign, notCDom_sign))
io.rawOut.sExp := Mux(io.fromPreMul.CIsDominant, CDom_sExp, notCDom_sExp)
io.rawOut.sig := Mux(io.fromPreMul.CIsDominant, CDom_sig, notCDom_sig)
}
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
class MulAddRecFN(expWidth: Int, sigWidth: Int) extends RawModule
{
override def desiredName = s"MulAddRecFN_e${expWidth}_s${sigWidth}"
val io = IO(new Bundle {
val op = Input(Bits(2.W))
val a = Input(Bits((expWidth + sigWidth + 1).W))
val b = Input(Bits((expWidth + sigWidth + 1).W))
val c = Input(Bits((expWidth + sigWidth + 1).W))
val roundingMode = Input(UInt(3.W))
val detectTininess = Input(UInt(1.W))
val out = Output(Bits((expWidth + sigWidth + 1).W))
val exceptionFlags = Output(Bits(5.W))
})
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val mulAddRecFNToRaw_preMul =
Module(new MulAddRecFNToRaw_preMul(expWidth, sigWidth))
val mulAddRecFNToRaw_postMul =
Module(new MulAddRecFNToRaw_postMul(expWidth, sigWidth))
mulAddRecFNToRaw_preMul.io.op := io.op
mulAddRecFNToRaw_preMul.io.a := io.a
mulAddRecFNToRaw_preMul.io.b := io.b
mulAddRecFNToRaw_preMul.io.c := io.c
val mulAddResult =
(mulAddRecFNToRaw_preMul.io.mulAddA *
mulAddRecFNToRaw_preMul.io.mulAddB) +&
mulAddRecFNToRaw_preMul.io.mulAddC
mulAddRecFNToRaw_postMul.io.fromPreMul :=
mulAddRecFNToRaw_preMul.io.toPostMul
mulAddRecFNToRaw_postMul.io.mulAddResult := mulAddResult
mulAddRecFNToRaw_postMul.io.roundingMode := io.roundingMode
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val roundRawFNToRecFN =
Module(new RoundRawFNToRecFN(expWidth, sigWidth, 0))
roundRawFNToRecFN.io.invalidExc := mulAddRecFNToRaw_postMul.io.invalidExc
roundRawFNToRecFN.io.infiniteExc := false.B
roundRawFNToRecFN.io.in := mulAddRecFNToRaw_postMul.io.rawOut
roundRawFNToRecFN.io.roundingMode := io.roundingMode
roundRawFNToRecFN.io.detectTininess := io.detectTininess
io.out := roundRawFNToRecFN.io.out
io.exceptionFlags := roundRawFNToRecFN.io.exceptionFlags
}
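/*----------------------------------------------------------------------------
| A minimal instantiation sketch for MulAddRecFN, assuming an enclosing
| Chisel Module and recoded single-precision operands recA, recB, and recC
| (hypothetical wires of width expWidth + sigWidth + 1 = 33 bits):
|
|     val fma = Module(new MulAddRecFN(8, 24))
|     fma.io.op := 0.U                               // compute (a * b) + c
|     fma.io.a := recA
|     fma.io.b := recB
|     fma.io.c := recC
|     fma.io.roundingMode := consts.round_near_even
|     fma.io.detectTininess := consts.tininess_afterRounding
|     val recResult = fma.io.out                     // recoded result
|     val excFlags  = fma.io.exceptionFlags          // 5-bit exception flags
*----------------------------------------------------------------------------*/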
File rawFloatFromRecFN.scala:
/*============================================================================
This Chisel source file is part of a pre-release version of the HardFloat IEEE
Floating-Point Arithmetic Package, by John R. Hauser (with some contributions
from Yunsup Lee and Andrew Waterman, mainly concerning testing).
Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the
University of California. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions, and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions, and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the University nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
=============================================================================*/
package hardfloat
import chisel3._
import chisel3.util._
/*----------------------------------------------------------------------------
| In the result, no more than one of 'isNaN', 'isInf', and 'isZero' will be
| set.
*----------------------------------------------------------------------------*/
object rawFloatFromRecFN
{
def apply(expWidth: Int, sigWidth: Int, in: Bits): RawFloat =
{
val exp = in(expWidth + sigWidth - 1, sigWidth - 1)
val isZero = exp(expWidth, expWidth - 2) === 0.U
val isSpecial = exp(expWidth, expWidth - 1) === 3.U
val out = Wire(new RawFloat(expWidth, sigWidth))
out.isNaN := isSpecial && exp(expWidth - 2)
out.isInf := isSpecial && ! exp(expWidth - 2)
out.isZero := isZero
out.sign := in(expWidth + sigWidth)
out.sExp := exp.zext
out.sig := 0.U(1.W) ## ! isZero ## in(sigWidth - 2, 0)
out
}
}
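/*----------------------------------------------------------------------------
| A small decoding sketch, assuming an enclosing Module and a hypothetical
| recoded single-precision input 'recIn' (a 33-bit UInt):
|
|     val raw = rawFloatFromRecFN(8, 24, recIn)
|     // At most one of raw.isNaN, raw.isInf, and raw.isZero is set, so they
|     // may be treated as mutually exclusive selects:
|     val isFiniteNonzero = !raw.isNaN && !raw.isInf && !raw.isZero
*----------------------------------------------------------------------------*/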
File common.scala:
/*============================================================================
This Chisel source file is part of a pre-release version of the HardFloat IEEE
Floating-Point Arithmetic Package, by John R. Hauser (with some contributions
from Yunsup Lee and Andrew Waterman, mainly concerning testing).
Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018 The Regents of
the University of California. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions, and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions, and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the University nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
=============================================================================*/
package hardfloat
import chisel3._
object consts {
/*------------------------------------------------------------------------
| For rounding to integer values, rounding mode 'odd' rounds to minimum
| magnitude instead, same as 'minMag'.
*------------------------------------------------------------------------*/
def round_near_even = "b000".U(3.W)
def round_minMag = "b001".U(3.W)
def round_min = "b010".U(3.W)
def round_max = "b011".U(3.W)
def round_near_maxMag = "b100".U(3.W)
def round_odd = "b110".U(3.W)
/*------------------------------------------------------------------------
*------------------------------------------------------------------------*/
def tininess_beforeRounding = 0.U
def tininess_afterRounding = 1.U
/*------------------------------------------------------------------------
*------------------------------------------------------------------------*/
def flRoundOpt_sigMSBitAlwaysZero = 1
def flRoundOpt_subnormsAlwaysExact = 2
def flRoundOpt_neverUnderflows = 4
def flRoundOpt_neverOverflows = 8
/*------------------------------------------------------------------------
*------------------------------------------------------------------------*/
def divSqrtOpt_twoBitsPerCycle = 16
}
class RawFloat(val expWidth: Int, val sigWidth: Int) extends Bundle
{
val isNaN: Bool = Bool() // overrides all other fields
val isInf: Bool = Bool() // overrides 'isZero', 'sExp', and 'sig'
val isZero: Bool = Bool() // overrides 'sExp' and 'sig'
val sign: Bool = Bool()
val sExp: SInt = SInt((expWidth + 2).W)
val sig: UInt = UInt((sigWidth + 1).W) // 2 m.s. bits cannot both be 0
}
//*** CHANGE THIS INTO A '.isSigNaN' METHOD OF THE 'RawFloat' CLASS:
object isSigNaNRawFloat
{
def apply(in: RawFloat): Bool = in.isNaN && !in.sig(in.sigWidth - 2)
}
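/*----------------------------------------------------------------------------
| Sketch of signaling-NaN detection, assuming 'raw' was produced by
| rawFloatFromRecFN as in the file above:
|
|     val isSNaN = isSigNaNRawFloat(raw)   // NaN with the quiet bit clear
*----------------------------------------------------------------------------*/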
| module MulAddRecFNToRaw_preMul_e8_s24_9( // @[MulAddRecFN.scala:71:7]
input [32:0] io_a, // @[MulAddRecFN.scala:74:16]
input [32:0] io_b, // @[MulAddRecFN.scala:74:16]
output [23:0] io_mulAddA, // @[MulAddRecFN.scala:74:16]
output [23:0] io_mulAddB, // @[MulAddRecFN.scala:74:16]
output [47:0] io_mulAddC, // @[MulAddRecFN.scala:74:16]
output io_toPostMul_isSigNaNAny, // @[MulAddRecFN.scala:74:16]
output io_toPostMul_isNaNAOrB, // @[MulAddRecFN.scala:74:16]
output io_toPostMul_isInfA, // @[MulAddRecFN.scala:74:16]
output io_toPostMul_isZeroA, // @[MulAddRecFN.scala:74:16]
output io_toPostMul_isInfB, // @[MulAddRecFN.scala:74:16]
output io_toPostMul_isZeroB, // @[MulAddRecFN.scala:74:16]
output io_toPostMul_signProd, // @[MulAddRecFN.scala:74:16]
output [9:0] io_toPostMul_sExpSum, // @[MulAddRecFN.scala:74:16]
output io_toPostMul_doSubMags, // @[MulAddRecFN.scala:74:16]
output [4:0] io_toPostMul_CDom_CAlignDist, // @[MulAddRecFN.scala:74:16]
output [25:0] io_toPostMul_highAlignedSigC, // @[MulAddRecFN.scala:74:16]
output io_toPostMul_bit0AlignedSigC // @[MulAddRecFN.scala:74:16]
);
wire [32:0] io_a_0 = io_a; // @[MulAddRecFN.scala:71:7]
wire [32:0] io_b_0 = io_b; // @[MulAddRecFN.scala:71:7]
wire [8:0] rawC_exp = 9'h0; // @[rawFloatFromRecFN.scala:51:21]
wire [9:0] rawC_sExp = 10'h0; // @[rawFloatFromRecFN.scala:55:23, :60:27]
wire [9:0] _rawC_out_sExp_T = 10'h0; // @[rawFloatFromRecFN.scala:55:23, :60:27]
wire [22:0] _rawC_out_sig_T_2 = 23'h0; // @[rawFloatFromRecFN.scala:61:49]
wire [24:0] rawC_sig = 25'h0; // @[rawFloatFromRecFN.scala:55:23, :61:44]
wire [24:0] _rawC_out_sig_T_3 = 25'h0; // @[rawFloatFromRecFN.scala:55:23, :61:44]
wire [24:0] _mainAlignedSigC_T = 25'h1FFFFFF; // @[MulAddRecFN.scala:120:25]
wire [26:0] _reduced4CExtra_T = 27'h0; // @[MulAddRecFN.scala:122:30]
wire [2:0] _rawC_isZero_T = 3'h0; // @[rawFloatFromRecFN.scala:52:28]
wire [2:0] _reduced4CExtra_reducedVec_6_T = 3'h0; // @[rawFloatFromRecFN.scala:52:28]
wire [2:0] reduced4CExtra_lo = 3'h0; // @[rawFloatFromRecFN.scala:52:28]
wire [3:0] _reduced4CExtra_reducedVec_0_T = 4'h0; // @[primitives.scala:120:33, :124:20]
wire [3:0] _reduced4CExtra_reducedVec_1_T = 4'h0; // @[primitives.scala:120:33, :124:20]
wire [3:0] _reduced4CExtra_reducedVec_2_T = 4'h0; // @[primitives.scala:120:33, :124:20]
wire [3:0] _reduced4CExtra_reducedVec_3_T = 4'h0; // @[primitives.scala:120:33, :124:20]
wire [3:0] _reduced4CExtra_reducedVec_4_T = 4'h0; // @[primitives.scala:120:33, :124:20]
wire [3:0] _reduced4CExtra_reducedVec_5_T = 4'h0; // @[primitives.scala:120:33, :124:20]
wire [3:0] reduced4CExtra_hi = 4'h0; // @[primitives.scala:120:33, :124:20]
wire [6:0] _reduced4CExtra_T_1 = 7'h0; // @[primitives.scala:124:20]
wire [6:0] _reduced4CExtra_T_19 = 7'h0; // @[MulAddRecFN.scala:122:68]
wire io_toPostMul_isZeroC = 1'h1; // @[rawFloatFromRecFN.scala:52:53, :55:23, :57:36]
wire rawC_isZero = 1'h1; // @[rawFloatFromRecFN.scala:52:53, :55:23, :57:36]
wire rawC_isZero_0 = 1'h1; // @[rawFloatFromRecFN.scala:52:53, :55:23, :57:36]
wire _rawC_out_isInf_T_1 = 1'h1; // @[rawFloatFromRecFN.scala:52:53, :55:23, :57:36]
wire _alignedSigC_T_3 = 1'h1; // @[rawFloatFromRecFN.scala:52:53, :55:23, :57:36]
wire _io_toPostMul_isSigNaNAny_T_8 = 1'h1; // @[rawFloatFromRecFN.scala:52:53, :55:23, :57:36]
wire io_toPostMul_isNaNC = 1'h0; // @[MulAddRecFN.scala:71:7]
wire io_toPostMul_isInfC = 1'h0; // @[MulAddRecFN.scala:71:7]
wire io_toPostMul_CIsDominant = 1'h0; // @[MulAddRecFN.scala:71:7]
wire rawC_isSpecial = 1'h0; // @[rawFloatFromRecFN.scala:53:53]
wire rawC_isNaN = 1'h0; // @[rawFloatFromRecFN.scala:55:23]
wire rawC_isInf = 1'h0; // @[rawFloatFromRecFN.scala:55:23]
wire rawC_sign = 1'h0; // @[rawFloatFromRecFN.scala:55:23]
wire _rawC_out_isNaN_T = 1'h0; // @[rawFloatFromRecFN.scala:56:41]
wire _rawC_out_isNaN_T_1 = 1'h0; // @[rawFloatFromRecFN.scala:56:33]
wire _rawC_out_isInf_T = 1'h0; // @[rawFloatFromRecFN.scala:57:41]
wire _rawC_out_isInf_T_2 = 1'h0; // @[rawFloatFromRecFN.scala:57:33]
wire _rawC_out_sign_T = 1'h0; // @[rawFloatFromRecFN.scala:59:25]
wire _rawC_out_sig_T = 1'h0; // @[rawFloatFromRecFN.scala:61:35]
wire _signProd_T_1 = 1'h0; // @[MulAddRecFN.scala:97:49]
wire _doSubMags_T_1 = 1'h0; // @[MulAddRecFN.scala:102:49]
wire _CIsDominant_T = 1'h0; // @[MulAddRecFN.scala:110:9]
wire CIsDominant = 1'h0; // @[MulAddRecFN.scala:110:23]
wire reduced4CExtra_reducedVec_0 = 1'h0; // @[primitives.scala:118:30]
wire reduced4CExtra_reducedVec_1 = 1'h0; // @[primitives.scala:118:30]
wire reduced4CExtra_reducedVec_2 = 1'h0; // @[primitives.scala:118:30]
wire reduced4CExtra_reducedVec_3 = 1'h0; // @[primitives.scala:118:30]
wire reduced4CExtra_reducedVec_4 = 1'h0; // @[primitives.scala:118:30]
wire reduced4CExtra_reducedVec_5 = 1'h0; // @[primitives.scala:118:30]
wire reduced4CExtra_reducedVec_6 = 1'h0; // @[primitives.scala:118:30]
wire _reduced4CExtra_reducedVec_0_T_1 = 1'h0; // @[primitives.scala:120:54]
wire _reduced4CExtra_reducedVec_1_T_1 = 1'h0; // @[primitives.scala:120:54]
wire _reduced4CExtra_reducedVec_2_T_1 = 1'h0; // @[primitives.scala:120:54]
wire _reduced4CExtra_reducedVec_3_T_1 = 1'h0; // @[primitives.scala:120:54]
wire _reduced4CExtra_reducedVec_4_T_1 = 1'h0; // @[primitives.scala:120:54]
wire _reduced4CExtra_reducedVec_5_T_1 = 1'h0; // @[primitives.scala:120:54]
wire _reduced4CExtra_reducedVec_6_T_1 = 1'h0; // @[primitives.scala:123:57]
wire reduced4CExtra = 1'h0; // @[MulAddRecFN.scala:130:11]
wire _io_toPostMul_isSigNaNAny_T_7 = 1'h0; // @[common.scala:82:56]
wire _io_toPostMul_isSigNaNAny_T_9 = 1'h0; // @[common.scala:82:46]
wire [32:0] io_c = 33'h0; // @[MulAddRecFN.scala:71:7, :74:16]
wire [1:0] io_op = 2'h0; // @[rawFloatFromRecFN.scala:53:28, :61:32]
wire [1:0] _rawC_isSpecial_T = 2'h0; // @[rawFloatFromRecFN.scala:53:28, :61:32]
wire [1:0] _rawC_out_sig_T_1 = 2'h0; // @[rawFloatFromRecFN.scala:53:28, :61:32]
wire [1:0] reduced4CExtra_lo_hi = 2'h0; // @[rawFloatFromRecFN.scala:53:28, :61:32]
wire [1:0] reduced4CExtra_hi_lo = 2'h0; // @[rawFloatFromRecFN.scala:53:28, :61:32]
wire [1:0] reduced4CExtra_hi_hi = 2'h0; // @[rawFloatFromRecFN.scala:53:28, :61:32]
wire [47:0] _io_mulAddC_T; // @[MulAddRecFN.scala:143:30]
wire _io_toPostMul_isSigNaNAny_T_10; // @[MulAddRecFN.scala:146:58]
wire _io_toPostMul_isNaNAOrB_T; // @[MulAddRecFN.scala:148:42]
wire rawA_isInf; // @[rawFloatFromRecFN.scala:55:23]
wire rawA_isZero; // @[rawFloatFromRecFN.scala:55:23]
wire rawB_isInf; // @[rawFloatFromRecFN.scala:55:23]
wire rawB_isZero; // @[rawFloatFromRecFN.scala:55:23]
wire signProd; // @[MulAddRecFN.scala:97:42]
wire doSubMags; // @[MulAddRecFN.scala:102:42]
wire [4:0] _io_toPostMul_CDom_CAlignDist_T; // @[MulAddRecFN.scala:161:47]
wire [25:0] _io_toPostMul_highAlignedSigC_T; // @[MulAddRecFN.scala:163:20]
wire _io_toPostMul_bit0AlignedSigC_T; // @[MulAddRecFN.scala:164:48]
wire io_toPostMul_isSigNaNAny_0; // @[MulAddRecFN.scala:71:7]
wire io_toPostMul_isNaNAOrB_0; // @[MulAddRecFN.scala:71:7]
wire io_toPostMul_isInfA_0; // @[MulAddRecFN.scala:71:7]
wire io_toPostMul_isZeroA_0; // @[MulAddRecFN.scala:71:7]
wire io_toPostMul_isInfB_0; // @[MulAddRecFN.scala:71:7]
wire io_toPostMul_isZeroB_0; // @[MulAddRecFN.scala:71:7]
wire io_toPostMul_signProd_0; // @[MulAddRecFN.scala:71:7]
wire [9:0] io_toPostMul_sExpSum_0; // @[MulAddRecFN.scala:71:7]
wire io_toPostMul_doSubMags_0; // @[MulAddRecFN.scala:71:7]
wire [4:0] io_toPostMul_CDom_CAlignDist_0; // @[MulAddRecFN.scala:71:7]
wire [25:0] io_toPostMul_highAlignedSigC_0; // @[MulAddRecFN.scala:71:7]
wire io_toPostMul_bit0AlignedSigC_0; // @[MulAddRecFN.scala:71:7]
wire [23:0] io_mulAddA_0; // @[MulAddRecFN.scala:71:7]
wire [23:0] io_mulAddB_0; // @[MulAddRecFN.scala:71:7]
wire [47:0] io_mulAddC_0; // @[MulAddRecFN.scala:71:7]
wire [8:0] rawA_exp = io_a_0[31:23]; // @[rawFloatFromRecFN.scala:51:21]
wire [2:0] _rawA_isZero_T = rawA_exp[8:6]; // @[rawFloatFromRecFN.scala:51:21, :52:28]
wire rawA_isZero_0 = _rawA_isZero_T == 3'h0; // @[rawFloatFromRecFN.scala:52:{28,53}]
assign rawA_isZero = rawA_isZero_0; // @[rawFloatFromRecFN.scala:52:53, :55:23]
wire [1:0] _rawA_isSpecial_T = rawA_exp[8:7]; // @[rawFloatFromRecFN.scala:51:21, :53:28]
wire rawA_isSpecial = &_rawA_isSpecial_T; // @[rawFloatFromRecFN.scala:53:{28,53}]
wire _rawA_out_isNaN_T_1; // @[rawFloatFromRecFN.scala:56:33]
wire _rawA_out_isInf_T_2; // @[rawFloatFromRecFN.scala:57:33]
assign io_toPostMul_isInfA_0 = rawA_isInf; // @[rawFloatFromRecFN.scala:55:23]
assign io_toPostMul_isZeroA_0 = rawA_isZero; // @[rawFloatFromRecFN.scala:55:23]
wire _rawA_out_sign_T; // @[rawFloatFromRecFN.scala:59:25]
wire [9:0] _rawA_out_sExp_T; // @[rawFloatFromRecFN.scala:60:27]
wire [24:0] _rawA_out_sig_T_3; // @[rawFloatFromRecFN.scala:61:44]
wire rawA_isNaN; // @[rawFloatFromRecFN.scala:55:23]
wire rawA_sign; // @[rawFloatFromRecFN.scala:55:23]
wire [9:0] rawA_sExp; // @[rawFloatFromRecFN.scala:55:23]
wire [24:0] rawA_sig; // @[rawFloatFromRecFN.scala:55:23]
wire _rawA_out_isNaN_T = rawA_exp[6]; // @[rawFloatFromRecFN.scala:51:21, :56:41]
wire _rawA_out_isInf_T = rawA_exp[6]; // @[rawFloatFromRecFN.scala:51:21, :56:41, :57:41]
assign _rawA_out_isNaN_T_1 = rawA_isSpecial & _rawA_out_isNaN_T; // @[rawFloatFromRecFN.scala:53:53, :56:{33,41}]
assign rawA_isNaN = _rawA_out_isNaN_T_1; // @[rawFloatFromRecFN.scala:55:23, :56:33]
wire _rawA_out_isInf_T_1 = ~_rawA_out_isInf_T; // @[rawFloatFromRecFN.scala:57:{36,41}]
assign _rawA_out_isInf_T_2 = rawA_isSpecial & _rawA_out_isInf_T_1; // @[rawFloatFromRecFN.scala:53:53, :57:{33,36}]
assign rawA_isInf = _rawA_out_isInf_T_2; // @[rawFloatFromRecFN.scala:55:23, :57:33]
assign _rawA_out_sign_T = io_a_0[32]; // @[rawFloatFromRecFN.scala:59:25]
assign rawA_sign = _rawA_out_sign_T; // @[rawFloatFromRecFN.scala:55:23, :59:25]
assign _rawA_out_sExp_T = {1'h0, rawA_exp}; // @[rawFloatFromRecFN.scala:51:21, :60:27]
assign rawA_sExp = _rawA_out_sExp_T; // @[rawFloatFromRecFN.scala:55:23, :60:27]
wire _rawA_out_sig_T = ~rawA_isZero_0; // @[rawFloatFromRecFN.scala:52:53, :61:35]
wire [1:0] _rawA_out_sig_T_1 = {1'h0, _rawA_out_sig_T}; // @[rawFloatFromRecFN.scala:61:{32,35}]
wire [22:0] _rawA_out_sig_T_2 = io_a_0[22:0]; // @[rawFloatFromRecFN.scala:61:49]
assign _rawA_out_sig_T_3 = {_rawA_out_sig_T_1, _rawA_out_sig_T_2}; // @[rawFloatFromRecFN.scala:61:{32,44,49}]
assign rawA_sig = _rawA_out_sig_T_3; // @[rawFloatFromRecFN.scala:55:23, :61:44]
wire [8:0] rawB_exp = io_b_0[31:23]; // @[rawFloatFromRecFN.scala:51:21]
wire [2:0] _rawB_isZero_T = rawB_exp[8:6]; // @[rawFloatFromRecFN.scala:51:21, :52:28]
wire rawB_isZero_0 = _rawB_isZero_T == 3'h0; // @[rawFloatFromRecFN.scala:52:{28,53}]
assign rawB_isZero = rawB_isZero_0; // @[rawFloatFromRecFN.scala:52:53, :55:23]
wire [1:0] _rawB_isSpecial_T = rawB_exp[8:7]; // @[rawFloatFromRecFN.scala:51:21, :53:28]
wire rawB_isSpecial = &_rawB_isSpecial_T; // @[rawFloatFromRecFN.scala:53:{28,53}]
wire _rawB_out_isNaN_T_1; // @[rawFloatFromRecFN.scala:56:33]
wire _rawB_out_isInf_T_2; // @[rawFloatFromRecFN.scala:57:33]
assign io_toPostMul_isInfB_0 = rawB_isInf; // @[rawFloatFromRecFN.scala:55:23]
assign io_toPostMul_isZeroB_0 = rawB_isZero; // @[rawFloatFromRecFN.scala:55:23]
wire _rawB_out_sign_T; // @[rawFloatFromRecFN.scala:59:25]
wire [9:0] _rawB_out_sExp_T; // @[rawFloatFromRecFN.scala:60:27]
wire [24:0] _rawB_out_sig_T_3; // @[rawFloatFromRecFN.scala:61:44]
wire rawB_isNaN; // @[rawFloatFromRecFN.scala:55:23]
wire rawB_sign; // @[rawFloatFromRecFN.scala:55:23]
wire [9:0] rawB_sExp; // @[rawFloatFromRecFN.scala:55:23]
wire [24:0] rawB_sig; // @[rawFloatFromRecFN.scala:55:23]
wire _rawB_out_isNaN_T = rawB_exp[6]; // @[rawFloatFromRecFN.scala:51:21, :56:41]
wire _rawB_out_isInf_T = rawB_exp[6]; // @[rawFloatFromRecFN.scala:51:21, :56:41, :57:41]
assign _rawB_out_isNaN_T_1 = rawB_isSpecial & _rawB_out_isNaN_T; // @[rawFloatFromRecFN.scala:53:53, :56:{33,41}]
assign rawB_isNaN = _rawB_out_isNaN_T_1; // @[rawFloatFromRecFN.scala:55:23, :56:33]
wire _rawB_out_isInf_T_1 = ~_rawB_out_isInf_T; // @[rawFloatFromRecFN.scala:57:{36,41}]
assign _rawB_out_isInf_T_2 = rawB_isSpecial & _rawB_out_isInf_T_1; // @[rawFloatFromRecFN.scala:53:53, :57:{33,36}]
assign rawB_isInf = _rawB_out_isInf_T_2; // @[rawFloatFromRecFN.scala:55:23, :57:33]
assign _rawB_out_sign_T = io_b_0[32]; // @[rawFloatFromRecFN.scala:59:25]
assign rawB_sign = _rawB_out_sign_T; // @[rawFloatFromRecFN.scala:55:23, :59:25]
assign _rawB_out_sExp_T = {1'h0, rawB_exp}; // @[rawFloatFromRecFN.scala:51:21, :60:27]
assign rawB_sExp = _rawB_out_sExp_T; // @[rawFloatFromRecFN.scala:55:23, :60:27]
wire _rawB_out_sig_T = ~rawB_isZero_0; // @[rawFloatFromRecFN.scala:52:53, :61:35]
wire [1:0] _rawB_out_sig_T_1 = {1'h0, _rawB_out_sig_T}; // @[rawFloatFromRecFN.scala:61:{32,35}]
wire [22:0] _rawB_out_sig_T_2 = io_b_0[22:0]; // @[rawFloatFromRecFN.scala:61:49]
assign _rawB_out_sig_T_3 = {_rawB_out_sig_T_1, _rawB_out_sig_T_2}; // @[rawFloatFromRecFN.scala:61:{32,44,49}]
assign rawB_sig = _rawB_out_sig_T_3; // @[rawFloatFromRecFN.scala:55:23, :61:44]
wire _signProd_T = rawA_sign ^ rawB_sign; // @[rawFloatFromRecFN.scala:55:23]
assign signProd = _signProd_T; // @[MulAddRecFN.scala:97:{30,42}]
assign io_toPostMul_signProd_0 = signProd; // @[MulAddRecFN.scala:71:7, :97:42]
wire _doSubMags_T = signProd; // @[MulAddRecFN.scala:97:42, :102:30]
wire [10:0] _sExpAlignedProd_T = {rawA_sExp[9], rawA_sExp} + {rawB_sExp[9], rawB_sExp}; // @[rawFloatFromRecFN.scala:55:23]
wire [11:0] _sExpAlignedProd_T_1 = {_sExpAlignedProd_T[10], _sExpAlignedProd_T} - 12'hE5; // @[MulAddRecFN.scala:100:{19,32}]
wire [10:0] _sExpAlignedProd_T_2 = _sExpAlignedProd_T_1[10:0]; // @[MulAddRecFN.scala:100:32]
wire [10:0] sExpAlignedProd = _sExpAlignedProd_T_2; // @[MulAddRecFN.scala:100:32]
assign doSubMags = _doSubMags_T; // @[MulAddRecFN.scala:102:{30,42}]
assign io_toPostMul_doSubMags_0 = doSubMags; // @[MulAddRecFN.scala:71:7, :102:42]
wire [11:0] _sNatCAlignDist_T = {sExpAlignedProd[10], sExpAlignedProd}; // @[rawFloatFromRecFN.scala:55:23, :60:27]
wire [10:0] _sNatCAlignDist_T_1 = _sNatCAlignDist_T[10:0]; // @[MulAddRecFN.scala:106:42]
wire [10:0] sNatCAlignDist = _sNatCAlignDist_T_1; // @[MulAddRecFN.scala:106:42]
wire [9:0] posNatCAlignDist = sNatCAlignDist[9:0]; // @[MulAddRecFN.scala:106:42, :107:42]
wire _isMinCAlign_T = rawA_isZero | rawB_isZero; // @[rawFloatFromRecFN.scala:55:23]
wire _isMinCAlign_T_1 = $signed(sNatCAlignDist) < 11'sh0; // @[MulAddRecFN.scala:106:42, :108:69]
wire isMinCAlign = _isMinCAlign_T | _isMinCAlign_T_1; // @[MulAddRecFN.scala:108:{35,50,69}]
wire _CIsDominant_T_1 = posNatCAlignDist < 10'h19; // @[MulAddRecFN.scala:107:42, :110:60]
wire _CIsDominant_T_2 = isMinCAlign | _CIsDominant_T_1; // @[MulAddRecFN.scala:108:50, :110:{39,60}]
wire _CAlignDist_T = posNatCAlignDist < 10'h4A; // @[MulAddRecFN.scala:107:42, :114:34]
wire [6:0] _CAlignDist_T_1 = posNatCAlignDist[6:0]; // @[MulAddRecFN.scala:107:42, :115:33]
wire [6:0] _CAlignDist_T_2 = _CAlignDist_T ? _CAlignDist_T_1 : 7'h4A; // @[MulAddRecFN.scala:114:{16,34}, :115:33]
wire [6:0] CAlignDist = isMinCAlign ? 7'h0 : _CAlignDist_T_2; // @[MulAddRecFN.scala:108:50, :112:12, :114:16]
wire [24:0] _mainAlignedSigC_T_1 = {25{doSubMags}}; // @[MulAddRecFN.scala:102:42, :120:13]
wire [52:0] _mainAlignedSigC_T_2 = {53{doSubMags}}; // @[MulAddRecFN.scala:102:42, :120:53]
wire [77:0] _mainAlignedSigC_T_3 = {_mainAlignedSigC_T_1, _mainAlignedSigC_T_2}; // @[MulAddRecFN.scala:120:{13,46,53}]
wire [77:0] _mainAlignedSigC_T_4 = _mainAlignedSigC_T_3; // @[MulAddRecFN.scala:120:{46,94}]
wire [77:0] mainAlignedSigC = $signed($signed(_mainAlignedSigC_T_4) >>> CAlignDist); // @[MulAddRecFN.scala:112:12, :120:{94,100}]
wire [4:0] _reduced4CExtra_T_2 = CAlignDist[6:2]; // @[MulAddRecFN.scala:112:12, :124:28]
wire [32:0] reduced4CExtra_shift = $signed(33'sh100000000 >>> _reduced4CExtra_T_2); // @[primitives.scala:76:56]
wire [5:0] _reduced4CExtra_T_3 = reduced4CExtra_shift[19:14]; // @[primitives.scala:76:56, :78:22]
wire [3:0] _reduced4CExtra_T_4 = _reduced4CExtra_T_3[3:0]; // @[primitives.scala:77:20, :78:22]
wire [1:0] _reduced4CExtra_T_5 = _reduced4CExtra_T_4[1:0]; // @[primitives.scala:77:20]
wire _reduced4CExtra_T_6 = _reduced4CExtra_T_5[0]; // @[primitives.scala:77:20]
wire _reduced4CExtra_T_7 = _reduced4CExtra_T_5[1]; // @[primitives.scala:77:20]
wire [1:0] _reduced4CExtra_T_8 = {_reduced4CExtra_T_6, _reduced4CExtra_T_7}; // @[primitives.scala:77:20]
wire [1:0] _reduced4CExtra_T_9 = _reduced4CExtra_T_4[3:2]; // @[primitives.scala:77:20]
wire _reduced4CExtra_T_10 = _reduced4CExtra_T_9[0]; // @[primitives.scala:77:20]
wire _reduced4CExtra_T_11 = _reduced4CExtra_T_9[1]; // @[primitives.scala:77:20]
wire [1:0] _reduced4CExtra_T_12 = {_reduced4CExtra_T_10, _reduced4CExtra_T_11}; // @[primitives.scala:77:20]
wire [3:0] _reduced4CExtra_T_13 = {_reduced4CExtra_T_8, _reduced4CExtra_T_12}; // @[primitives.scala:77:20]
wire [1:0] _reduced4CExtra_T_14 = _reduced4CExtra_T_3[5:4]; // @[primitives.scala:77:20, :78:22]
wire _reduced4CExtra_T_15 = _reduced4CExtra_T_14[0]; // @[primitives.scala:77:20]
wire _reduced4CExtra_T_16 = _reduced4CExtra_T_14[1]; // @[primitives.scala:77:20]
wire [1:0] _reduced4CExtra_T_17 = {_reduced4CExtra_T_15, _reduced4CExtra_T_16}; // @[primitives.scala:77:20]
wire [5:0] _reduced4CExtra_T_18 = {_reduced4CExtra_T_13, _reduced4CExtra_T_17}; // @[primitives.scala:77:20]
wire [74:0] _alignedSigC_T = mainAlignedSigC[77:3]; // @[MulAddRecFN.scala:120:100, :132:28]
wire [74:0] alignedSigC_hi = _alignedSigC_T; // @[MulAddRecFN.scala:132:{12,28}]
wire [2:0] _alignedSigC_T_1 = mainAlignedSigC[2:0]; // @[MulAddRecFN.scala:120:100, :134:32]
wire [2:0] _alignedSigC_T_5 = mainAlignedSigC[2:0]; // @[MulAddRecFN.scala:120:100, :134:32, :135:32]
wire _alignedSigC_T_2 = &_alignedSigC_T_1; // @[MulAddRecFN.scala:134:{32,39}]
wire _alignedSigC_T_4 = _alignedSigC_T_2; // @[MulAddRecFN.scala:134:{39,44}]
wire _alignedSigC_T_6 = |_alignedSigC_T_5; // @[MulAddRecFN.scala:135:{32,39}]
wire _alignedSigC_T_7 = _alignedSigC_T_6; // @[MulAddRecFN.scala:135:{39,44}]
wire _alignedSigC_T_8 = doSubMags ? _alignedSigC_T_4 : _alignedSigC_T_7; // @[MulAddRecFN.scala:102:42, :133:16, :134:44, :135:44]
wire [75:0] alignedSigC = {alignedSigC_hi, _alignedSigC_T_8}; // @[MulAddRecFN.scala:132:12, :133:16]
assign io_mulAddA_0 = rawA_sig[23:0]; // @[rawFloatFromRecFN.scala:55:23]
assign io_mulAddB_0 = rawB_sig[23:0]; // @[rawFloatFromRecFN.scala:55:23]
assign _io_mulAddC_T = alignedSigC[48:1]; // @[MulAddRecFN.scala:132:12, :143:30]
assign io_mulAddC_0 = _io_mulAddC_T; // @[MulAddRecFN.scala:71:7, :143:30]
wire _io_toPostMul_isSigNaNAny_T = rawA_sig[22]; // @[rawFloatFromRecFN.scala:55:23]
wire _io_toPostMul_isSigNaNAny_T_1 = ~_io_toPostMul_isSigNaNAny_T; // @[common.scala:82:{49,56}]
wire _io_toPostMul_isSigNaNAny_T_2 = rawA_isNaN & _io_toPostMul_isSigNaNAny_T_1; // @[rawFloatFromRecFN.scala:55:23]
wire _io_toPostMul_isSigNaNAny_T_3 = rawB_sig[22]; // @[rawFloatFromRecFN.scala:55:23]
wire _io_toPostMul_isSigNaNAny_T_4 = ~_io_toPostMul_isSigNaNAny_T_3; // @[common.scala:82:{49,56}]
wire _io_toPostMul_isSigNaNAny_T_5 = rawB_isNaN & _io_toPostMul_isSigNaNAny_T_4; // @[rawFloatFromRecFN.scala:55:23]
wire _io_toPostMul_isSigNaNAny_T_6 = _io_toPostMul_isSigNaNAny_T_2 | _io_toPostMul_isSigNaNAny_T_5; // @[common.scala:82:46]
assign _io_toPostMul_isSigNaNAny_T_10 = _io_toPostMul_isSigNaNAny_T_6; // @[MulAddRecFN.scala:146:{32,58}]
assign io_toPostMul_isSigNaNAny_0 = _io_toPostMul_isSigNaNAny_T_10; // @[MulAddRecFN.scala:71:7, :146:58]
assign _io_toPostMul_isNaNAOrB_T = rawA_isNaN | rawB_isNaN; // @[rawFloatFromRecFN.scala:55:23]
assign io_toPostMul_isNaNAOrB_0 = _io_toPostMul_isNaNAOrB_T; // @[MulAddRecFN.scala:71:7, :148:42]
wire [11:0] _io_toPostMul_sExpSum_T = _sNatCAlignDist_T - 12'h18; // @[MulAddRecFN.scala:106:42, :158:53]
wire [10:0] _io_toPostMul_sExpSum_T_1 = _io_toPostMul_sExpSum_T[10:0]; // @[MulAddRecFN.scala:158:53]
wire [10:0] _io_toPostMul_sExpSum_T_2 = _io_toPostMul_sExpSum_T_1; // @[MulAddRecFN.scala:158:53]
wire [10:0] _io_toPostMul_sExpSum_T_3 = _io_toPostMul_sExpSum_T_2; // @[MulAddRecFN.scala:158:{12,53}]
assign io_toPostMul_sExpSum_0 = _io_toPostMul_sExpSum_T_3[9:0]; // @[MulAddRecFN.scala:71:7, :157:28, :158:12]
assign _io_toPostMul_CDom_CAlignDist_T = CAlignDist[4:0]; // @[MulAddRecFN.scala:112:12, :161:47]
assign io_toPostMul_CDom_CAlignDist_0 = _io_toPostMul_CDom_CAlignDist_T; // @[MulAddRecFN.scala:71:7, :161:47]
assign _io_toPostMul_highAlignedSigC_T = alignedSigC[74:49]; // @[MulAddRecFN.scala:132:12, :163:20]
assign io_toPostMul_highAlignedSigC_0 = _io_toPostMul_highAlignedSigC_T; // @[MulAddRecFN.scala:71:7, :163:20]
assign _io_toPostMul_bit0AlignedSigC_T = alignedSigC[0]; // @[MulAddRecFN.scala:132:12, :164:48]
assign io_toPostMul_bit0AlignedSigC_0 = _io_toPostMul_bit0AlignedSigC_T; // @[MulAddRecFN.scala:71:7, :164:48]
assign io_mulAddA = io_mulAddA_0; // @[MulAddRecFN.scala:71:7]
assign io_mulAddB = io_mulAddB_0; // @[MulAddRecFN.scala:71:7]
assign io_mulAddC = io_mulAddC_0; // @[MulAddRecFN.scala:71:7]
assign io_toPostMul_isSigNaNAny = io_toPostMul_isSigNaNAny_0; // @[MulAddRecFN.scala:71:7]
assign io_toPostMul_isNaNAOrB = io_toPostMul_isNaNAOrB_0; // @[MulAddRecFN.scala:71:7]
assign io_toPostMul_isInfA = io_toPostMul_isInfA_0; // @[MulAddRecFN.scala:71:7]
assign io_toPostMul_isZeroA = io_toPostMul_isZeroA_0; // @[MulAddRecFN.scala:71:7]
assign io_toPostMul_isInfB = io_toPostMul_isInfB_0; // @[MulAddRecFN.scala:71:7]
assign io_toPostMul_isZeroB = io_toPostMul_isZeroB_0; // @[MulAddRecFN.scala:71:7]
assign io_toPostMul_signProd = io_toPostMul_signProd_0; // @[MulAddRecFN.scala:71:7]
assign io_toPostMul_sExpSum = io_toPostMul_sExpSum_0; // @[MulAddRecFN.scala:71:7]
assign io_toPostMul_doSubMags = io_toPostMul_doSubMags_0; // @[MulAddRecFN.scala:71:7]
assign io_toPostMul_CDom_CAlignDist = io_toPostMul_CDom_CAlignDist_0; // @[MulAddRecFN.scala:71:7]
assign io_toPostMul_highAlignedSigC = io_toPostMul_highAlignedSigC_0; // @[MulAddRecFN.scala:71:7]
assign io_toPostMul_bit0AlignedSigC = io_toPostMul_bit0AlignedSigC_0; // @[MulAddRecFN.scala:71:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File Monitor.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceLine
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import freechips.rocketchip.diplomacy.EnableMonitors
import freechips.rocketchip.formal.{MonitorDirection, IfThen, Property, PropertyClass, TestplanTestType, TLMonitorStrictMode}
import freechips.rocketchip.util.PlusArg
case class TLMonitorArgs(edge: TLEdge)
abstract class TLMonitorBase(args: TLMonitorArgs) extends Module
{
val io = IO(new Bundle {
val in = Input(new TLBundle(args.edge.bundle))
})
def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit
legalize(io.in, args.edge, reset)
}
object TLMonitor {
def apply(enable: Boolean, node: TLNode)(implicit p: Parameters): TLNode = {
if (enable) {
EnableMonitors { implicit p => node := TLEphemeralNode()(ValName("monitor")) }
} else { node }
}
}
class TLMonitor(args: TLMonitorArgs, monitorDir: MonitorDirection = MonitorDirection.Monitor) extends TLMonitorBase(args)
{
require (args.edge.params(TLMonitorStrictMode) || (! args.edge.params(TestplanTestType).formal))
val cover_prop_class = PropertyClass.Default
//Like assert but can flip to being an assumption for formal verification
def monAssert(cond: Bool, message: String): Unit =
if (monitorDir == MonitorDirection.Monitor) {
assert(cond, message)
} else {
Property(monitorDir, cond, message, PropertyClass.Default)
}
def assume(cond: Bool, message: String): Unit =
if (monitorDir == MonitorDirection.Monitor) {
assert(cond, message)
} else {
Property(monitorDir.flip, cond, message, PropertyClass.Default)
}
def extra = {
args.edge.sourceInfo match {
case SourceLine(filename, line, col) => s" (connected at $filename:$line:$col)"
case _ => ""
}
}
def visible(address: UInt, source: UInt, edge: TLEdge) =
edge.client.clients.map { c =>
!c.sourceId.contains(source) ||
c.visibility.map(_.contains(address)).reduce(_ || _)
}.reduce(_ && _)
def legalizeFormatA(bundle: TLBundleA, edge: TLEdge): Unit = {
//switch this flag to turn on diplomacy in error messages
def diplomacyInfo = if (true) "" else "\nThe diplomacy information for the edge is as follows:\n" + edge.formatEdge + "\n"
monAssert (TLMessages.isA(bundle.opcode), "'A' channel has invalid opcode" + extra)
// Reuse these subexpressions to save some firrtl lines
val source_ok = edge.client.contains(bundle.source)
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val mask = edge.full_mask(bundle)
monAssert (visible(edge.address(bundle), bundle.source, edge), "'A' channel carries an address illegal for the specified bank visibility")
    //The monitor doesn't check for acquire T vs acquire B; it assumes that acquire B implies acquire T and only checks for acquire B
//TODO: check for acquireT?
when (bundle.opcode === TLMessages.AcquireBlock) {
monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock from a client which does not support Probe" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel AcquireBlock carries invalid source ID" + diplomacyInfo + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquireBlock smaller than a beat" + extra)
monAssert (is_aligned, "'A' channel AcquireBlock address not aligned to size" + extra)
monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquireBlock carries invalid grow param" + extra)
monAssert (~bundle.mask === 0.U, "'A' channel AcquireBlock contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel AcquireBlock is corrupt" + extra)
}
when (bundle.opcode === TLMessages.AcquirePerm) {
monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm from a client which does not support Probe" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel AcquirePerm carries invalid source ID" + diplomacyInfo + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquirePerm smaller than a beat" + extra)
monAssert (is_aligned, "'A' channel AcquirePerm address not aligned to size" + extra)
monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquirePerm carries invalid grow param" + extra)
monAssert (bundle.param =/= TLPermissions.NtoB, "'A' channel AcquirePerm requests NtoB" + extra)
monAssert (~bundle.mask === 0.U, "'A' channel AcquirePerm contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel AcquirePerm is corrupt" + extra)
}
when (bundle.opcode === TLMessages.Get) {
monAssert (edge.master.emitsGet(bundle.source, bundle.size), "'A' channel carries Get type which master claims it can't emit" + diplomacyInfo + extra)
monAssert (edge.slave.supportsGetSafe(edge.address(bundle), bundle.size, None), "'A' channel carries Get type which slave claims it can't support" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel Get carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Get address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel Get carries invalid param" + extra)
monAssert (bundle.mask === mask, "'A' channel Get contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel Get is corrupt" + extra)
}
when (bundle.opcode === TLMessages.PutFullData) {
monAssert (edge.master.emitsPutFull(bundle.source, bundle.size) && edge.slave.supportsPutFullSafe(edge.address(bundle), bundle.size), "'A' channel carries PutFull type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel PutFull carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel PutFull address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel PutFull carries invalid param" + extra)
monAssert (bundle.mask === mask, "'A' channel PutFull contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.PutPartialData) {
monAssert (edge.master.emitsPutPartial(bundle.source, bundle.size) && edge.slave.supportsPutPartialSafe(edge.address(bundle), bundle.size), "'A' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel PutPartial carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel PutPartial address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel PutPartial carries invalid param" + extra)
monAssert ((bundle.mask & ~mask) === 0.U, "'A' channel PutPartial contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.ArithmeticData) {
monAssert (edge.master.emitsArithmetic(bundle.source, bundle.size) && edge.slave.supportsArithmeticSafe(edge.address(bundle), bundle.size), "'A' channel carries Arithmetic type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Arithmetic carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Arithmetic address not aligned to size" + extra)
monAssert (TLAtomics.isArithmetic(bundle.param), "'A' channel Arithmetic carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Arithmetic contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.LogicalData) {
monAssert (edge.master.emitsLogical(bundle.source, bundle.size) && edge.slave.supportsLogicalSafe(edge.address(bundle), bundle.size), "'A' channel carries Logical type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Logical carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Logical address not aligned to size" + extra)
monAssert (TLAtomics.isLogical(bundle.param), "'A' channel Logical carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Logical contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.Hint) {
monAssert (edge.master.emitsHint(bundle.source, bundle.size) && edge.slave.supportsHintSafe(edge.address(bundle), bundle.size), "'A' channel carries Hint type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Hint carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Hint address not aligned to size" + extra)
monAssert (TLHints.isHints(bundle.param), "'A' channel Hint carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Hint contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel Hint is corrupt" + extra)
}
}
def legalizeFormatB(bundle: TLBundleB, edge: TLEdge): Unit = {
monAssert (TLMessages.isB(bundle.opcode), "'B' channel has invalid opcode" + extra)
monAssert (visible(edge.address(bundle), bundle.source, edge), "'B' channel carries an address illegal for the specified bank visibility")
// Reuse these subexpressions to save some firrtl lines
val address_ok = edge.manager.containsSafe(edge.address(bundle))
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val mask = edge.full_mask(bundle)
val legal_source = Mux1H(edge.client.find(bundle.source), edge.client.clients.map(c => c.sourceId.start.U)) === bundle.source
when (bundle.opcode === TLMessages.Probe) {
assume (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'B' channel carries Probe type which is unexpected using diplomatic parameters" + extra)
assume (address_ok, "'B' channel Probe carries unmanaged address" + extra)
assume (legal_source, "'B' channel Probe carries source that is not first source" + extra)
assume (is_aligned, "'B' channel Probe address not aligned to size" + extra)
assume (TLPermissions.isCap(bundle.param), "'B' channel Probe carries invalid cap param" + extra)
assume (bundle.mask === mask, "'B' channel Probe contains invalid mask" + extra)
assume (!bundle.corrupt, "'B' channel Probe is corrupt" + extra)
}
when (bundle.opcode === TLMessages.Get) {
monAssert (edge.master.supportsGet(edge.source(bundle), bundle.size) && edge.slave.emitsGetSafe(edge.address(bundle), bundle.size), "'B' channel carries Get type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel Get carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Get carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Get address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel Get carries invalid param" + extra)
monAssert (bundle.mask === mask, "'B' channel Get contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'B' channel Get is corrupt" + extra)
}
when (bundle.opcode === TLMessages.PutFullData) {
monAssert (edge.master.supportsPutFull(edge.source(bundle), bundle.size) && edge.slave.emitsPutFullSafe(edge.address(bundle), bundle.size), "'B' channel carries PutFull type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel PutFull carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel PutFull carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel PutFull address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel PutFull carries invalid param" + extra)
monAssert (bundle.mask === mask, "'B' channel PutFull contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.PutPartialData) {
monAssert (edge.master.supportsPutPartial(edge.source(bundle), bundle.size) && edge.slave.emitsPutPartialSafe(edge.address(bundle), bundle.size), "'B' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel PutPartial carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel PutPartial carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel PutPartial address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel PutPartial carries invalid param" + extra)
monAssert ((bundle.mask & ~mask) === 0.U, "'B' channel PutPartial contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.ArithmeticData) {
monAssert (edge.master.supportsArithmetic(edge.source(bundle), bundle.size) && edge.slave.emitsArithmeticSafe(edge.address(bundle), bundle.size), "'B' channel carries Arithmetic type unsupported by master" + extra)
monAssert (address_ok, "'B' channel Arithmetic carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Arithmetic carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Arithmetic address not aligned to size" + extra)
monAssert (TLAtomics.isArithmetic(bundle.param), "'B' channel Arithmetic carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'B' channel Arithmetic contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.LogicalData) {
monAssert (edge.master.supportsLogical(edge.source(bundle), bundle.size) && edge.slave.emitsLogicalSafe(edge.address(bundle), bundle.size), "'B' channel carries Logical type unsupported by client" + extra)
monAssert (address_ok, "'B' channel Logical carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Logical carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Logical address not aligned to size" + extra)
monAssert (TLAtomics.isLogical(bundle.param), "'B' channel Logical carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'B' channel Logical contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.Hint) {
monAssert (edge.master.supportsHint(edge.source(bundle), bundle.size) && edge.slave.emitsHintSafe(edge.address(bundle), bundle.size), "'B' channel carries Hint type unsupported by client" + extra)
monAssert (address_ok, "'B' channel Hint carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Hint carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Hint address not aligned to size" + extra)
monAssert (bundle.mask === mask, "'B' channel Hint contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'B' channel Hint is corrupt" + extra)
}
}
def legalizeFormatC(bundle: TLBundleC, edge: TLEdge): Unit = {
monAssert (TLMessages.isC(bundle.opcode), "'C' channel has invalid opcode" + extra)
val source_ok = edge.client.contains(bundle.source)
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val address_ok = edge.manager.containsSafe(edge.address(bundle))
monAssert (visible(edge.address(bundle), bundle.source, edge), "'C' channel carries an address illegal for the specified bank visibility")
when (bundle.opcode === TLMessages.ProbeAck) {
monAssert (address_ok, "'C' channel ProbeAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel ProbeAck carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAck smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ProbeAck address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAck carries invalid report param" + extra)
monAssert (!bundle.corrupt, "'C' channel ProbeAck is corrupt" + extra)
}
when (bundle.opcode === TLMessages.ProbeAckData) {
monAssert (address_ok, "'C' channel ProbeAckData carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel ProbeAckData carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAckData smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ProbeAckData address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAckData carries invalid report param" + extra)
}
when (bundle.opcode === TLMessages.Release) {
monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries Release type unsupported by manager" + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra)
monAssert (source_ok, "'C' channel Release carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel Release smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel Release address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel Release carries invalid report param" + extra)
monAssert (!bundle.corrupt, "'C' channel Release is corrupt" + extra)
}
when (bundle.opcode === TLMessages.ReleaseData) {
monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData type unsupported by manager" + extra)
      monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData from a client which does not support Probe" + extra)
monAssert (source_ok, "'C' channel ReleaseData carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ReleaseData smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ReleaseData address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ReleaseData carries invalid report param" + extra)
}
when (bundle.opcode === TLMessages.AccessAck) {
monAssert (address_ok, "'C' channel AccessAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel AccessAck carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel AccessAck address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel AccessAck carries invalid param" + extra)
monAssert (!bundle.corrupt, "'C' channel AccessAck is corrupt" + extra)
}
when (bundle.opcode === TLMessages.AccessAckData) {
monAssert (address_ok, "'C' channel AccessAckData carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel AccessAckData carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel AccessAckData address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel AccessAckData carries invalid param" + extra)
}
when (bundle.opcode === TLMessages.HintAck) {
monAssert (address_ok, "'C' channel HintAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel HintAck carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel HintAck address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel HintAck carries invalid param" + extra)
monAssert (!bundle.corrupt, "'C' channel HintAck is corrupt" + extra)
}
}
def legalizeFormatD(bundle: TLBundleD, edge: TLEdge): Unit = {
assume (TLMessages.isD(bundle.opcode), "'D' channel has invalid opcode" + extra)
val source_ok = edge.client.contains(bundle.source)
val sink_ok = bundle.sink < edge.manager.endSinkId.U
val deny_put_ok = edge.manager.mayDenyPut.B
val deny_get_ok = edge.manager.mayDenyGet.B
when (bundle.opcode === TLMessages.ReleaseAck) {
assume (source_ok, "'D' channel ReleaseAck carries invalid source ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel ReleaseAck smaller than a beat" + extra)
      assume (bundle.param === 0.U, "'D' channel ReleaseAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel ReleaseAck is corrupt" + extra)
assume (!bundle.denied, "'D' channel ReleaseAck is denied" + extra)
}
when (bundle.opcode === TLMessages.Grant) {
assume (source_ok, "'D' channel Grant carries invalid source ID" + extra)
assume (sink_ok, "'D' channel Grant carries invalid sink ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel Grant smaller than a beat" + extra)
assume (TLPermissions.isCap(bundle.param), "'D' channel Grant carries invalid cap param" + extra)
assume (bundle.param =/= TLPermissions.toN, "'D' channel Grant carries toN param" + extra)
assume (!bundle.corrupt, "'D' channel Grant is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel Grant is denied" + extra)
}
when (bundle.opcode === TLMessages.GrantData) {
assume (source_ok, "'D' channel GrantData carries invalid source ID" + extra)
assume (sink_ok, "'D' channel GrantData carries invalid sink ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel GrantData smaller than a beat" + extra)
assume (TLPermissions.isCap(bundle.param), "'D' channel GrantData carries invalid cap param" + extra)
assume (bundle.param =/= TLPermissions.toN, "'D' channel GrantData carries toN param" + extra)
assume (!bundle.denied || bundle.corrupt, "'D' channel GrantData is denied but not corrupt" + extra)
assume (deny_get_ok || !bundle.denied, "'D' channel GrantData is denied" + extra)
}
when (bundle.opcode === TLMessages.AccessAck) {
assume (source_ok, "'D' channel AccessAck carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel AccessAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel AccessAck is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel AccessAck is denied" + extra)
}
when (bundle.opcode === TLMessages.AccessAckData) {
assume (source_ok, "'D' channel AccessAckData carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel AccessAckData carries invalid param" + extra)
assume (!bundle.denied || bundle.corrupt, "'D' channel AccessAckData is denied but not corrupt" + extra)
assume (deny_get_ok || !bundle.denied, "'D' channel AccessAckData is denied" + extra)
}
when (bundle.opcode === TLMessages.HintAck) {
assume (source_ok, "'D' channel HintAck carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel HintAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel HintAck is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel HintAck is denied" + extra)
}
}
def legalizeFormatE(bundle: TLBundleE, edge: TLEdge): Unit = {
val sink_ok = bundle.sink < edge.manager.endSinkId.U
    monAssert (sink_ok, "'E' channel carries invalid sink ID" + extra)
}
def legalizeFormat(bundle: TLBundle, edge: TLEdge) = {
when (bundle.a.valid) { legalizeFormatA(bundle.a.bits, edge) }
when (bundle.d.valid) { legalizeFormatD(bundle.d.bits, edge) }
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
when (bundle.b.valid) { legalizeFormatB(bundle.b.bits, edge) }
when (bundle.c.valid) { legalizeFormatC(bundle.c.bits, edge) }
when (bundle.e.valid) { legalizeFormatE(bundle.e.bits, edge) }
} else {
monAssert (!bundle.b.valid, "'B' channel valid and not TL-C" + extra)
monAssert (!bundle.c.valid, "'C' channel valid and not TL-C" + extra)
monAssert (!bundle.e.valid, "'E' channel valid and not TL-C" + extra)
}
}
def legalizeMultibeatA(a: DecoupledIO[TLBundleA], edge: TLEdge): Unit = {
val a_first = edge.first(a.bits, a.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (a.valid && !a_first) {
monAssert (a.bits.opcode === opcode, "'A' channel opcode changed within multibeat operation" + extra)
monAssert (a.bits.param === param, "'A' channel param changed within multibeat operation" + extra)
monAssert (a.bits.size === size, "'A' channel size changed within multibeat operation" + extra)
monAssert (a.bits.source === source, "'A' channel source changed within multibeat operation" + extra)
monAssert (a.bits.address === address, "'A' channel address changed within multibeat operation" + extra)
}
when (a.fire && a_first) {
opcode := a.bits.opcode
param := a.bits.param
size := a.bits.size
source := a.bits.source
address := a.bits.address
}
}
def legalizeMultibeatB(b: DecoupledIO[TLBundleB], edge: TLEdge): Unit = {
val b_first = edge.first(b.bits, b.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (b.valid && !b_first) {
monAssert (b.bits.opcode === opcode, "'B' channel opcode changed within multibeat operation" + extra)
monAssert (b.bits.param === param, "'B' channel param changed within multibeat operation" + extra)
monAssert (b.bits.size === size, "'B' channel size changed within multibeat operation" + extra)
monAssert (b.bits.source === source, "'B' channel source changed within multibeat operation" + extra)
monAssert (b.bits.address === address, "'B' channel address changed within multibeat operation" + extra)
}
when (b.fire && b_first) {
opcode := b.bits.opcode
param := b.bits.param
size := b.bits.size
source := b.bits.source
address := b.bits.address
}
}
def legalizeADSourceFormal(bundle: TLBundle, edge: TLEdge): Unit = {
// Symbolic variable
val sym_source = Wire(UInt(edge.client.endSourceId.W))
// TODO: Connect sym_source to a fixed value for simulation and to a
// free wire in formal
sym_source := 0.U
// Type casting Int to UInt
val maxSourceId = Wire(UInt(edge.client.endSourceId.W))
maxSourceId := edge.client.endSourceId.U
// Delayed version of sym_source
val sym_source_d = Reg(UInt(edge.client.endSourceId.W))
sym_source_d := sym_source
// These will be constraints for FV setup
Property(
MonitorDirection.Monitor,
(sym_source === sym_source_d),
"sym_source should remain stable",
PropertyClass.Default)
Property(
MonitorDirection.Monitor,
(sym_source <= maxSourceId),
"sym_source should take legal value",
PropertyClass.Default)
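// Track, for the single symbolic source ID, whether a response is still pending, and remember
// the opcode/size of its outstanding request so the eventual D-channel response can be checked.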
val my_resp_pend = RegInit(false.B)
val my_opcode = Reg(UInt())
val my_size = Reg(UInt())
val a_first = bundle.a.valid && edge.first(bundle.a.bits, bundle.a.fire)
val d_first = bundle.d.valid && edge.first(bundle.d.bits, bundle.d.fire)
val my_a_first_beat = a_first && (bundle.a.bits.source === sym_source)
val my_d_first_beat = d_first && (bundle.d.bits.source === sym_source)
val my_clr_resp_pend = (bundle.d.fire && my_d_first_beat)
val my_set_resp_pend = (bundle.a.fire && my_a_first_beat && !my_clr_resp_pend)
when (my_set_resp_pend) {
my_resp_pend := true.B
} .elsewhen (my_clr_resp_pend) {
my_resp_pend := false.B
}
when (my_a_first_beat) {
my_opcode := bundle.a.bits.opcode
my_size := bundle.a.bits.size
}
val my_resp_size = Mux(my_a_first_beat, bundle.a.bits.size, my_size)
val my_resp_opcode = Mux(my_a_first_beat, bundle.a.bits.opcode, my_opcode)
val my_resp_opcode_legal = Wire(Bool())
when ((my_resp_opcode === TLMessages.Get) || (my_resp_opcode === TLMessages.ArithmeticData) ||
(my_resp_opcode === TLMessages.LogicalData)) {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAckData)
} .elsewhen ((my_resp_opcode === TLMessages.PutFullData) || (my_resp_opcode === TLMessages.PutPartialData)) {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAck)
} .otherwise {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.HintAck)
}
monAssert (IfThen(my_resp_pend, !my_a_first_beat),
"Request message should not be sent with a source ID, for which a response message" +
"is already pending (not received until current cycle) for a prior request message" +
"with the same source ID" + extra)
assume (IfThen(my_clr_resp_pend, (my_set_resp_pend || my_resp_pend)),
"Response message should be accepted with a source ID only if a request message with the" +
"same source ID has been accepted or is being accepted in the current cycle" + extra)
assume (IfThen(my_d_first_beat, (my_a_first_beat || my_resp_pend)),
"Response message should be sent with a source ID only if a request message with the" +
"same source ID has been accepted or is being sent in the current cycle" + extra)
assume (IfThen(my_d_first_beat, (bundle.d.bits.size === my_resp_size)),
"If d_valid is 1, then d_size should be same as a_size of the corresponding request" +
"message" + extra)
assume (IfThen(my_d_first_beat, my_resp_opcode_legal),
"If d_valid is 1, then d_opcode should correspond with a_opcode of the corresponding" +
"request message" + extra)
}
def legalizeMultibeatC(c: DecoupledIO[TLBundleC], edge: TLEdge): Unit = {
val c_first = edge.first(c.bits, c.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (c.valid && !c_first) {
monAssert (c.bits.opcode === opcode, "'C' channel opcode changed within multibeat operation" + extra)
monAssert (c.bits.param === param, "'C' channel param changed within multibeat operation" + extra)
monAssert (c.bits.size === size, "'C' channel size changed within multibeat operation" + extra)
monAssert (c.bits.source === source, "'C' channel source changed within multibeat operation" + extra)
monAssert (c.bits.address === address, "'C' channel address changed within multibeat operation" + extra)
}
when (c.fire && c_first) {
opcode := c.bits.opcode
param := c.bits.param
size := c.bits.size
source := c.bits.source
address := c.bits.address
}
}
def legalizeMultibeatD(d: DecoupledIO[TLBundleD], edge: TLEdge): Unit = {
val d_first = edge.first(d.bits, d.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val sink = Reg(UInt())
val denied = Reg(Bool())
when (d.valid && !d_first) {
assume (d.bits.opcode === opcode, "'D' channel opcode changed within multibeat operation" + extra)
assume (d.bits.param === param, "'D' channel param changed within multibeat operation" + extra)
assume (d.bits.size === size, "'D' channel size changed within multibeat operation" + extra)
assume (d.bits.source === source, "'D' channel source changed within multibeat operation" + extra)
assume (d.bits.sink === sink, "'D' channel sink changed within multibeat operation" + extra)
assume (d.bits.denied === denied, "'D' channel denied changed within multibeat operation" + extra)
}
when (d.fire && d_first) {
opcode := d.bits.opcode
param := d.bits.param
size := d.bits.size
source := d.bits.source
sink := d.bits.sink
denied := d.bits.denied
}
}
def legalizeMultibeat(bundle: TLBundle, edge: TLEdge): Unit = {
legalizeMultibeatA(bundle.a, edge)
legalizeMultibeatD(bundle.d, edge)
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
legalizeMultibeatB(bundle.b, edge)
legalizeMultibeatC(bundle.c, edge)
}
}
// This is left in for almond, which doesn't adhere to the TileLink protocol
@deprecated("Use legalizeADSource instead if possible","")
def legalizeADSourceOld(bundle: TLBundle, edge: TLEdge): Unit = {
val inflight = RegInit(0.U(edge.client.endSourceId.W))
val a_first = edge.first(bundle.a.bits, bundle.a.fire)
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
val a_set = WireInit(0.U(edge.client.endSourceId.W))
when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) {
a_set := UIntToOH(bundle.a.bits.source)
assert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra)
}
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
assume((a_set | inflight)(bundle.d.bits.source), "'D' channel acknowledged for nothing inflight" + extra)
}
if (edge.manager.minLatency > 0) {
assume(a_set =/= d_clr || !a_set.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra)
}
inflight := (inflight | a_set) & ~d_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
assert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeADSource(bundle: TLBundle, edge: TLEdge): Unit = {
val a_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset)
val a_opcode_bus_size = 3 + 1 //opcode size is 3, but add one so that 0 is not mapped to anything
val log_a_opcode_bus_size = log2Ceil(a_opcode_bus_size)
val log_a_size_bus_size = log2Ceil(a_size_bus_size)
def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits
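// e.g. size_to_numfullbits(3.U) === "b111".U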
val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) // size up to avoid width error
inflight.suggestName("inflight")
val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
inflight_opcodes.suggestName("inflight_opcodes")
val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
inflight_sizes.suggestName("inflight_sizes")
val a_first = edge.first(bundle.a.bits, bundle.a.fire)
a_first.suggestName("a_first")
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
d_first.suggestName("d_first")
val a_set = WireInit(0.U(edge.client.endSourceId.W))
val a_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
a_set.suggestName("a_set")
a_set_wo_ready.suggestName("a_set_wo_ready")
val a_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
a_opcodes_set.suggestName("a_opcodes_set")
val a_sizes_set = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
a_sizes_set.suggestName("a_sizes_set")
val a_opcode_lookup = WireInit(0.U((a_opcode_bus_size - 1).W))
a_opcode_lookup.suggestName("a_opcode_lookup")
a_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_a_opcode_bus_size.U) & size_to_numfullbits(1.U << log_a_opcode_bus_size.U)) >> 1.U
val a_size_lookup = WireInit(0.U((1 << log_a_size_bus_size).W))
a_size_lookup.suggestName("a_size_lookup")
a_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_a_size_bus_size.U) & size_to_numfullbits(1.U << log_a_size_bus_size.U)) >> 1.U
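// Recover the recorded opcode/size for the source of the incoming D beat: shift the packed
// vector down to that source's field, mask to the field width, then drop the low valid bit.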
val responseMap = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.Grant, TLMessages.Grant))
val responseMapSecondOption = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.GrantData, TLMessages.Grant))
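// Expected D-channel response, indexed by the recorded A-channel opcode (PutFullData=0 ... AcquirePerm=7).
// The second map supplies the alternate legal response where one exists, e.g. AcquireBlock may be
// answered with either Grant or GrantData.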
val a_opcodes_set_interm = WireInit(0.U(a_opcode_bus_size.W))
a_opcodes_set_interm.suggestName("a_opcodes_set_interm")
val a_sizes_set_interm = WireInit(0.U(a_size_bus_size.W))
a_sizes_set_interm.suggestName("a_sizes_set_interm")
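// The *_interm values carry the opcode/size shifted up by one with a 1 in bit 0, marking the
// entry as valid once it is OR-ed into the packed in-flight vectors below.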
when (bundle.a.valid && a_first && edge.isRequest(bundle.a.bits)) {
a_set_wo_ready := UIntToOH(bundle.a.bits.source)
}
when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) {
a_set := UIntToOH(bundle.a.bits.source)
a_opcodes_set_interm := (bundle.a.bits.opcode << 1.U) | 1.U
a_sizes_set_interm := (bundle.a.bits.size << 1.U) | 1.U
a_opcodes_set := (a_opcodes_set_interm) << (bundle.a.bits.source << log_a_opcode_bus_size.U)
a_sizes_set := (a_sizes_set_interm) << (bundle.a.bits.source << log_a_size_bus_size.U)
monAssert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra)
}
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
d_clr.suggestName("d_clr")
d_clr_wo_ready.suggestName("d_clr_wo_ready")
val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
d_opcodes_clr.suggestName("d_opcodes_clr")
val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
d_sizes_clr.suggestName("d_sizes_clr")
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr_wo_ready := UIntToOH(bundle.d.bits.source)
}
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
d_opcodes_clr := size_to_numfullbits(1.U << log_a_opcode_bus_size.U) << (bundle.d.bits.source << log_a_opcode_bus_size.U)
d_sizes_clr := size_to_numfullbits(1.U << log_a_size_bus_size.U) << (bundle.d.bits.source << log_a_size_bus_size.U)
}
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
val same_cycle_resp = bundle.a.valid && a_first && edge.isRequest(bundle.a.bits) && (bundle.a.bits.source === bundle.d.bits.source)
assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra)
when (same_cycle_resp) {
assume((bundle.d.bits.opcode === responseMap(bundle.a.bits.opcode)) ||
(bundle.d.bits.opcode === responseMapSecondOption(bundle.a.bits.opcode)), "'D' channel contains improper opcode response" + extra)
assume((bundle.a.bits.size === bundle.d.bits.size), "'D' channel contains improper response size" + extra)
} .otherwise {
assume((bundle.d.bits.opcode === responseMap(a_opcode_lookup)) ||
(bundle.d.bits.opcode === responseMapSecondOption(a_opcode_lookup)), "'D' channel contains improper opcode response" + extra)
assume((bundle.d.bits.size === a_size_lookup), "'D' channel contains improper response size" + extra)
}
}
when(bundle.d.valid && d_first && a_first && bundle.a.valid && (bundle.a.bits.source === bundle.d.bits.source) && !d_release_ack) {
assume((!bundle.d.ready) || bundle.a.ready, "ready check")
}
if (edge.manager.minLatency > 0) {
assume(a_set_wo_ready =/= d_clr_wo_ready || !a_set_wo_ready.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra)
}
inflight := (inflight | a_set) & ~d_clr
inflight_opcodes := (inflight_opcodes | a_opcodes_set) & ~d_opcodes_clr
inflight_sizes := (inflight_sizes | a_sizes_set) & ~d_sizes_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeCDSource(bundle: TLBundle, edge: TLEdge): Unit = {
val c_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset)
val c_opcode_bus_size = 3 + 1 //opcode size is 3, but add one so that 0 is not mapped to anything
val log_c_opcode_bus_size = log2Ceil(c_opcode_bus_size)
val log_c_size_bus_size = log2Ceil(c_size_bus_size)
def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits
val inflight = RegInit(0.U((2 max edge.client.endSourceId).W))
val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
inflight.suggestName("inflight")
inflight_opcodes.suggestName("inflight_opcodes")
inflight_sizes.suggestName("inflight_sizes")
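// Mirror of legalizeADSource: tracks C-channel requests (Release/ReleaseData) per source ID and
// checks that the matching D-channel ReleaseAck arrives with a consistent size.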
val c_first = edge.first(bundle.c.bits, bundle.c.fire)
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
c_first.suggestName("c_first")
d_first.suggestName("d_first")
val c_set = WireInit(0.U(edge.client.endSourceId.W))
val c_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
val c_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val c_sizes_set = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
c_set.suggestName("c_set")
c_set_wo_ready.suggestName("c_set_wo_ready")
c_opcodes_set.suggestName("c_opcodes_set")
c_sizes_set.suggestName("c_sizes_set")
val c_opcode_lookup = WireInit(0.U((1 << log_c_opcode_bus_size).W))
val c_size_lookup = WireInit(0.U((1 << log_c_size_bus_size).W))
c_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_c_opcode_bus_size.U) & size_to_numfullbits(1.U << log_c_opcode_bus_size.U)) >> 1.U
c_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_c_size_bus_size.U) & size_to_numfullbits(1.U << log_c_size_bus_size.U)) >> 1.U
c_opcode_lookup.suggestName("c_opcode_lookup")
c_size_lookup.suggestName("c_size_lookup")
val c_opcodes_set_interm = WireInit(0.U(c_opcode_bus_size.W))
val c_sizes_set_interm = WireInit(0.U(c_size_bus_size.W))
c_opcodes_set_interm.suggestName("c_opcodes_set_interm")
c_sizes_set_interm.suggestName("c_sizes_set_interm")
when (bundle.c.valid && c_first && edge.isRequest(bundle.c.bits)) {
c_set_wo_ready := UIntToOH(bundle.c.bits.source)
}
when (bundle.c.fire && c_first && edge.isRequest(bundle.c.bits)) {
c_set := UIntToOH(bundle.c.bits.source)
c_opcodes_set_interm := (bundle.c.bits.opcode << 1.U) | 1.U
c_sizes_set_interm := (bundle.c.bits.size << 1.U) | 1.U
c_opcodes_set := (c_opcodes_set_interm) << (bundle.c.bits.source << log_c_opcode_bus_size.U)
c_sizes_set := (c_sizes_set_interm) << (bundle.c.bits.source << log_c_size_bus_size.U)
monAssert(!inflight(bundle.c.bits.source), "'C' channel re-used a source ID" + extra)
}
val c_probe_ack = bundle.c.bits.opcode === TLMessages.ProbeAck || bundle.c.bits.opcode === TLMessages.ProbeAckData
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
d_clr.suggestName("d_clr")
d_clr_wo_ready.suggestName("d_clr_wo_ready")
d_opcodes_clr.suggestName("d_opcodes_clr")
d_sizes_clr.suggestName("d_sizes_clr")
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
d_clr_wo_ready := UIntToOH(bundle.d.bits.source)
}
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
d_opcodes_clr := size_to_numfullbits(1.U << log_c_opcode_bus_size.U) << (bundle.d.bits.source << log_c_opcode_bus_size.U)
d_sizes_clr := size_to_numfullbits(1.U << log_c_size_bus_size.U) << (bundle.d.bits.source << log_c_size_bus_size.U)
}
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
val same_cycle_resp = bundle.c.valid && c_first && edge.isRequest(bundle.c.bits) && (bundle.c.bits.source === bundle.d.bits.source)
assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra)
when (same_cycle_resp) {
assume((bundle.d.bits.size === bundle.c.bits.size), "'D' channel contains improper response size" + extra)
} .otherwise {
assume((bundle.d.bits.size === c_size_lookup), "'D' channel contains improper response size" + extra)
}
}
when(bundle.d.valid && d_first && c_first && bundle.c.valid && (bundle.c.bits.source === bundle.d.bits.source) && d_release_ack && !c_probe_ack) {
assume((!bundle.d.ready) || bundle.c.ready, "ready check")
}
if (edge.manager.minLatency > 0) {
when (c_set_wo_ready.orR) {
assume(c_set_wo_ready =/= d_clr_wo_ready, s"'C' and 'D' concurrent, despite minlatency > 0" + extra)
}
}
inflight := (inflight | c_set) & ~d_clr
inflight_opcodes := (inflight_opcodes | c_opcodes_set) & ~d_opcodes_clr
inflight_sizes := (inflight_sizes | c_sizes_set) & ~d_sizes_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.c.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeDESink(bundle: TLBundle, edge: TLEdge): Unit = {
val inflight = RegInit(0.U(edge.manager.endSinkId.W))
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
val e_first = true.B
val d_set = WireInit(0.U(edge.manager.endSinkId.W))
when (bundle.d.fire && d_first && edge.isRequest(bundle.d.bits)) {
d_set := UIntToOH(bundle.d.bits.sink)
assume(!inflight(bundle.d.bits.sink), "'D' channel re-used a sink ID" + extra)
}
val e_clr = WireInit(0.U(edge.manager.endSinkId.W))
when (bundle.e.fire && e_first && edge.isResponse(bundle.e.bits)) {
e_clr := UIntToOH(bundle.e.bits.sink)
monAssert((d_set | inflight)(bundle.e.bits.sink), "'E' channel acknowledged for nothing inflight" + extra)
}
// edge.client.minLatency applies to BC, not DE
inflight := (inflight | d_set) & ~e_clr
}
def legalizeUnique(bundle: TLBundle, edge: TLEdge): Unit = {
val sourceBits = log2Ceil(edge.client.endSourceId)
val tooBig = 14 // >16kB worth of flight information gets to be too much
if (sourceBits > tooBig) {
println(s"WARNING: TLMonitor instantiated on a bus with source bits (${sourceBits}) > ${tooBig}; A=>D transaction flight will not be checked")
} else {
if (args.edge.params(TestplanTestType).simulation) {
if (args.edge.params(TLMonitorStrictMode)) {
legalizeADSource(bundle, edge)
legalizeCDSource(bundle, edge)
} else {
legalizeADSourceOld(bundle, edge)
}
}
if (args.edge.params(TestplanTestType).formal) {
legalizeADSourceFormal(bundle, edge)
}
}
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
// legalizeBCSourceAddress(bundle, edge) // too much state needed to synthesize...
val sinkBits = log2Ceil(edge.manager.endSinkId)
if (sinkBits > tooBig) {
println(s"WARNING: TLMonitor instantiated on a bus with sink bits (${sinkBits}) > ${tooBig}; D=>E transaction flight will not be checked")
} else {
legalizeDESink(bundle, edge)
}
}
}
def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit = {
legalizeFormat (bundle, edge)
legalizeMultibeat (bundle, edge)
legalizeUnique (bundle, edge)
}
}
File Misc.scala:
// See LICENSE.Berkeley for license details.
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util._
import chisel3.util.random.LFSR
import org.chipsalliance.cde.config.Parameters
import scala.math._
class ParameterizedBundle(implicit p: Parameters) extends Bundle
trait Clocked extends Bundle {
val clock = Clock()
val reset = Bool()
}
object DecoupledHelper {
def apply(rvs: Bool*) = new DecoupledHelper(rvs)
}
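/** Bundles a set of ready/valid-style Bools: fire() is the AND of all of them, while
  * fire(exclude) ANDs all but the given signal (compared by reference), which is typically
  * used to drive that excluded signal itself.
  */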
class DecoupledHelper(val rvs: Seq[Bool]) {
def fire(exclude: Bool, includes: Bool*) = {
require(rvs.contains(exclude), "Excluded Bool not present in DecoupledHelper! Note that DecoupledHelper uses referential equality for exclusion! If you don't want to exclude anything, use fire()!")
(rvs.filter(_ ne exclude) ++ includes).reduce(_ && _)
}
def fire() = {
rvs.reduce(_ && _)
}
}
object MuxT {
def apply[T <: Data, U <: Data](cond: Bool, con: (T, U), alt: (T, U)): (T, U) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2))
def apply[T <: Data, U <: Data, W <: Data](cond: Bool, con: (T, U, W), alt: (T, U, W)): (T, U, W) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3))
def apply[T <: Data, U <: Data, W <: Data, X <: Data](cond: Bool, con: (T, U, W, X), alt: (T, U, W, X)): (T, U, W, X) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3), Mux(cond, con._4, alt._4))
}
/** Creates a cascade of n MuxTs to search for a key value. */
object MuxTLookup {
def apply[S <: UInt, T <: Data, U <: Data](key: S, default: (T, U), mapping: Seq[(S, (T, U))]): (T, U) = {
var res = default
for ((k, v) <- mapping.reverse)
res = MuxT(k === key, v, res)
res
}
def apply[S <: UInt, T <: Data, U <: Data, W <: Data](key: S, default: (T, U, W), mapping: Seq[(S, (T, U, W))]): (T, U, W) = {
var res = default
for ((k, v) <- mapping.reverse)
res = MuxT(k === key, v, res)
res
}
}
object ValidMux {
def apply[T <: Data](v1: ValidIO[T], v2: ValidIO[T]*): ValidIO[T] = {
apply(v1 +: v2.toSeq)
}
def apply[T <: Data](valids: Seq[ValidIO[T]]): ValidIO[T] = {
val out = Wire(Valid(valids.head.bits.cloneType))
out.valid := valids.map(_.valid).reduce(_ || _)
out.bits := MuxCase(valids.head.bits,
valids.map(v => (v.valid -> v.bits)))
out
}
}
object Str
{
def apply(s: String): UInt = {
var i = BigInt(0)
require(s.forall(validChar _))
for (c <- s)
i = (i << 8) | c
i.U((s.length*8).W)
}
def apply(x: Char): UInt = {
require(validChar(x))
x.U(8.W)
}
def apply(x: UInt): UInt = apply(x, 10)
def apply(x: UInt, radix: Int): UInt = {
val rad = radix.U
val w = x.getWidth
require(w > 0)
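// Build the ASCII string one digit at a time, least-significant digit first; in base 10 the
// leading zero digits are replaced with spaces.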
var q = x
var s = digit(q % rad)
for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
q = q / rad
s = Cat(Mux((radix == 10).B && q === 0.U, Str(' '), digit(q % rad)), s)
}
s
}
def apply(x: SInt): UInt = apply(x, 10)
def apply(x: SInt, radix: Int): UInt = {
val neg = x < 0.S
val abs = x.abs.asUInt
if (radix != 10) {
Cat(Mux(neg, Str('-'), Str(' ')), Str(abs, radix))
} else {
val rad = radix.U
val w = abs.getWidth
require(w > 0)
var q = abs
var s = digit(q % rad)
var needSign = neg
for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
q = q / rad
val placeSpace = q === 0.U
val space = Mux(needSign, Str('-'), Str(' '))
needSign = needSign && !placeSpace
s = Cat(Mux(placeSpace, space, digit(q % rad)), s)
}
Cat(Mux(needSign, Str('-'), Str(' ')), s)
}
}
private def digit(d: UInt): UInt = Mux(d < 10.U, Str('0')+d, Str(('a'-10).toChar)+d)(7,0)
private def validChar(x: Char) = x == (x & 0xFF)
}
object Split
{
def apply(x: UInt, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n0), x.extract(n0-1,0))
}
def apply(x: UInt, n1: Int, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0))
}
def apply(x: UInt, n2: Int, n1: Int, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n2), x.extract(n2-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0))
}
}
object Random
{
def apply(mod: Int, random: UInt): UInt = {
if (isPow2(mod)) random.extract(log2Ceil(mod)-1,0)
else PriorityEncoder(partition(apply(1 << log2Up(mod*8), random), mod))
}
def apply(mod: Int): UInt = apply(mod, randomizer)
def oneHot(mod: Int, random: UInt): UInt = {
if (isPow2(mod)) UIntToOH(random(log2Up(mod)-1,0))
else PriorityEncoderOH(partition(apply(1 << log2Up(mod*8), random), mod)).asUInt
}
def oneHot(mod: Int): UInt = oneHot(mod, randomizer)
private def randomizer = LFSR(16)
private def partition(value: UInt, slices: Int) =
Seq.tabulate(slices)(i => value < (((i + 1) << value.getWidth) / slices).U)
}
object Majority {
def apply(in: Set[Bool]): Bool = {
val n = (in.size >> 1) + 1
val clauses = in.subsets(n).map(_.reduce(_ && _))
clauses.reduce(_ || _)
}
def apply(in: Seq[Bool]): Bool = apply(in.toSet)
def apply(in: UInt): Bool = apply(in.asBools.toSet)
}
object PopCountAtLeast {
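// two(x) returns (at least one bit of x is set, at least two bits of x are set),
// computed by splitting x in half and combining the halves' results.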
private def two(x: UInt): (Bool, Bool) = x.getWidth match {
case 1 => (x.asBool, false.B)
case n =>
val half = x.getWidth / 2
val (leftOne, leftTwo) = two(x(half - 1, 0))
val (rightOne, rightTwo) = two(x(x.getWidth - 1, half))
(leftOne || rightOne, leftTwo || rightTwo || (leftOne && rightOne))
}
def apply(x: UInt, n: Int): Bool = n match {
case 0 => true.B
case 1 => x.orR
case 2 => two(x)._2
case 3 => PopCount(x) >= n.U
}
}
// This gets used everywhere, so make the smallest circuit possible ...
// Given an address and size, create a mask of beatBytes size
// eg: (0x3, 0, 4) => 0001, (0x3, 1, 4) => 0011, (0x3, 2, 4) => 1111
// groupBy applies an interleaved OR reduction; with groupBy=2, 0010 => 01
object MaskGen {
def apply(addr_lo: UInt, lgSize: UInt, beatBytes: Int, groupBy: Int = 1): UInt = {
require (groupBy >= 1 && beatBytes >= groupBy)
require (isPow2(beatBytes) && isPow2(groupBy))
val lgBytes = log2Ceil(beatBytes)
val sizeOH = UIntToOH(lgSize | 0.U(log2Up(beatBytes).W), log2Up(beatBytes)) | (groupBy*2 - 1).U
def helper(i: Int): Seq[(Bool, Bool)] = {
if (i == 0) {
Seq((lgSize >= lgBytes.asUInt, true.B))
} else {
val sub = helper(i-1)
val size = sizeOH(lgBytes - i)
val bit = addr_lo(lgBytes - i)
val nbit = !bit
Seq.tabulate (1 << i) { j =>
val (sub_acc, sub_eq) = sub(j/2)
val eq = sub_eq && (if (j % 2 == 1) bit else nbit)
val acc = sub_acc || (size && eq)
(acc, eq)
}
}
}
if (groupBy == beatBytes) 1.U else
Cat(helper(lgBytes-log2Ceil(groupBy)).map(_._1).reverse)
}
}
File PlusArg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.experimental._
import chisel3.util.HasBlackBoxResource
@deprecated("This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05")
case class PlusArgInfo(default: BigInt, docstring: String)
/** Case class for PlusArg information
*
* @tparam A scala type of the PlusArg value
* @param default optional default value
* @param docstring text to include in the help
* @param doctype description of the Verilog type of the PlusArg value (e.g. STRING, INT)
*/
private case class PlusArgContainer[A](default: Option[A], docstring: String, doctype: String)
/** Typeclass for converting a type to a doctype string
* @tparam A some type
*/
trait Doctypeable[A] {
/** Return the doctype string for some option */
def toDoctype(a: Option[A]): String
}
/** Object containing implementations of the Doctypeable typeclass */
object Doctypes {
/** Converts an Int => "INT" */
implicit val intToDoctype = new Doctypeable[Int] { def toDoctype(a: Option[Int]) = "INT" }
/** Converts a BigInt => "INT" */
implicit val bigIntToDoctype = new Doctypeable[BigInt] { def toDoctype(a: Option[BigInt]) = "INT" }
/** Converts a String => "STRING" */
implicit val stringToDoctype = new Doctypeable[String] { def toDoctype(a: Option[String]) = "STRING" }
}
class plusarg_reader(val format: String, val default: BigInt, val docstring: String, val width: Int) extends BlackBox(Map(
"FORMAT" -> StringParam(format),
"DEFAULT" -> IntParam(default),
"WIDTH" -> IntParam(width)
)) with HasBlackBoxResource {
val io = IO(new Bundle {
val out = Output(UInt(width.W))
})
addResource("/vsrc/plusarg_reader.v")
}
/* This wrapper class has no outputs, making it clear it is a simulation-only construct */
class PlusArgTimeout(val format: String, val default: BigInt, val docstring: String, val width: Int) extends Module {
val io = IO(new Bundle {
val count = Input(UInt(width.W))
})
val max = Module(new plusarg_reader(format, default, docstring, width)).io.out
when (max > 0.U) {
assert (io.count < max, s"Timeout exceeded: $docstring")
}
}
import Doctypes._
object PlusArg
{
/** PlusArg("foo") will return 42.U if the simulation is run with +foo=42
* Do not use this as an initial register value. The value is set in an
* initial block and thus accessing it from another initial is racey.
* Add a docstring to document the arg, which can be dumped in an elaboration
* pass.
*/
def apply(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32): UInt = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new plusarg_reader(name + "=%d", default, docstring, width)).io.out
}
/** PlusArg.timeout(name, default, docstring)(count) will use chisel.assert
* to kill the simulation when count exceeds the specified integer argument.
* Default 0 will never assert.
*/
def timeout(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32)(count: UInt): Unit = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new PlusArgTimeout(name + "=%d", default, docstring, width)).io.count := count
}
}
object PlusArgArtefacts {
private var artefacts: Map[String, PlusArgContainer[_]] = Map.empty
/* Add a new PlusArg */
@deprecated(
"Use `Some(BigInt)` to specify a `default` value. This will be removed in Rocket Chip 2020.08",
"Rocket Chip 2020.05"
)
def append(name: String, default: BigInt, docstring: String): Unit = append(name, Some(default), docstring)
/** Add a new PlusArg
*
* @tparam A scala type of the PlusArg value
* @param name name for the PlusArg
* @param default optional default value
* @param docstring text to include in the help
*/
def append[A : Doctypeable](name: String, default: Option[A], docstring: String): Unit =
artefacts = artefacts ++
Map(name -> PlusArgContainer(default, docstring, implicitly[Doctypeable[A]].toDoctype(default)))
/* From plus args, generate help text */
private def serializeHelp_cHeader(tab: String = ""): String = artefacts
.map{ case(arg, info) =>
s"""|$tab+$arg=${info.doctype}\\n\\
|$tab${" "*20}${info.docstring}\\n\\
|""".stripMargin ++ info.default.map{ case default =>
s"$tab${" "*22}(default=${default})\\n\\\n"}.getOrElse("")
}.toSeq.mkString("\\n\\\n") ++ "\""
/* From plus args, generate a char array of their names */
private def serializeArray_cHeader(tab: String = ""): String = {
val prettyTab = tab + " " * 44 // Length of 'static const ...'
s"${tab}static const char * verilog_plusargs [] = {\\\n" ++
artefacts
.map{ case(arg, _) => s"""$prettyTab"$arg",\\\n""" }
.mkString("")++
s"${prettyTab}0};"
}
/* Generate C code to be included in emulator.cc that helps with
* argument parsing based on available Verilog PlusArgs */
def serialize_cHeader(): String =
s"""|#define PLUSARG_USAGE_OPTIONS \"EMULATOR VERILOG PLUSARGS\\n\\
|${serializeHelp_cHeader(" "*7)}
|${serializeArray_cHeader()}
|""".stripMargin
}
File package.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip
import chisel3._
import chisel3.util._
import scala.math.min
import scala.collection.{immutable, mutable}
package object util {
implicit class UnzippableOption[S, T](val x: Option[(S, T)]) {
def unzip = (x.map(_._1), x.map(_._2))
}
implicit class UIntIsOneOf(private val x: UInt) extends AnyVal {
def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR
def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq)
}
implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal {
/** Like Vec.apply(idx), but tolerates indices of mismatched width */
def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0))
}
implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal {
def apply(idx: UInt): T = {
if (x.size <= 1) {
x.head
} else if (!isPow2(x.size)) {
// For non-power-of-2 seqs, reflect elements to simplify decoder
(x ++ x.takeRight(x.size & -x.size)).toSeq(idx)
} else {
// Ignore MSBs of idx
val truncIdx =
if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx
else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0)
x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) }
}
}
def extract(idx: UInt): T = VecInit(x).extract(idx)
def asUInt: UInt = Cat(x.map(_.asUInt).reverse)
def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n)
def rotate(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n)
def rotateRight(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
}
// allow bitwise ops on Seq[Bool] just like UInt
implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal {
def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b }
def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b }
def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b }
def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x
def >> (n: Int): Seq[Bool] = x drop n
def unary_~ : Seq[Bool] = x.map(!_)
def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_)
def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_)
def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_)
private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B)
}
implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal {
def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable))
def getElements: Seq[Element] = x match {
case e: Element => Seq(e)
case a: Aggregate => a.getElements.flatMap(_.getElements)
}
}
/** Any Data subtype that has a Bool member named valid. */
type DataCanBeValid = Data { val valid: Bool }
implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal {
def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable)
}
implicit class StringToAugmentedString(private val x: String) extends AnyVal {
/** converts from camel case to underscores, also removing all spaces */
def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") {
case (acc, c) if c.isUpper => acc + "_" + c.toLower
case (acc, c) if c == ' ' => acc
case (acc, c) => acc + c
}
/** converts spaces or underscores to hyphens, also lowering case */
def kebab: String = x.toLowerCase map {
case ' ' => '-'
case '_' => '-'
case c => c
}
def named(name: Option[String]): String = {
x + name.map("_named_" + _ ).getOrElse("_with_no_name")
}
def named(name: String): String = named(Some(name))
}
implicit def uintToBitPat(x: UInt): BitPat = BitPat(x)
implicit def wcToUInt(c: WideCounter): UInt = c.value
implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal {
def sextTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x)
}
def padTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(0.U((n - x.getWidth).W), x)
}
// shifts left by n if n >= 0, or right by -n if n < 0
def << (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << n(w-1, 0)
Mux(n(w), shifted >> (1 << w), shifted)
}
// shifts right by n if n >= 0, or left by -n if n < 0
def >> (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << (1 << w) >> n(w-1, 0)
Mux(n(w), shifted, shifted >> (1 << w))
}
// Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts
def extract(hi: Int, lo: Int): UInt = {
require(hi >= lo-1)
if (hi == lo-1) 0.U
else x(hi, lo)
}
// Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts
def extractOption(hi: Int, lo: Int): Option[UInt] = {
require(hi >= lo-1)
if (hi == lo-1) None
else Some(x(hi, lo))
}
// like x & ~y, but first truncate or zero-extend y to x's width
def andNot(y: UInt): UInt = x & ~(y | (x & 0.U))
def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n)
def rotateRight(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r))
}
}
def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n))
def rotateLeft(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r))
}
}
// compute (this + y) % n, given (this < n) and (y < n)
def addWrap(y: UInt, n: Int): UInt = {
val z = x +& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0)
}
// compute (this - y) % n, given (this < n) and (y < n)
def subWrap(y: UInt, n: Int): UInt = {
val z = x -& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0)
}
def grouped(width: Int): Seq[UInt] =
(0 until x.getWidth by width).map(base => x(base + width - 1, base))
def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds
def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x)
// Like >=, but prevents x-prop for ('x >= 0)
def >== (y: UInt): Bool = x >= y || y === 0.U
}
implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal {
def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y)
def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y)
}
implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal {
def toInt: Int = if (x) 1 else 0
// this one's snagged from scalaz
def option[T](z: => T): Option[T] = if (x) Some(z) else None
}
implicit class IntToAugmentedInt(private val x: Int) extends AnyVal {
// exact log2
def log2: Int = {
require(isPow2(x))
log2Ceil(x)
}
}
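// OH1 is a "one-hot minus one" (thermometer) encoding:
// e.g. UIntToOH1(2.U, 4) = b0011, OH1ToOH(b0011) = b0100, OH1ToUInt(b0011) = 2.U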
def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x)
def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x))
def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0)
def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1)
def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None
// Fill 1s from low bits to high bits
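// e.g. leftOR("b00100".U) = "b11100".U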
def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth)
def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0))
helper(1, x)(width-1, 0)
}
// Fill 1s from high bits to low bits
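// e.g. rightOR("b00100".U) = "b00111".U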
def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth)
def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x >> s))
helper(1, x)(width-1, 0)
}
def OptimizationBarrier[T <: Data](in: T): T = {
val barrier = Module(new Module {
val io = IO(new Bundle {
val x = Input(chiselTypeOf(in))
val y = Output(chiselTypeOf(in))
})
io.y := io.x
override def desiredName = s"OptimizationBarrier_${in.typeName}"
})
barrier.io.x := in
barrier.io.y
}
/** Similar to Seq.groupBy except this returns a Seq instead of a Map
* Useful for deterministic code generation
*/
def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = {
val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]]
for (x <- xs) {
val key = f(x)
val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A])
l += x
}
map.view.map({ case (k, vs) => k -> vs.toList }).toList
}
def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match {
case 1 => List.fill(n)(in.head)
case x if x == n => in
case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in")
}
// HeterogeneousBag moved to standalone diplomacy
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts)
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag
}
File Parameters.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.diplomacy
import chisel3._
import chisel3.util.{DecoupledIO, Queue, ReadyValidIO, isPow2, log2Ceil, log2Floor}
import freechips.rocketchip.util.ShiftQueue
/** Options for describing the attributes of memory regions */
object RegionType {
// Define the 'more relaxed than' ordering
val cases = Seq(CACHED, TRACKED, UNCACHED, IDEMPOTENT, VOLATILE, PUT_EFFECTS, GET_EFFECTS)
sealed trait T extends Ordered[T] {
def compare(that: T): Int = cases.indexOf(that) compare cases.indexOf(this)
}
case object CACHED extends T // an intermediate agent may have cached a copy of the region for you
case object TRACKED extends T // the region may have been cached by another master, but coherence is being provided
case object UNCACHED extends T // the region has not been cached yet, but should be cached when possible
case object IDEMPOTENT extends T // gets return most recently put content, but content should not be cached
case object VOLATILE extends T // content may change without a put, but puts and gets have no side effects
case object PUT_EFFECTS extends T // puts produce side effects and so must not be combined/delayed
case object GET_EFFECTS extends T // gets produce side effects and so must not be issued speculatively
}
// A non-empty half-open range; [start, end)
case class IdRange(start: Int, end: Int) extends Ordered[IdRange]
{
require (start >= 0, s"Ids cannot be negative, but got: $start.")
require (start <= end, "Id ranges cannot be negative.")
def compare(x: IdRange) = {
val primary = (this.start - x.start).signum
val secondary = (x.end - this.end).signum
if (primary != 0) primary else secondary
}
def overlaps(x: IdRange) = start < x.end && x.start < end
def contains(x: IdRange) = start <= x.start && x.end <= end
def contains(x: Int) = start <= x && x < end
def contains(x: UInt) =
if (size == 0) {
false.B
} else if (size == 1) { // simple comparison
x === start.U
} else {
// find index of largest different bit
val largestDeltaBit = log2Floor(start ^ (end-1))
val smallestCommonBit = largestDeltaBit + 1 // may not exist in x
val uncommonMask = (1 << smallestCommonBit) - 1
val uncommonBits = (x | 0.U(smallestCommonBit.W))(largestDeltaBit, 0)
// the prefix must match exactly (note: may shift ALL bits away)
(x >> smallestCommonBit) === (start >> smallestCommonBit).U &&
// firrtl constant prop range analysis can eliminate these two:
(start & uncommonMask).U <= uncommonBits &&
uncommonBits <= ((end-1) & uncommonMask).U
}
def shift(x: Int) = IdRange(start+x, end+x)
def size = end - start
def isEmpty = end == start
def range = start until end
}
object IdRange
{
def overlaps(s: Seq[IdRange]) = if (s.isEmpty) None else {
val ranges = s.sorted
(ranges.tail zip ranges.init) find { case (a, b) => a overlaps b }
}
}
// A potentially empty inclusive range of 2-powers [min, max] (in bytes)
case class TransferSizes(min: Int, max: Int)
{
def this(x: Int) = this(x, x)
require (min <= max, s"Min transfer $min > max transfer $max")
require (min >= 0 && max >= 0, s"TransferSizes must be positive, got: ($min, $max)")
require (max == 0 || isPow2(max), s"TransferSizes must be a power of 2, got: $max")
require (min == 0 || isPow2(min), s"TransferSizes must be a power of 2, got: $min")
require (max == 0 || min != 0, s"TransferSize 0 is forbidden unless (0,0), got: ($min, $max)")
def none = min == 0
def contains(x: Int) = isPow2(x) && min <= x && x <= max
def containsLg(x: Int) = contains(1 << x)
def containsLg(x: UInt) =
if (none) false.B
else if (min == max) { log2Ceil(min).U === x }
else { log2Ceil(min).U <= x && x <= log2Ceil(max).U }
def contains(x: TransferSizes) = x.none || (min <= x.min && x.max <= max)
def intersect(x: TransferSizes) =
if (x.max < min || max < x.min) TransferSizes.none
else TransferSizes(scala.math.max(min, x.min), scala.math.min(max, x.max))
// Not a union, because the result may contain sizes contained by neither term
// NOT TO BE CONFUSED WITH COVERPOINTS
def mincover(x: TransferSizes) = {
if (none) {
x
} else if (x.none) {
this
} else {
TransferSizes(scala.math.min(min, x.min), scala.math.max(max, x.max))
}
}
override def toString() = "TransferSizes[%d, %d]".format(min, max)
}
object TransferSizes {
def apply(x: Int) = new TransferSizes(x)
val none = new TransferSizes(0)
def mincover(seq: Seq[TransferSizes]) = seq.foldLeft(none)(_ mincover _)
def intersect(seq: Seq[TransferSizes]) = seq.reduce(_ intersect _)
implicit def asBool(x: TransferSizes) = !x.none
}
// AddressSets specify the address space managed by the manager
// Base is the base address, and mask are the bits consumed by the manager
// e.g: base=0x200, mask=0xff describes a device managing 0x200-0x2ff
// e.g: base=0x1000, mask=0xf0f describes a device managing 0x1000-0x100f, 0x1100-0x110f, ...
case class AddressSet(base: BigInt, mask: BigInt) extends Ordered[AddressSet]
{
// Forbid misaligned base address (and empty sets)
require ((base & mask) == 0, s"Mis-aligned AddressSets are forbidden, got: ${this.toString}")
require (base >= 0, s"AddressSet negative base is ambiguous: $base") // TL2 address widths are not fixed => negative is ambiguous
// We do allow negative mask (=> ignore all high bits)
def contains(x: BigInt) = ((x ^ base) & ~mask) == 0
def contains(x: UInt) = ((x ^ base.U).zext & (~mask).S) === 0.S
// turn x into an address contained in this set
def legalize(x: UInt): UInt = base.U | (mask.U & x)
// overlap iff bitwise: both care (~mask0 & ~mask1) => both equal (base0=base1)
def overlaps(x: AddressSet) = (~(mask | x.mask) & (base ^ x.base)) == 0
// contains iff bitwise: x.mask => mask && contains(x.base)
def contains(x: AddressSet) = ((x.mask | (base ^ x.base)) & ~mask) == 0
// The number of bytes to which the manager must be aligned
def alignment = ((mask + 1) & ~mask)
// Is this a contiguous memory range
def contiguous = alignment == mask+1
def finite = mask >= 0
def max = { require (finite, "Max cannot be calculated on infinite mask"); base | mask }
// Widen the match function to ignore all bits in imask
def widen(imask: BigInt) = AddressSet(base & ~imask, mask | imask)
// Return an AddressSet that only contains the addresses both sets contain
def intersect(x: AddressSet): Option[AddressSet] = {
if (!overlaps(x)) {
None
} else {
val r_mask = mask & x.mask
val r_base = base | x.base
Some(AddressSet(r_base, r_mask))
}
}
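// Remove every address in x from this set. The result is one AddressSet per bit on which this
// set is free but the intersection with x is fixed: each such set agrees with the intersection
// above that bit, takes the opposite value at that bit, and leaves the remaining don't-care bits free.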
def subtract(x: AddressSet): Seq[AddressSet] = {
intersect(x) match {
case None => Seq(this)
case Some(remove) => AddressSet.enumerateBits(mask & ~remove.mask).map { bit =>
val nmask = (mask & (bit-1)) | remove.mask
val nbase = (remove.base ^ bit) & ~nmask
AddressSet(nbase, nmask)
}
}
}
// AddressSets have one natural Ordering (the containment order, if contiguous)
def compare(x: AddressSet) = {
val primary = (this.base - x.base).signum // smallest address first
val secondary = (x.mask - this.mask).signum // largest mask first
if (primary != 0) primary else secondary
}
// We always want to see things in hex
override def toString() = {
if (mask >= 0) {
"AddressSet(0x%x, 0x%x)".format(base, mask)
} else {
"AddressSet(0x%x, ~0x%x)".format(base, ~mask)
}
}
def toRanges = {
require (finite, "Ranges cannot be calculated on infinite mask")
val size = alignment
val fragments = mask & ~(size-1)
val bits = bitIndexes(fragments)
(BigInt(0) until (BigInt(1) << bits.size)).map { i =>
val off = bitIndexes(i).foldLeft(base) { case (a, b) => a.setBit(bits(b)) }
AddressRange(off, size)
}
}
}
object AddressSet
{
val everything = AddressSet(0, -1)
def misaligned(base: BigInt, size: BigInt, tail: Seq[AddressSet] = Seq()): Seq[AddressSet] = {
if (size == 0) tail.reverse else {
val maxBaseAlignment = base & (-base) // 0 for infinite (LSB)
val maxSizeAlignment = BigInt(1) << log2Floor(size) // MSB of size
val step =
if (maxBaseAlignment == 0 || maxBaseAlignment > maxSizeAlignment)
maxSizeAlignment else maxBaseAlignment
misaligned(base+step, size-step, AddressSet(base, step-1) +: tail)
}
}
def unify(seq: Seq[AddressSet], bit: BigInt): Seq[AddressSet] = {
// Pair terms up by ignoring 'bit'
seq.distinct.groupBy(x => x.copy(base = x.base & ~bit)).map { case (key, seq) =>
if (seq.size == 1) {
seq.head // singleton -> unaffected
} else {
key.copy(mask = key.mask | bit) // pair - widen mask by bit
}
}.toList
}
def unify(seq: Seq[AddressSet]): Seq[AddressSet] = {
val bits = seq.map(_.base).foldLeft(BigInt(0))(_ | _)
AddressSet.enumerateBits(bits).foldLeft(seq) { case (acc, bit) => unify(acc, bit) }.sorted
}
def enumerateMask(mask: BigInt): Seq[BigInt] = {
def helper(id: BigInt, tail: Seq[BigInt]): Seq[BigInt] =
if (id == mask) (id +: tail).reverse else helper(((~mask | id) + 1) & mask, id +: tail)
helper(0, Nil)
}
def enumerateBits(mask: BigInt): Seq[BigInt] = {
def helper(x: BigInt): Seq[BigInt] = {
if (x == 0) {
Nil
} else {
val bit = x & (-x)
bit +: helper(x & ~bit)
}
}
helper(mask)
}
}
case class BufferParams(depth: Int, flow: Boolean, pipe: Boolean)
{
require (depth >= 0, "Buffer depth must be >= 0")
def isDefined = depth > 0
def latency = if (isDefined && !flow) 1 else 0
def apply[T <: Data](x: DecoupledIO[T]) =
if (isDefined) Queue(x, depth, flow=flow, pipe=pipe)
else x
def irrevocable[T <: Data](x: ReadyValidIO[T]) =
if (isDefined) Queue.irrevocable(x, depth, flow=flow, pipe=pipe)
else x
def sq[T <: Data](x: DecoupledIO[T]) =
if (!isDefined) x else {
val sq = Module(new ShiftQueue(x.bits, depth, flow=flow, pipe=pipe))
sq.io.enq <> x
sq.io.deq
}
override def toString() = "BufferParams:%d%s%s".format(depth, if (flow) "F" else "", if (pipe) "P" else "")
}
object BufferParams
{
implicit def apply(depth: Int): BufferParams = BufferParams(depth, false, false)
val default = BufferParams(2)
val none = BufferParams(0)
val flow = BufferParams(1, true, false)
val pipe = BufferParams(1, false, true)
}
case class TriStateValue(value: Boolean, set: Boolean)
{
def update(orig: Boolean) = if (set) value else orig
}
object TriStateValue
{
implicit def apply(value: Boolean): TriStateValue = TriStateValue(value, true)
def unset = TriStateValue(false, false)
}
trait DirectedBuffers[T] {
def copyIn(x: BufferParams): T
def copyOut(x: BufferParams): T
def copyInOut(x: BufferParams): T
}
trait IdMapEntry {
def name: String
def from: IdRange
def to: IdRange
def isCache: Boolean
def requestFifo: Boolean
def maxTransactionsInFlight: Option[Int]
def pretty(fmt: String) =
if (from ne to) { // if the subclass uses the same reference for both from and to, assume its format string has an arity of 5
fmt.format(to.start, to.end, from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "")
} else {
fmt.format(from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "")
}
}
abstract class IdMap[T <: IdMapEntry] {
protected val fmt: String
val mapping: Seq[T]
def pretty: String = mapping.map(_.pretty(fmt)).mkString(",\n")
}
File Edges.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.util._
class TLEdge(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdgeParameters(client, manager, params, sourceInfo)
{
def isAligned(address: UInt, lgSize: UInt): Bool = {
if (maxLgSize == 0) true.B else {
val mask = UIntToOH1(lgSize, maxLgSize)
(address & mask) === 0.U
}
}
def mask(address: UInt, lgSize: UInt): UInt =
MaskGen(address, lgSize, manager.beatBytes)
def staticHasData(bundle: TLChannel): Option[Boolean] = {
bundle match {
case _:TLBundleA => {
// Do there exist A messages with Data?
val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial
// Do there exist A messages without Data?
val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint
// Statically optimize the case where hasData is a constant
if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None
}
case _:TLBundleB => {
// Do there exist B messages with Data?
val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial
// Do there exist B messages without Data?
val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint
// Statically optimize the case where hasData is a constant
if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None
}
case _:TLBundleC => {
// Do there exist C messages with Data?
val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe
// Do there exist C messages without Data?
val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe
if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None
}
case _:TLBundleD => {
// Do there exist D messages with Data?
val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB
// Do there exist D messages without Data?
val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT
if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None
}
case _:TLBundleE => Some(false)
}
}
def isRequest(x: TLChannel): Bool = {
x match {
case a: TLBundleA => true.B
case b: TLBundleB => true.B
case c: TLBundleC => c.opcode(2) && c.opcode(1)
// opcode === TLMessages.Release ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(2) && !d.opcode(1)
// opcode === TLMessages.Grant ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
}
def isResponse(x: TLChannel): Bool = {
x match {
case a: TLBundleA => false.B
case b: TLBundleB => false.B
case c: TLBundleC => !c.opcode(2) || !c.opcode(1)
// opcode =/= TLMessages.Release &&
// opcode =/= TLMessages.ReleaseData
case d: TLBundleD => true.B // Grant isResponse + isRequest
case e: TLBundleE => true.B
}
}
def hasData(x: TLChannel): Bool = {
val opdata = x match {
case a: TLBundleA => !a.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case b: TLBundleB => !b.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case c: TLBundleC => c.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.ProbeAckData ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
staticHasData(x).map(_.B).getOrElse(opdata)
}
def opcode(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.opcode
case b: TLBundleB => b.opcode
case c: TLBundleC => c.opcode
case d: TLBundleD => d.opcode
}
}
def param(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.param
case b: TLBundleB => b.param
case c: TLBundleC => c.param
case d: TLBundleD => d.param
}
}
def size(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.size
case b: TLBundleB => b.size
case c: TLBundleC => c.size
case d: TLBundleD => d.size
}
}
def data(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.data
case b: TLBundleB => b.data
case c: TLBundleC => c.data
case d: TLBundleD => d.data
}
}
def corrupt(x: TLDataChannel): Bool = {
x match {
case a: TLBundleA => a.corrupt
case b: TLBundleB => b.corrupt
case c: TLBundleC => c.corrupt
case d: TLBundleD => d.corrupt
}
}
def mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.mask
case b: TLBundleB => b.mask
case c: TLBundleC => mask(c.address, c.size)
}
}
def full_mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => mask(a.address, a.size)
case b: TLBundleB => mask(b.address, b.size)
case c: TLBundleC => mask(c.address, c.size)
}
}
def address(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.address
case b: TLBundleB => b.address
case c: TLBundleC => c.address
}
}
def source(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.source
case b: TLBundleB => b.source
case c: TLBundleC => c.source
case d: TLBundleD => d.source
}
}
def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes)
def addr_lo(x: UInt): UInt =
if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0)
def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x))
def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x))
def numBeats(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 1.U
case bundle: TLDataChannel => {
val hasData = this.hasData(bundle)
val size = this.size(bundle)
val cutoff = log2Ceil(manager.beatBytes)
val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U
val decode = UIntToOH(size, maxLgSize+1) >> cutoff
Mux(hasData, decode | small.asUInt, 1.U)
}
}
}
def numBeats1(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 0.U
case bundle: TLDataChannel => {
if (maxLgSize == 0) {
0.U
} else {
val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes)
Mux(hasData(bundle), decode, 0.U)
}
}
}
}
def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val beats1 = numBeats1(bits)
val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W))
val counter1 = counter - 1.U
val first = counter === 0.U
val last = counter === 1.U || beats1 === 0.U
val done = last && fire
val count = (beats1 & ~counter1)
when (fire) {
counter := Mux(first, beats1, counter1)
}
(first, last, done, count)
}
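// Worked example (illustrative, not in the original source): with 8-byte beats and
// a 32-byte data burst, numBeats1 gives beats1 = 3 and the counter evolves as
//   beat 0: counter=0 -> first=true, count=0, counter := 3
//   beat 1: counter=3 ->             count=1, counter := 2
//   beat 2: counter=2 ->             count=2, counter := 1
//   beat 3: counter=1 -> last=true,  count=3, counter := 0 (done asserts when it fires)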
def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1
def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire)
def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid)
def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2
def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire)
def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid)
def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3
def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire)
def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid)
def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3)
}
def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire)
def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid)
def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4)
}
def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire)
def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid)
def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes))
}
def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire)
def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid)
// Does the request need T permissions to be executed?
def needT(a: TLBundleA): Bool = {
val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLPermissions.NtoB -> false.B,
TLPermissions.NtoT -> true.B,
TLPermissions.BtoT -> true.B))
MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array(
TLMessages.PutFullData -> true.B,
TLMessages.PutPartialData -> true.B,
TLMessages.ArithmeticData -> true.B,
TLMessages.LogicalData -> true.B,
TLMessages.Get -> false.B,
TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLHints.PREFETCH_READ -> false.B,
TLHints.PREFETCH_WRITE -> true.B)),
TLMessages.AcquireBlock -> acq_needT,
TLMessages.AcquirePerm -> acq_needT))
}
// This is a very expensive circuit; use only if you really mean it!
def inFlight(x: TLBundle): (UInt, UInt) = {
val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W))
val bce = manager.anySupportAcquireB && client.anySupportProbe
val (a_first, a_last, _) = firstlast(x.a)
val (b_first, b_last, _) = firstlast(x.b)
val (c_first, c_last, _) = firstlast(x.c)
val (d_first, d_last, _) = firstlast(x.d)
val (e_first, e_last, _) = firstlast(x.e)
val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits))
val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits))
val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits))
val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits))
val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits))
val a_inc = x.a.fire && a_first && a_request
val b_inc = x.b.fire && b_first && b_request
val c_inc = x.c.fire && c_first && c_request
val d_inc = x.d.fire && d_first && d_request
val e_inc = x.e.fire && e_first && e_request
val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil))
val a_dec = x.a.fire && a_last && a_response
val b_dec = x.b.fire && b_last && b_response
val c_dec = x.c.fire && c_last && c_response
val d_dec = x.d.fire && d_last && d_response
val e_dec = x.e.fire && e_last && e_response
val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil))
val next_flight = flight + PopCount(inc) - PopCount(dec)
flight := next_flight
(flight, next_flight)
}
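// Hedged usage sketch (not in the original source): a monitor can bound outstanding
// transactions with the pair returned here; the names below are illustrative only.
//   val (inflightNow, inflightNext) = edge.inFlight(tlBundle)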
def prettySourceMapping(context: String): String = {
s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n"
}
}
class TLEdgeOut(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
// Transfers
def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquireBlock
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquirePerm
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.Release
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ReleaseData
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) =
Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B)
def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAck
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions, data)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAckData
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B)
def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink)
def GrantAck(toSink: UInt): TLBundleE = {
val e = Wire(new TLBundleE(bundle))
e.sink := toSink
e
}
// Accesses
def Get(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsGetFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Get
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutFullFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutFullData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, mask, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutPartialFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutPartialData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = {
require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsArithmeticFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.ArithmeticData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsLogicalFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.LogicalData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = {
require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsHintFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Hint
a.param := param
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data)
def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAckData
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size)
def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.HintAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
}
class TLEdgeIn(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = {
val todo = x.filter(!_.isEmpty)
val heads = todo.map(_.head)
val tails = todo.map(_.tail)
if (todo.isEmpty) Nil else { heads +: myTranspose(tails) }
}
// Transfers
def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = {
require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}")
val legal = client.supportsProbe(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Probe
b.param := capPermissions
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.Grant
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.GrantData
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B)
def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.ReleaseAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
// Accesses
def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = {
require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsGet(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Get
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsPutFull(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutFullData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, mask, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsPutPartial(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutPartialData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsArithmetic(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.ArithmeticData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsLogical(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.LogicalData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = {
require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsHint(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Hint
b.param := param
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size)
def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied)
def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data)
def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAckData
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B)
def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied)
def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B)
def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.HintAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
}
| module TLMonitor_54( // @[Monitor.scala:36:7]
input clock, // @[Monitor.scala:36:7]
input reset, // @[Monitor.scala:36:7]
input io_in_a_ready, // @[Monitor.scala:20:14]
input io_in_a_valid, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_opcode, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_param, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_size, // @[Monitor.scala:20:14]
input [5:0] io_in_a_bits_source, // @[Monitor.scala:20:14]
input [31:0] io_in_a_bits_address, // @[Monitor.scala:20:14]
input [7:0] io_in_a_bits_mask, // @[Monitor.scala:20:14]
input io_in_a_bits_corrupt, // @[Monitor.scala:20:14]
input io_in_b_ready, // @[Monitor.scala:20:14]
input io_in_b_valid, // @[Monitor.scala:20:14]
input [1:0] io_in_b_bits_param, // @[Monitor.scala:20:14]
input [5:0] io_in_b_bits_source, // @[Monitor.scala:20:14]
input [31:0] io_in_b_bits_address, // @[Monitor.scala:20:14]
input io_in_c_ready, // @[Monitor.scala:20:14]
input io_in_c_valid, // @[Monitor.scala:20:14]
input [2:0] io_in_c_bits_opcode, // @[Monitor.scala:20:14]
input [2:0] io_in_c_bits_param, // @[Monitor.scala:20:14]
input [2:0] io_in_c_bits_size, // @[Monitor.scala:20:14]
input [5:0] io_in_c_bits_source, // @[Monitor.scala:20:14]
input [31:0] io_in_c_bits_address, // @[Monitor.scala:20:14]
input io_in_c_bits_corrupt, // @[Monitor.scala:20:14]
input io_in_d_ready, // @[Monitor.scala:20:14]
input io_in_d_valid, // @[Monitor.scala:20:14]
input [2:0] io_in_d_bits_opcode, // @[Monitor.scala:20:14]
input [1:0] io_in_d_bits_param, // @[Monitor.scala:20:14]
input [2:0] io_in_d_bits_size, // @[Monitor.scala:20:14]
input [5:0] io_in_d_bits_source, // @[Monitor.scala:20:14]
input [2:0] io_in_d_bits_sink, // @[Monitor.scala:20:14]
input io_in_d_bits_denied, // @[Monitor.scala:20:14]
input io_in_d_bits_corrupt, // @[Monitor.scala:20:14]
input io_in_e_valid, // @[Monitor.scala:20:14]
input [2:0] io_in_e_bits_sink // @[Monitor.scala:20:14]
);
wire [31:0] _plusarg_reader_1_out; // @[PlusArg.scala:80:11]
wire [31:0] _plusarg_reader_out; // @[PlusArg.scala:80:11]
wire [12:0] _GEN = {10'h0, io_in_a_bits_size}; // @[package.scala:243:71]
wire [12:0] _GEN_0 = {10'h0, io_in_c_bits_size}; // @[package.scala:243:71]
wire _a_first_T_1 = io_in_a_ready & io_in_a_valid; // @[Decoupled.scala:51:35]
reg [2:0] a_first_counter; // @[Edges.scala:229:27]
reg [2:0] opcode; // @[Monitor.scala:387:22]
reg [2:0] param; // @[Monitor.scala:388:22]
reg [2:0] size; // @[Monitor.scala:389:22]
reg [5:0] source; // @[Monitor.scala:390:22]
reg [31:0] address; // @[Monitor.scala:391:22]
wire _d_first_T_3 = io_in_d_ready & io_in_d_valid; // @[Decoupled.scala:51:35]
reg [2:0] d_first_counter; // @[Edges.scala:229:27]
reg [2:0] opcode_1; // @[Monitor.scala:538:22]
reg [1:0] param_1; // @[Monitor.scala:539:22]
reg [2:0] size_1; // @[Monitor.scala:540:22]
reg [5:0] source_1; // @[Monitor.scala:541:22]
reg [2:0] sink; // @[Monitor.scala:542:22]
reg denied; // @[Monitor.scala:543:22]
reg [2:0] b_first_counter; // @[Edges.scala:229:27]
reg [1:0] param_2; // @[Monitor.scala:411:22]
reg [5:0] source_2; // @[Monitor.scala:413:22]
reg [31:0] address_1; // @[Monitor.scala:414:22]
wire _c_first_T_1 = io_in_c_ready & io_in_c_valid; // @[Decoupled.scala:51:35]
reg [2:0] c_first_counter; // @[Edges.scala:229:27]
reg [2:0] opcode_3; // @[Monitor.scala:515:22]
reg [2:0] param_3; // @[Monitor.scala:516:22]
reg [2:0] size_3; // @[Monitor.scala:517:22]
reg [5:0] source_3; // @[Monitor.scala:518:22]
reg [31:0] address_2; // @[Monitor.scala:519:22]
reg [62:0] inflight; // @[Monitor.scala:614:27]
reg [251:0] inflight_opcodes; // @[Monitor.scala:616:35]
reg [251:0] inflight_sizes; // @[Monitor.scala:618:33]
reg [2:0] a_first_counter_1; // @[Edges.scala:229:27]
wire a_first_1 = a_first_counter_1 == 3'h0; // @[Edges.scala:229:27, :231:25]
reg [2:0] d_first_counter_1; // @[Edges.scala:229:27]
wire d_first_1 = d_first_counter_1 == 3'h0; // @[Edges.scala:229:27, :231:25]
wire [63:0] _GEN_1 = {58'h0, io_in_a_bits_source}; // @[OneHot.scala:58:35]
wire _GEN_2 = _a_first_T_1 & a_first_1; // @[Decoupled.scala:51:35]
wire d_release_ack = io_in_d_bits_opcode == 3'h6; // @[Monitor.scala:673:46]
wire _GEN_3 = io_in_d_bits_opcode != 3'h6; // @[Monitor.scala:673:46, :674:74]
wire [63:0] _GEN_4 = {58'h0, io_in_d_bits_source}; // @[OneHot.scala:58:35]
reg [31:0] watchdog; // @[Monitor.scala:709:27]
reg [62:0] inflight_1; // @[Monitor.scala:726:35]
reg [251:0] inflight_sizes_1; // @[Monitor.scala:728:35]
reg [2:0] c_first_counter_1; // @[Edges.scala:229:27]
wire c_first_1 = c_first_counter_1 == 3'h0; // @[Edges.scala:229:27, :231:25]
reg [2:0] d_first_counter_2; // @[Edges.scala:229:27]
wire d_first_2 = d_first_counter_2 == 3'h0; // @[Edges.scala:229:27, :231:25]
wire _GEN_5 = io_in_c_bits_opcode[2] & io_in_c_bits_opcode[1]; // @[Edges.scala:68:{36,40,51}]
wire [63:0] _GEN_6 = {58'h0, io_in_c_bits_source}; // @[OneHot.scala:58:35]
wire _GEN_7 = _c_first_T_1 & c_first_1 & _GEN_5; // @[Decoupled.scala:51:35]
reg [31:0] watchdog_1; // @[Monitor.scala:818:27]
reg [6:0] inflight_2; // @[Monitor.scala:828:27]
reg [2:0] d_first_counter_3; // @[Edges.scala:229:27]
wire d_first_3 = d_first_counter_3 == 3'h0; // @[Edges.scala:229:27, :231:25]
wire _GEN_8 = _d_first_T_3 & d_first_3 & io_in_d_bits_opcode[2] & ~(io_in_d_bits_opcode[1]); // @[Decoupled.scala:51:35]
wire [7:0] _d_set_T = 8'h1 << io_in_d_bits_sink; // @[OneHot.scala:58:35]
wire [6:0] d_set = _GEN_8 ? _d_set_T[6:0] : 7'h0; // @[OneHot.scala:58:35] |
Generate the Verilog code corresponding to the following Chisel files.
File ShiftReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
// Similar to the Chisel ShiftRegister but allows the user to suggest a
// name to the registers that get instantiated, and
// to provide a reset value.
object ShiftRegInit {
def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T =
(0 until n).foldRight(in) {
case (i, next) => {
val r = RegNext(next, init)
name.foreach { na => r.suggestName(s"${na}_${i}") }
r
}
}
}
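// Hedged usage sketch (not part of the original file): delay a Bool by two cycles
// through named, reset-to-false stages; `fast` is an assumed input signal.
//   val delayed = ShiftRegInit(fast, n = 2, init = false.B, name = Some("dly"))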
/** These wrap behavioral
* shift registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
* The different types vary in their reset behavior:
* AsyncResetShiftReg -- Asynchronously reset register array
* A W(width) x D(depth) sized array is constructed from D instantiations of a
 * W-wide register vector. Functionally identical to AsyncResetSynchronizerShiftReg,
* but only used for timing applications
*/
abstract class AbstractPipelineReg(w: Int = 1) extends Module {
val io = IO(new Bundle {
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
}
)
}
object AbstractPipelineReg {
def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = {
val chain = Module(gen)
name.foreach{ chain.suggestName(_) }
chain.io.d := in.asUInt
chain.io.q.asTypeOf(in)
}
}
class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) {
require(depth > 0, "Depth must be greater than 0.")
override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}"
val chain = List.tabulate(depth) { i =>
Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}")
}
chain.last.io.d := io.d
chain.last.io.en := true.B
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink.io.d := source.io.q
sink.io.en := true.B
}
io.q := chain.head.io.q
}
object AsyncResetShiftReg {
def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name)
def apply [T <: Data](in: T, depth: Int, name: Option[String]): T =
apply(in, depth, 0, name)
def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T =
apply(in, depth, init.litValue.toInt, name)
def apply [T <: Data](in: T, depth: Int, init: T): T =
apply (in, depth, init.litValue.toInt, None)
}
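// Hedged usage sketch (not part of the original file): a 4-deep, asynchronously
// reset pipeline of an 8-bit value with reset value 0; `data8` is an assumed signal.
//   val piped = AsyncResetShiftReg(data8, depth = 4, init = 0, name = Some("pipe"))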
File SynchronizerReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util.{RegEnable, Cat}
/** These wrap behavioral
* shift and next registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
*
* These are built up of *ResetSynchronizerPrimitiveShiftReg,
* intended to be replaced by the integrator's metastable flops chains or replaced
* at this level if they have a multi-bit wide synchronizer primitive.
* The different types vary in their reset behavior:
* NonSyncResetSynchronizerShiftReg -- Register array which does not have a reset pin
* AsyncResetSynchronizerShiftReg -- Asynchronously reset register array, constructed from W instantiations of D deep
* 1-bit-wide shift registers.
* SyncResetSynchronizerShiftReg -- Synchronously reset register array, constructed similarly to AsyncResetSynchronizerShiftReg
*
* [Inferred]ResetSynchronizerShiftReg -- TBD reset type by chisel3 reset inference.
*
* ClockCrossingReg -- Not made up of SynchronizerPrimitiveShiftReg. This is for single-deep flops which cross
* Clock Domains.
*/
object SynchronizerResetType extends Enumeration {
val NonSync, Inferred, Sync, Async = Value
}
// Note: this should not be used directly.
// Use the companion object to generate this with the correct reset type mixin.
private class SynchronizerPrimitiveShiftReg(
sync: Int,
init: Boolean,
resetType: SynchronizerResetType.Value)
extends AbstractPipelineReg(1) {
val initInt = if (init) 1 else 0
val initPostfix = resetType match {
case SynchronizerResetType.NonSync => ""
case _ => s"_i${initInt}"
}
override def desiredName = s"${resetType.toString}ResetSynchronizerPrimitiveShiftReg_d${sync}${initPostfix}"
val chain = List.tabulate(sync) { i =>
val reg = if (resetType == SynchronizerResetType.NonSync) Reg(Bool()) else RegInit(init.B)
reg.suggestName(s"sync_$i")
}
chain.last := io.d.asBool
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink := source
}
io.q := chain.head.asUInt
}
private object SynchronizerPrimitiveShiftReg {
def apply (in: Bool, sync: Int, init: Boolean, resetType: SynchronizerResetType.Value): Bool = {
val gen: () => SynchronizerPrimitiveShiftReg = resetType match {
case SynchronizerResetType.NonSync =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType)
case SynchronizerResetType.Async =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireAsyncReset
case SynchronizerResetType.Sync =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireSyncReset
case SynchronizerResetType.Inferred =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType)
}
AbstractPipelineReg(gen(), in)
}
}
// Note: This module may end up with a non-AsyncReset type reset.
// But the Primitives within will always have AsyncReset type.
class AsyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int)
extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"AsyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
withReset(reset.asAsyncReset){
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Async)
}
}
io.q := Cat(output.reverse)
}
object AsyncResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
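// Hedged usage sketch (not part of the original file): the usual 3-flop synchronizer
// for a Bool entering this clock domain; `rawAsyncBool` is an assumed signal.
//   val sampled = AsyncResetSynchronizerShiftReg(rawAsyncBool, 3)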
// Note: This module may end up with a non-Bool type reset.
// But the Primitives within will always have Bool reset type.
@deprecated("SyncResetSynchronizerShiftReg is unecessary with Chisel3 inferred resets. Use ResetSynchronizerShiftReg which will use the inferred reset type.", "rocket-chip 1.2")
class SyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"SyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
withReset(reset.asBool){
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Sync)
}
}
io.q := Cat(output.reverse)
}
object SyncResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
if (sync == 0) in else AbstractPipelineReg(new SyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
class ResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"ResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Inferred)
}
io.q := Cat(output.reverse)
}
object ResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
AbstractPipelineReg(new ResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
class SynchronizerShiftReg(w: Int = 1, sync: Int = 3) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"SynchronizerShiftReg_w${w}_d${sync}"
val output = Seq.tabulate(w) { i =>
SynchronizerPrimitiveShiftReg(io.d(i), sync, false, SynchronizerResetType.NonSync)
}
io.q := Cat(output.reverse)
}
object SynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, name: Option[String] = None): T =
if (sync == 0) in else AbstractPipelineReg(new SynchronizerShiftReg(in.getWidth, sync), in, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, None)
def apply [T <: Data](in: T): T =
apply (in, 3, None)
}
class ClockCrossingReg(w: Int = 1, doInit: Boolean) extends Module {
override def desiredName = s"ClockCrossingReg_w${w}"
val io = IO(new Bundle{
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
val en = Input(Bool())
})
val cdc_reg = if (doInit) RegEnable(io.d, 0.U(w.W), io.en) else RegEnable(io.d, io.en)
io.q := cdc_reg
}
object ClockCrossingReg {
def apply [T <: Data](in: T, en: Bool, doInit: Boolean, name: Option[String] = None): T = {
val cdc_reg = Module(new ClockCrossingReg(in.getWidth, doInit))
name.foreach{ cdc_reg.suggestName(_) }
cdc_reg.io.d := in.asUInt
cdc_reg.io.en := en
cdc_reg.io.q.asTypeOf(in)
}
}
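// Hedged usage sketch (not part of the original file): capture a bus in the destination
// domain only when the enable marks it stable; `busData` and `captureEn` are assumed signals.
//   val held = ClockCrossingReg(busData, en = captureEn, doInit = false, name = Some("cdc_hold"))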
| module AsyncResetSynchronizerPrimitiveShiftReg_d3_i0_241( // @[SynchronizerReg.scala:68:19]
input clock, // @[SynchronizerReg.scala:68:19]
input reset, // @[SynchronizerReg.scala:68:19]
output io_q // @[ShiftReg.scala:36:14]
);
wire io_d = 1'h1; // @[SynchronizerReg.scala:54:22, :68:19]
wire _sync_2_T = 1'h1; // @[SynchronizerReg.scala:54:22, :68:19]
wire io_q_0; // @[SynchronizerReg.scala:68:19]
reg sync_0; // @[SynchronizerReg.scala:51:87]
assign io_q_0 = sync_0; // @[SynchronizerReg.scala:51:87, :68:19]
reg sync_1; // @[SynchronizerReg.scala:51:87]
reg sync_2; // @[SynchronizerReg.scala:51:87]
always @(posedge clock or posedge reset) begin // @[SynchronizerReg.scala:68:19]
if (reset) begin // @[SynchronizerReg.scala:68:19]
sync_0 <= 1'h0; // @[SynchronizerReg.scala:51:87]
sync_1 <= 1'h0; // @[SynchronizerReg.scala:51:87]
sync_2 <= 1'h0; // @[SynchronizerReg.scala:51:87]
end
else begin // @[SynchronizerReg.scala:68:19]
sync_0 <= sync_1; // @[SynchronizerReg.scala:51:87]
sync_1 <= sync_2; // @[SynchronizerReg.scala:51:87]
sync_2 <= 1'h1; // @[SynchronizerReg.scala:51:87, :54:22, :68:19]
end
always @(posedge, posedge) |